diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 575059c5d..42ade579d 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -63,9 +63,9 @@ s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g # Line info varies between versions /^LINE [0-9]+:.*$/d /^ *\^$/d -# -## Remove trailing whitespace -#s/ *$//g + +# Remove trailing whitespace +s/ *$//g # ## pg12 changes #s/Partitioned table "/Table "/g diff --git a/src/test/regress/expected/adaptive_executor.out b/src/test/regress/expected/adaptive_executor.out index 87170b24b..d8ebc7d95 100644 --- a/src/test/regress/expected/adaptive_executor.out +++ b/src/test/regress/expected/adaptive_executor.out @@ -5,9 +5,9 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 801009000; SELECT create_distributed_table('test','x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test VALUES (1,2); @@ -18,7 +18,7 @@ SET citus.max_adaptive_executor_pool_size TO 2; SET citus.task_executor_type TO 'adaptive'; BEGIN; SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x); - count + count --------------------------------------------------------------------- 2 (1 row) @@ -27,7 +27,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$ SELECT count(*) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%' $$); - sum + sum --------------------------------------------------------------------- 2 (1 row) @@ -37,7 +37,7 @@ END; SET citus.executor_slow_start_interval TO '10ms'; BEGIN; SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x); - count + count --------------------------------------------------------------------- 2 (1 row) @@ -46,7 +46,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$ SELECT count(*) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%' $$); - sum + sum --------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/adaptive_executor_repartition.out b/src/test/regress/expected/adaptive_executor_repartition.out index e8711493a..f0f22eaa6 100644 --- a/src/test/regress/expected/adaptive_executor_repartition.out +++ b/src/test/regress/expected/adaptive_executor_repartition.out @@ -5,47 +5,47 @@ SET citus.shard_replication_factor to 1; SET citus.enable_repartition_joins TO true; CREATE TABLE ab(a int, b int); SELECT create_distributed_table('ab', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO ab SELECT *,* FROM generate_series(1,10); SELECT COUNT(*) FROM ab k, ab l WHERE k.a = l.b; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT COUNT(*) FROM ab k, ab l, ab m, ab t WHERE k.a = l.b AND k.a = m.b AND t.b = l.a; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count + count --------------------------------------------------------------------- 10 (1 row) BEGIN; SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count + count 
--------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -62,21 +62,21 @@ CREATE TABLE single_hash_repartition_first (id int, sum int, avg float); CREATE TABLE single_hash_repartition_second (id int, sum int, avg float); CREATE TABLE ref_table (id int, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- single hash repartition after bcast joins @@ -86,7 +86,7 @@ FROM ref_table r1, single_hash_repartition_second t1, single_hash_repartition_first t2 WHERE r1.id = t1.id AND t2.sum = t1.id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -104,7 +104,7 @@ FROM single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3 WHERE t1.id = t2.id AND t1.sum = t3.id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 14e0a1dbe..d1e5d7cd8 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -4,7 +4,7 @@ SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset -- adding the same node again should return the existing nodeid SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid; - ?column? + ?column? 
--------------------------------------------------------------------- t (1 row) @@ -15,8 +15,8 @@ ERROR: group 0 already has a primary node -- start_metadata_sync_to_node() for coordinator should raise a notice SELECT start_metadata_sync_to_node('localhost', :master_port); NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 633f68bc7..5b86da4b3 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -39,47 +39,47 @@ create aggregate sum2_strict (int) ( combinefunc = sum2_sfunc_strict ); select create_distributed_function('sum2(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('sum2_strict(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) create table aggdata (id int, key int, val int, valf float8); select create_distributed_table('aggdata', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into aggdata (id, key, val, valf) values (1, 1, 2, 11.2), (2, 1, NULL, 2.1), (3, 2, 2, 3.22), (4, 2, 3, 4.23), (5, 2, 5, 5.25), (6, 3, 4, 63.4), (7, 5, NULL, 75), (8, 6, NULL, NULL), (9, 6, NULL, 96), (10, 7, 8, 1078), (11, 9, 0, 1.19); select key, sum2(val), sum2_strict(val), stddev(valf) from aggdata group by key order by key; - key | sum2 | sum2_strict | stddev + key | sum2 | sum2_strict | stddev --------------------------------------------------------------------- 1 | | 4 | 6.43467170879758 2 | 20 | 20 | 1.01500410508201 - 3 | 8 | 8 | - 5 | | | - 6 | | | - 7 | 16 | 16 | - 9 | 0 | 0 | + 3 | 8 | 8 | + 5 | | | + 6 | | | + 7 | 16 | 16 | + 9 | 0 | 0 | (7 rows) -- FILTER supported select key, sum2(val) filter (where valf < 5), sum2_strict(val) filter (where valf < 5) from aggdata group by key order by key; - key | sum2 | sum2_strict + key | sum2 | sum2_strict --------------------------------------------------------------------- - 1 | | + 1 | | 2 | 10 | 10 - 3 | 0 | - 5 | 0 | - 6 | 0 | - 7 | 0 | + 3 | 0 | + 5 | 0 | + 6 | 0 | + 7 | 0 | 9 | 0 | 0 (7 rows) @@ -88,17 +88,17 @@ select key, sum2(distinct val), sum2_strict(distinct val) from aggdata group by ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) select id, sum2(distinct val), sum2_strict(distinct val) from aggdata group by id order by id; - id | sum2 | sum2_strict + id | sum2 | sum2_strict --------------------------------------------------------------------- 1 | 4 | 4 - 2 | | + 2 | | 3 | 4 | 4 4 | 6 | 6 5 | 10 | 10 6 | 8 | 8 - 7 | | - 8 | | - 9 | | + 7 | | + 8 | | + 9 | | 10 | 16 | 16 11 | 0 | 0 (11 rows) @@ -108,9 +108,9 @@ select key, sum2(val order by valf), sum2_strict(val order by valf) from aggdata ERROR: unsupported aggregate function sum2 -- Test handling a lack of intermediate results select sum2(val), sum2_strict(val) from aggdata where valf = 0; - sum2 | sum2_strict + sum2 | sum2_strict --------------------------------------------------------------------- - 0 | + 0 | (1 row) -- test 
polymorphic aggregates from https://github.com/citusdata/citus/issues/2397 @@ -136,15 +136,15 @@ CREATE AGGREGATE last ( combinefunc = last_agg ); SELECT create_distributed_function('first(anyelement)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('last(anyelement)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT key, first(val ORDER BY id), last(val ORDER BY id) @@ -153,17 +153,17 @@ ERROR: unsupported aggregate function first -- However, GROUP BY on distribution column gets pushed down SELECT id, first(val ORDER BY key), last(val ORDER BY key) FROM aggdata GROUP BY id ORDER BY id; - id | first | last + id | first | last --------------------------------------------------------------------- 1 | 2 | 2 - 2 | | + 2 | | 3 | 2 | 2 4 | 3 | 3 5 | 5 | 5 6 | 4 | 4 - 7 | | - 8 | | - 9 | | + 7 | | + 8 | | + 9 | | 10 | 8 | 8 11 | 0 | 0 (11 rows) @@ -189,13 +189,13 @@ select sumstring(valf::text) from aggdata where valf is not null; ERROR: function "aggregate_support.sumstring(text)" does not exist CONTEXT: while executing command on localhost:xxxxx select create_distributed_function('sumstring(text)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select sumstring(valf::text) from aggdata where valf is not null; - sumstring + sumstring --------------------------------------------------------------------- 1339.59 (1 row) @@ -213,13 +213,13 @@ create aggregate array_collect_sort(el int) ( initcond = '{}' ); select create_distributed_function('array_collect_sort(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select array_collect_sort(val) from aggdata; - array_collect_sort + array_collect_sort --------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) @@ -229,7 +229,7 @@ create user notsuper; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
select run_command_on_workers($$create user notsuper$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -241,7 +241,7 @@ select run_command_on_workers($$ grant all on schema aggregate_support to notsuper; grant all on all tables in schema aggregate_support to notsuper; $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) @@ -249,7 +249,7 @@ $$); set role notsuper; select array_collect_sort(val) from aggdata; - array_collect_sort + array_collect_sort --------------------------------------------------------------------- {0,2,2,3,4,5,8,NULL,NULL,NULL,NULL} (1 row) diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out index e9a308e98..0d31dc733 100644 --- a/src/test/regress/expected/alter_role_propagation.out +++ b/src/test/regress/expected/alter_role_propagation.out @@ -4,7 +4,7 @@ CREATE ROLE alter_role_1 WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -14,30 +14,30 @@ SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$); ALTER ROLE alter_role_1 WITH SUPERUSER NOSUPERUSER; ERROR: conflicting or redundant options -- make sure that we propagate all options accurately -ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; +ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row + row --------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)") (2 rows) -- make sure that we propagate all options accurately -ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05'; +ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row + row 
--------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)") @@ -46,18 +46,18 @@ SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcr -- make sure that non-existent users are handled properly ALTER ROLE alter_role_2 WITH SUPERUSER NOSUPERUSER; ERROR: conflicting or redundant options -ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; +ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05'; ERROR: role "alter_role_2" does not exist -- make sure that CURRENT_USER just works fine ALTER ROLE CURRENT_USER WITH CONNECTION LIMIT 123; SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER; - rolconnlimit + rolconnlimit --------------------------------------------------------------------- 123 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,123) (localhost,57638,t,123) @@ -66,13 +66,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname -- make sure that SESSION_USER just works fine ALTER ROLE SESSION_USER WITH CONNECTION LIMIT 124; SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER; - rolconnlimit + rolconnlimit --------------------------------------------------------------------- 124 (1 row) SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,124) (localhost,57638,t,124) @@ -81,13 +81,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname -- now lets test the passwords in more detail ALTER ROLE alter_role_1 WITH PASSWORD NULL; SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'; - ?column? + ?column? 
--------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) @@ -95,13 +95,13 @@ SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE ALTER ROLE alter_role_1 WITH PASSWORD 'test1'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword + rolpassword --------------------------------------------------------------------- md52f9cc8d65e37edcc45c4a489bdfc699d (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d) (localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d) @@ -109,13 +109,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword + rolpassword --------------------------------------------------------------------- md5e17f7818c5ec023fa87bdb97fd3e842e (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e) (localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e) @@ -123,13 +123,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72'; SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'; - rolpassword + rolpassword --------------------------------------------------------------------- md59cce240038b7b335c6aa9674a6f13e72 (1 row) SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72) (localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72) @@ -140,7 +140,7 @@ CREATE ROLE "alter_role'1" WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -148,13 +148,13 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$); ALTER ROLE "alter_role'1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'; - rolcreaterole + rolcreaterole --------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) @@ -164,7 +164,7 @@ CREATE ROLE "alter_role""1" WITH LOGIN; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -172,66 +172,66 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$); ALTER ROLE "alter_role""1" CREATEROLE; SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'; - rolcreaterole + rolcreaterole --------------------------------------------------------------------- t (1 row) SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,t) (localhost,57638,t,t) (2 rows) -- add node -ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3'; +ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row + row --------------------------------------------------------------------- (alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)") (2 rows) SELECT master_remove_node('localhost', :worker_1_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 
'test4'; +ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4'; SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row + row --------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (1 row) SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'; - row + row --------------------------------------------------------------------- (alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052) (1 row) SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") (localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)") diff --git a/src/test/regress/expected/base_enable_mx.out b/src/test/regress/expected/base_enable_mx.out index 17985908f..403921e22 100644 --- a/src/test/regress/expected/base_enable_mx.out +++ b/src/test/regress/expected/base_enable_mx.out @@ -2,14 +2,14 @@ -- Setup MX data syncing -- SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/bool_agg.out b/src/test/regress/expected/bool_agg.out index ca634f3c2..b8c872bf9 100644 --- a/src/test/regress/expected/bool_agg.out +++ b/src/test/regress/expected/bool_agg.out @@ -3,21 +3,21 @@ CREATE SCHEMA bool_agg; SET search_path TO bool_agg; CREATE TABLE bool_test (id int, val int, flag bool, kind int); SELECT create_distributed_table('bool_agg.bool_test','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO bool_test VALUES (1, 1, true, 99), (2, 2, false, 99), (2, 3, true, 88); -- mix of true and false SELECT bool_and(flag), bool_or(flag), 
every(flag) FROM bool_test; - bool_and | bool_or | every + bool_and | bool_or | every --------------------------------------------------------------------- f | t | f (1 row) SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY kind ORDER BY 2; - kind | bool_and | bool_or | every + kind | bool_and | bool_or | every --------------------------------------------------------------------- 99 | f | t | f 88 | t | t | t @@ -25,13 +25,13 @@ SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY -- expressions in aggregate SELECT bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test; - bool_or | bool_and + bool_or | bool_and --------------------------------------------------------------------- t | f (1 row) SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP BY kind ORDER BY 3; - kind | bool_or | bool_and + kind | bool_or | bool_and --------------------------------------------------------------------- 88 | t | f 99 | t | t @@ -39,13 +39,13 @@ SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP -- 1 & 3, 1 | 3 SELECT bit_and(val), bit_or(val) FROM bool_test WHERE flag; - bit_and | bit_or + bit_and | bit_or --------------------------------------------------------------------- 1 | 3 (1 row) SELECT flag, bit_and(val), bit_or(val) FROM bool_test GROUP BY flag ORDER BY flag; - flag | bit_and | bit_or + flag | bit_and | bit_or --------------------------------------------------------------------- f | 2 | 2 t | 1 | 3 diff --git a/src/test/regress/expected/ch_bench_having.out b/src/test/regress/expected/ch_bench_having.out index f16affede..7a98dda3d 100644 --- a/src/test/regress/expected/ch_bench_having.out +++ b/src/test/regress/expected/ch_bench_having.out @@ -7,9 +7,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) explain (costs false, summary false, timing false) @@ -19,7 +19,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: s_i_id @@ -65,7 +65,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: s_i_id @@ -98,7 +98,7 @@ select s_i_id, sum(s_order_cnt) as ordercount from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: s_i_id @@ -129,7 +129,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.s_i_id @@ -152,7 +152,7 @@ explain select s_i_id, sum(s_order_cnt) as ordercount from stock s group by s_i_id having (select true); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id @@ -175,7 +175,7 @@ 
where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -186,7 +186,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -198,7 +198,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -211,7 +211,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -226,7 +226,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -235,7 +235,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -249,7 +249,7 @@ from stock s group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -258,7 +258,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -309,9 +309,9 @@ insert into stock VALUES (32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) select s_i_id, sum(s_order_cnt) as ordercount @@ -327,7 +327,7 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 33 | 1 1 | 1 @@ -348,7 +348,7 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_having_mx.out b/src/test/regress/expected/ch_bench_having_mx.out index 132cabce2..c8bcf7b33 100644 --- a/src/test/regress/expected/ch_bench_having_mx.out +++ b/src/test/regress/expected/ch_bench_having_mx.out @@ -10,9 +10,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -24,7 +24,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: s_i_id @@ -70,7 +70,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: s_i_id @@ -103,7 +103,7 @@ select s_i_id, sum(s_order_cnt) as ordercount from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: s_i_id @@ -134,7 +134,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan.s_i_id @@ -157,7 +157,7 @@ explain select s_i_id, sum(s_order_cnt) as ordercount from stock s group by s_i_id having (select true); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.s_i_id @@ -180,7 +180,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -191,7 +191,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -203,7 +203,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount 
--------------------------------------------------------------------- 3 | 3 4 | 4 @@ -216,7 +216,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -231,7 +231,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock) group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -240,7 +240,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -254,7 +254,7 @@ from stock s group by s_i_id having (select false) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- (0 rows) @@ -263,7 +263,7 @@ from stock s group by s_i_id having (select true) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -319,9 +319,9 @@ insert into stock VALUES (32, 1, 1, 1, 1, 1, '', '','','','','','','','','',''); SELECT create_distributed_table('stock','s_w_id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -339,7 +339,7 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 33 | 1 1 | 1 @@ -360,7 +360,7 @@ having sum(s_order_cnt) > and s_nationkey = n_nationkey and n_name = 'GERMANY') order by ordercount desc; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 1 | 100001 (1 row) diff --git a/src/test/regress/expected/ch_bench_subquery_repartition.out b/src/test/regress/expected/ch_bench_subquery_repartition.out index d2eb8381d..371829fc1 100644 --- a/src/test/regress/expected/ch_bench_subquery_repartition.out +++ b/src/test/regress/expected/ch_bench_subquery_repartition.out @@ -61,33 +61,33 @@ create table supplier ( PRIMARY KEY ( su_suppkey ) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('item'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('nation'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('supplier'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO order_line SELECT c, c, c, c, c, NULL, c, c, c, 'abc' FROM generate_series(1, 10) as c; @@ -102,7 +102,7 @@ select s_i_id s_i_id 
in (select i_id from item) AND s_i_id = ol_i_id order by s_i_id; - s_i_id + s_i_id --------------------------------------------------------------------- 1 2 @@ -150,7 +150,7 @@ where su_suppkey in and su_nationkey = n_nationkey and n_name = 'Germany' order by su_name; - su_name | su_address + su_name | su_address --------------------------------------------------------------------- (0 rows) @@ -184,7 +184,7 @@ where s_suppkey in and s_nationkey = n_nationkey and n_name = 'GERMANY' order by s_name; - s_name | s_address + s_name | s_address --------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R (1 row) @@ -205,7 +205,7 @@ where s_suppkey in and s_nationkey = n_nationkey and n_name = 'GERMANY' order by s_name; - s_name | s_address + s_name | s_address --------------------------------------------------------------------- Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R Supplier#000000044 | kERxlLDnlIZJdN66zAPHklyL diff --git a/src/test/regress/expected/chbenchmark_all_queries.out b/src/test/regress/expected/chbenchmark_all_queries.out index 8377432fa..53bab669e 100644 --- a/src/test/regress/expected/chbenchmark_all_queries.out +++ b/src/test/regress/expected/chbenchmark_all_queries.out @@ -145,75 +145,75 @@ CREATE TABLE supplier ( PRIMARY KEY ( su_suppkey ) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('new_order','no_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('oorder','o_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('history','h_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('customer','c_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('district','d_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('warehouse','w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('item'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('region'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('nation'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('supplier'); - create_reference_table + create_reference_table --------------------------------------------------------------------- 
- + (1 row) TRUNCATE order_line, new_order, stock, oorder, history, customer, district, warehouse, item, region, nation, supplier; -- for easy copy in development @@ -245,7 +245,7 @@ FROM order_line WHERE ol_delivery_d > '2007-01-02 00:00:00.000000' GROUP BY ol_number ORDER BY ol_number; - ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order + ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order --------------------------------------------------------------------- 0 | 0 | 0.00 | 0.00000000000000000000 | 0.00000000000000000000 | 1 1 | 1 | 1.00 | 1.00000000000000000000 | 1.00000000000000000000 | 1 @@ -301,10 +301,10 @@ ORDER BY n_name, su_name, i_id; - su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment + su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment --------------------------------------------------------------------- - 9 | abc | Germany | 3 | Keyboard | def | ghi | jkl - 4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl + 9 | abc | Germany | 3 | Keyboard | def | ghi | jkl + 4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl (2 rows) -- Query 3 @@ -338,7 +338,7 @@ GROUP BY ORDER BY revenue DESC, o_entry_d; - ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d + ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d --------------------------------------------------------------------- 10 | 10 | 10 | 10.00 | Fri Oct 17 00:00:00 2008 9 | 9 | 9 | 9.00 | Fri Oct 17 00:00:00 2008 @@ -369,7 +369,7 @@ WHERE o_entry_d >= '2007-01-02 00:00:00.000000' AND ol_delivery_d >= o_entry_d) GROUP BY o_ol_cnt ORDER BY o_ol_cnt; - o_ol_cnt | order_count + o_ol_cnt | order_count --------------------------------------------------------------------- 1 | 11 (1 row) @@ -406,7 +406,7 @@ WHERE c_id = o_c_id AND o_entry_d >= '2007-01-02 00:00:00.000000' GROUP BY n_name ORDER BY revenue DESC; - n_name | revenue + n_name | revenue --------------------------------------------------------------------- Germany | 3.00 The Netherlands | 2.00 @@ -419,7 +419,7 @@ FROM order_line WHERE ol_delivery_d >= '1999-01-01 00:00:00.000000' AND ol_delivery_d < '2020-01-01 00:00:00.000000' AND ol_quantity BETWEEN 1 AND 100000; - revenue + revenue --------------------------------------------------------------------- 55.00 (1 row) @@ -462,7 +462,7 @@ ORDER BY su_nationkey, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- 9 | C | 2008 | 3.00 (1 row) @@ -501,7 +501,7 @@ WHERE i_id = s_i_id AND i_id = ol_i_id GROUP BY extract(YEAR FROM o_entry_d) ORDER BY l_year; - l_year | mkt_share + l_year | mkt_share --------------------------------------------------------------------- 2008 | 0.50000000000000000000 (1 row) @@ -533,7 +533,7 @@ GROUP BY ORDER BY n_name, l_year DESC; - n_name | l_year | sum_profit + n_name | l_year | sum_profit --------------------------------------------------------------------- Germany | 2008 | 3.00 The Netherlands | 2008 | 2.00 @@ -569,19 +569,19 @@ GROUP BY c_phone, n_name ORDER BY revenue DESC; - c_id | c_last | revenue | c_city | c_phone | n_name + c_id | c_last | revenue | c_city | c_phone | n_name --------------------------------------------------------------------- - 10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia - 9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia - 8 | John | 8.00 | Some City | +1 000 0000000 | Cambodia - 7 | John | 7.00 | Some City | +1 
000 0000000 | Cambodia - 6 | John | 6.00 | Some City | +1 000 0000000 | Cambodia - 5 | John | 5.00 | Some City | +1 000 0000000 | Cambodia - 4 | John | 4.00 | Some City | +1 000 0000000 | Cambodia - 3 | John | 3.00 | Some City | +1 000 0000000 | Cambodia - 2 | John | 2.00 | Some City | +1 000 0000000 | Cambodia - 1 | John | 1.00 | Some City | +1 000 0000000 | Cambodia - 0 | John | 0.00 | Some City | +1 000 0000000 | Cambodia + 10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia + 9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia + 8 | John | 8.00 | Some City | +1 000 0000000 | Cambodia + 7 | John | 7.00 | Some City | +1 000 0000000 | Cambodia + 6 | John | 6.00 | Some City | +1 000 0000000 | Cambodia + 5 | John | 5.00 | Some City | +1 000 0000000 | Cambodia + 4 | John | 4.00 | Some City | +1 000 0000000 | Cambodia + 3 | John | 3.00 | Some City | +1 000 0000000 | Cambodia + 2 | John | 2.00 | Some City | +1 000 0000000 | Cambodia + 1 | John | 1.00 | Some City | +1 000 0000000 | Cambodia + 0 | John | 0.00 | Some City | +1 000 0000000 | Cambodia (11 rows) -- Query 11 @@ -606,7 +606,7 @@ HAVING sum(s_order_cnt) > AND su_nationkey = n_nationkey AND n_name = 'Germany') ORDER BY ordercount DESC; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 (1 row) @@ -626,7 +626,7 @@ WHERE ol_w_id = o_w_id AND ol_delivery_d < '2020-01-01 00:00:00.000000' GROUP BY o_ol_cnt ORDER BY o_ol_cnt; - o_ol_cnt | high_line_count | low_line_count + o_ol_cnt | high_line_count | low_line_count --------------------------------------------------------------------- 1 | 2 | 9 (1 row) @@ -649,7 +649,7 @@ GROUP BY c_count ORDER BY custdist DESC, c_count DESC; - c_count | custdist + c_count | custdist --------------------------------------------------------------------- 0 | 9 1 | 2 @@ -664,7 +664,7 @@ FROM WHERE ol_i_id = i_id AND ol_delivery_d >= '2007-01-02 00:00:00.000000' AND ol_delivery_d < '2020-01-02 00:00:00.000000'; - promo_revenue + promo_revenue --------------------------------------------------------------------- 0.00000000000000000000 (1 row) @@ -693,7 +693,7 @@ FROM WHERE su_suppkey = supplier_no AND total_revenue = (SELECT max(total_revenue) FROM revenue) ORDER BY su_suppkey; - su_suppkey | su_name | su_address | su_phone | total_revenue + su_suppkey | su_name | su_address | su_phone | total_revenue --------------------------------------------------------------------- 9 | abc | def | ghi | 3.00 (1 row) @@ -718,7 +718,7 @@ GROUP BY substr(i_data, 1, 3), i_price ORDER BY supplier_cnt DESC; - i_name | brand | i_price | supplier_cnt + i_name | brand | i_price | supplier_cnt --------------------------------------------------------------------- Keyboard | co | 50.00 | 3 (1 row) @@ -738,7 +738,7 @@ FROM AND ol_i_id = i_id GROUP BY i_id) t WHERE ol_i_id = t.i_id; - avg_yearly + avg_yearly --------------------------------------------------------------------- 27.5000000000000000 (1 row) @@ -775,7 +775,7 @@ HAVING sum(ol_amount) > 5 -- was 200, but thats too big for the dataset ORDER BY sum(ol_amount) DESC, o_entry_d; - c_last | o_id | o_entry_d | o_ol_cnt | sum + c_last | o_id | o_entry_d | o_ol_cnt | sum --------------------------------------------------------------------- John | 10 | Fri Oct 17 00:00:00 2008 | 1 | 10.00 John | 9 | Fri Oct 17 00:00:00 2008 | 1 | 9.00 @@ -808,7 +808,7 @@ WHERE ( ol_i_id = i_id AND ol_quantity <= 10 AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1,5,3)); - revenue + revenue 
--------------------------------------------------------------------- 7.00 (1 row) @@ -837,7 +837,7 @@ WHERE su_suppkey in AND su_nationkey = n_nationkey AND n_name = 'Germany' ORDER BY su_name; - su_name | su_address + su_name | su_address --------------------------------------------------------------------- abc | def (1 row) @@ -872,7 +872,7 @@ GROUP BY su_name ORDER BY numwait desc, su_name; - su_name | numwait + su_name | numwait --------------------------------------------------------------------- (0 rows) @@ -895,7 +895,7 @@ WHERE substr(c_phone,1,1) in ('1','2','3','4','5','6','7') AND o_d_id = c_d_id) GROUP BY substr(c_state,1,1) ORDER BY substr(c_state,1,1); - country | numcust | totacctbal + country | numcust | totacctbal --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index 46a06f70c..06e20c4b3 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -4,14 +4,14 @@ SET search_path TO coordinator_shouldhaveshards; -- idempotently add node to allow this test to run without add_coordinator SET client_min_messages TO WARNING; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) RESET client_min_messages; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -19,14 +19,14 @@ SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhavesha SET citus.shard_replication_factor TO 1; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x', colocate_with := 'none'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_placement USING (shardid) WHERE logicalrelid = 'test'::regclass AND groupid = 0; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -36,20 +36,20 @@ INSERT INTO test SELECT s,s FROM generate_series(2,100) s; -- router queries execute locally INSERT INTO test VALUES (1, 1); SELECT y FROM test WHERE x = 1; - y + y --------------------------------------------------------------------- 1 (1 row) -- multi-shard queries connect to localhost SELECT count(*) FROM test; - count + count --------------------------------------------------------------------- 100 (1 row) WITH a AS (SELECT * FROM test) SELECT count(*) FROM test; - count + count --------------------------------------------------------------------- 100 (1 row) @@ -57,13 +57,13 @@ WITH a AS (SELECT * FROM test) SELECT count(*) FROM test; -- multi-shard queries in transaction blocks execute locally BEGIN; SELECT y FROM test WHERE x = 1; - y + y --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; - count + count --------------------------------------------------------------------- 100 (1 row) @@ -71,13 +71,13 @@ SELECT count(*) FROM test; END; BEGIN; SELECT y FROM test WHERE x = 1; - y + y --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test; - count + count 
--------------------------------------------------------------------- 100 (1 row) @@ -88,7 +88,7 @@ ALTER TABLE test ADD COLUMN z int; -- DDL after local execution BEGIN; SELECT y FROM test WHERE x = 1; - y + y --------------------------------------------------------------------- 1 (1 row) @@ -101,7 +101,7 @@ ROLLBACK; BEGIN; ALTER TABLE test DROP COLUMN z; SELECT y FROM test WHERE x = 1; - y + y --------------------------------------------------------------------- 1 (1 row) @@ -111,7 +111,7 @@ DELETE FROM test; DROP TABLE test; DROP SCHEMA coordinator_shouldhaveshards CASCADE; SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/cte_nested_modification.out b/src/test/regress/expected/cte_nested_modification.out index f5010a62f..49f478f01 100644 --- a/src/test/regress/expected/cte_nested_modification.out +++ b/src/test/regress/expected/cte_nested_modification.out @@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int); INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt2(id int, value_1 int); INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt3(id int, json_val json); @@ -40,7 +40,7 @@ SET value_1 = abs(2 + 3.5) FROM cte_1 WHERE cte_1.id = tt1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 2 2 | 6 @@ -64,7 +64,7 @@ WITH cte_1 AS ( UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 9 2 | 9 @@ -88,7 +88,7 @@ WITH cte_1(id) AS ( UPDATE tt1 SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1); SELECT * FROM tt1 ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 9 2 | 9 @@ -114,7 +114,7 @@ DELETE FROM tt1 USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 3 | 4 (1 row) @@ -134,7 +134,7 @@ DELETE FROM tt1 USING cte_1 WHERE tt1.id < cte_1.id; SELECT * FROM tt1 ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/cte_prepared_modify.out b/src/test/regress/expected/cte_prepared_modify.out index 560f455b6..9460ab275 100644 --- a/src/test/regress/expected/cte_prepared_modify.out +++ b/src/test/regress/expected/cte_prepared_modify.out @@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int); INSERT INTO tt1 VALUES(1,2),(2,3),(3,4); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt2(id int, value_1 int); INSERT INTO tt2 VALUES(3,3),(4,4),(5,5); SELECT create_distributed_table('tt2','id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Test with prepared statements (parameter used by SET) diff --git a/src/test/regress/expected/custom_aggregate_support.out b/src/test/regress/expected/custom_aggregate_support.out index bb825160b..606e1e2ab 100644 --- a/src/test/regress/expected/custom_aggregate_support.out +++ b/src/test/regress/expected/custom_aggregate_support.out @@ -14,15 +14,15 @@ SET citus.shard_count TO 4; CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO raw_table @@ -38,7 +38,7 @@ SELECT hll_cardinality(hll_union_agg(agg)) FROM ( SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; - hll_cardinality + hll_cardinality --------------------------------------------------------------------- 19 (1 row) @@ -54,7 +54,7 @@ FROM daily_uniques WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 2 DESC,1 LIMIT 10; - day | hll_cardinality + day | hll_cardinality --------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 @@ -72,7 +72,7 @@ LIMIT 10; SELECT hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; - hll_cardinality + hll_cardinality --------------------------------------------------------------------- 19 (1 row) @@ -82,7 +82,7 @@ FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' GROUP BY 1 ORDER BY 1; - month | hll_cardinality + month | hll_cardinality --------------------------------------------------------------------- 6 | 19 7 | 13 @@ -108,7 +108,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -142,7 +142,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -177,7 +177,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -211,7 +211,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -246,7 +246,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -280,7 +280,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus 
Adaptive) Task Count: 4 @@ -315,7 +315,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -350,7 +350,7 @@ FROM daily_uniques GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 4 @@ -395,15 +395,15 @@ WHERE name = 'topn' CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO customer_reviews @@ -421,7 +421,7 @@ FROM ( FROM customer_reviews )a ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 7843 2 | 7843 @@ -446,7 +446,7 @@ FROM popular_reviewer WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; - day | item | frequency + day | item | frequency --------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 @@ -468,7 +468,7 @@ FROM ( WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date )a ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 1240 2 | 1240 @@ -488,7 +488,7 @@ FROM ( ORDER BY 1 )a ORDER BY 1, 3 DESC, 2; - month | item | frequency + month | item | frequency --------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 diff --git a/src/test/regress/expected/custom_aggregate_support_0.out b/src/test/regress/expected/custom_aggregate_support_0.out index 3afd153ba..c2e322e52 100644 --- a/src/test/regress/expected/custom_aggregate_support_0.out +++ b/src/test/regress/expected/custom_aggregate_support_0.out @@ -9,7 +9,7 @@ AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; - hll_present + hll_present --------------------------------------------------------------------- f (1 row) @@ -19,50 +19,50 @@ CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); ERROR: type "hll" does not exist SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); ERROR: relation "daily_uniques" does not exist -INSERT INTO raw_table - SELECT day, user_id % 19 +INSERT INTO raw_table + SELECT day, user_id % 19 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -INSERT INTO raw_table - SELECT day, user_id % 13 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), +INSERT INTO raw_table + SELECT day, user_id % 13 + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -- Run hll on raw data 
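-- The hll step that follows, consolidated here once without the -/+ line
-- churn: hash each user_id into an hll sketch, union the sketches, then read
-- the distinct-count estimate. In this _0 variant the hll extension is
-- absent, so the call fails; with hll installed it returns 19.
SELECT hll_cardinality(hll_union_agg(agg))
FROM (SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg
      FROM raw_table) a;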
-SELECT hll_cardinality(hll_union_agg(agg)) +SELECT hll_cardinality(hll_union_agg(agg)) FROM ( - SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg + SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; ERROR: function hll_hash_integer(integer) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Aggregate the data into daily_uniques -INSERT INTO daily_uniques - SELECT day, hll_add_agg(hll_hash_integer(user_id)) +INSERT INTO daily_uniques + SELECT day, hll_add_agg(hll_hash_integer(user_id)) FROM raw_table GROUP BY 1; ERROR: relation "daily_uniques" does not exist -- Basic hll_cardinality check on aggregated data -SELECT day, hll_cardinality(unique_users) -FROM daily_uniques -WHERE day >= '2018-06-20' and day <= '2018-06-30' -ORDER BY 2 DESC,1 +SELECT day, hll_cardinality(unique_users) +FROM daily_uniques +WHERE day >= '2018-06-20' and day <= '2018-06-30' +ORDER BY 2 DESC,1 LIMIT 10; ERROR: relation "daily_uniques" does not exist -- Union aggregated data for one week -SELECT hll_cardinality(hll_union_agg(unique_users)) -FROM daily_uniques +SELECT hll_cardinality(hll_union_agg(unique_users)) +FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; ERROR: relation "daily_uniques" does not exist SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' -GROUP BY 1 +GROUP BY 1 ORDER BY 1; ERROR: relation "daily_uniques" does not exist -- These are going to be supported after window function support @@ -156,7 +156,7 @@ AS create_topn FROM pg_available_extensions() WHERE name = 'topn' \gset :create_topn; - topn_present + topn_present --------------------------------------------------------------------- f (1 row) @@ -164,51 +164,51 @@ WHERE name = 'topn' CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 7, review % 5 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 13, review % 3 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -- Run topn on raw data SELECT (topn(agg, 10)).* FROM ( - SELECT topn_add_agg(user_id::text) AS agg + SELECT topn_add_agg(user_id::text) AS agg FROM customer_reviews )a ORDER BY 2 DESC, 1; ERROR: function topn_add_agg(text) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
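-- Why every statement above errors out: the test only creates the extension
-- when it is available, by building a command and capturing it into a psql
-- variable with \gset. A sketch of that trick (the CASE text is
-- reconstructed, not copied from the test file):
SELECT CASE WHEN count(*) > 0 THEN 'CREATE EXTENSION topn'
            ELSE 'SELECT false AS topn_present'
       END AS create_topn
FROM pg_available_extensions()
WHERE name = 'topn'
\gset
:create_topn;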
-- Aggregate the data into popular_reviewer -INSERT INTO popular_reviewer +INSERT INTO popular_reviewer SELECT day, topn_add_agg(user_id::text) FROM customer_reviews GROUP BY 1; ERROR: function topn_add_agg(text) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Basic topn check on aggregated data -SELECT day, (topn(reviewers, 10)).* -FROM popular_reviewer -WHERE day >= '2018-06-20' and day <= '2018-06-30' +SELECT day, (topn(reviewers, 10)).* +FROM popular_reviewer +WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; ERROR: function topn(jsonb, integer) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- Union aggregated data for one week -SELECT (topn(agg, 10)).* +SELECT (topn(agg, 10)).* FROM ( SELECT topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -217,7 +217,7 @@ FROM ( ORDER BY 2 DESC, 1; ERROR: function topn_union_agg(jsonb) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. -SELECT month, (topn(agg, 5)).* +SELECT month, (topn(agg, 5)).* FROM ( SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -230,7 +230,7 @@ ERROR: function topn_union_agg(jsonb) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. -- TODO the following queries will be supported after we fix #2265 -- They work for PG9.6 but not for PG10 -SELECT (topn(topn_union_agg(reviewers), 10)).* +SELECT (topn(topn_union_agg(reviewers), 10)).* FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; diff --git a/src/test/regress/expected/custom_aggregate_support_1.out b/src/test/regress/expected/custom_aggregate_support_1.out index 499252779..8709a716b 100644 --- a/src/test/regress/expected/custom_aggregate_support_1.out +++ b/src/test/regress/expected/custom_aggregate_support_1.out @@ -14,47 +14,47 @@ SET citus.shard_count TO 4; CREATE TABLE raw_table (day date, user_id int); CREATE TABLE daily_uniques(day date, unique_users hll); SELECT create_distributed_table('raw_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('daily_uniques', 'day'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -INSERT INTO raw_table - SELECT day, user_id % 19 +INSERT INTO raw_table + SELECT day, user_id % 19 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -INSERT INTO raw_table - SELECT day, user_id % 13 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), +INSERT INTO raw_table + SELECT day, user_id % 13 + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,100) as g(user_id); -- Run hll on raw data -SELECT hll_cardinality(hll_union_agg(agg)) +SELECT hll_cardinality(hll_union_agg(agg)) FROM ( - SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg + SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg FROM raw_table)a; - hll_cardinality + hll_cardinality 
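-- The topn calls that fail in the _0 variant above, in their intended form
-- (the _1 variant of this file, which starts here, runs them with the
-- extension installed):
SELECT (topn(agg, 10)).*
FROM (SELECT topn_add_agg(user_id::text) AS agg
      FROM customer_reviews) a
ORDER BY 2 DESC, 1;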
--------------------------------------------------------------------- 19 (1 row) -- Aggregate the data into daily_uniques -INSERT INTO daily_uniques - SELECT day, hll_add_agg(hll_hash_integer(user_id)) +INSERT INTO daily_uniques + SELECT day, hll_add_agg(hll_hash_integer(user_id)) FROM raw_table GROUP BY 1; -- Basic hll_cardinality check on aggregated data -SELECT day, hll_cardinality(unique_users) -FROM daily_uniques -WHERE day >= '2018-06-20' and day <= '2018-06-30' -ORDER BY 2 DESC,1 +SELECT day, hll_cardinality(unique_users) +FROM daily_uniques +WHERE day >= '2018-06-20' and day <= '2018-06-30' +ORDER BY 2 DESC,1 LIMIT 10; - day | hll_cardinality + day | hll_cardinality --------------------------------------------------------------------- 06-20-2018 | 19 06-21-2018 | 19 @@ -69,10 +69,10 @@ LIMIT 10; (10 rows) -- Union aggregated data for one week -SELECT hll_cardinality(hll_union_agg(unique_users)) -FROM daily_uniques +SELECT hll_cardinality(hll_union_agg(unique_users)) +FROM daily_uniques WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; - hll_cardinality + hll_cardinality --------------------------------------------------------------------- 19 (1 row) @@ -80,9 +80,9 @@ WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date; SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users)) FROM daily_uniques WHERE day >= '2018-06-23' AND day <= '2018-07-01' -GROUP BY 1 +GROUP BY 1 ORDER BY 1; - month | hll_cardinality + month | hll_cardinality --------------------------------------------------------------------- 6 | 19 7 | 13 @@ -108,7 +108,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day @@ -144,7 +144,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day @@ -183,7 +183,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day @@ -219,7 +219,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day @@ -258,7 +258,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day @@ -294,7 +294,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day @@ -333,7 +333,7 @@ SELECT FROM daily_uniques GROUP BY(1); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.day @@ -370,7 +370,7 @@ FROM daily_uniques GROUP BY(1) HAVING hll_cardinality(hll_union_agg(unique_users)) > 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- GroupAggregate Group Key: remote_scan.day @@ -428,33 +428,33 @@ WHERE name = 'topn' CREATE TABLE customer_reviews (day date, user_id int, review int); CREATE TABLE popular_reviewer(day date, reviewers jsonb); SELECT create_distributed_table('customer_reviews', 'user_id'); - create_distributed_table + create_distributed_table 
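-- The rollup step used in the hll half of this file: collapse raw events
-- into one sketch per day, so later queries only union small sketches
-- instead of rescanning raw_table.
INSERT INTO daily_uniques
  SELECT day, hll_add_agg(hll_hash_integer(user_id))
  FROM raw_table
  GROUP BY 1;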
--------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('popular_reviewer', 'day'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 7, review % 5 FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -INSERT INTO customer_reviews +INSERT INTO customer_reviews SELECT day, user_id % 13, review % 3 - FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), + FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day), generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review); -- Run topn on raw data SELECT (topn(agg, 10)).* FROM ( - SELECT topn_add_agg(user_id::text) AS agg + SELECT topn_add_agg(user_id::text) AS agg FROM customer_reviews )a ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 7843 2 | 7843 @@ -469,17 +469,17 @@ ORDER BY 2 DESC, 1; (10 rows) -- Aggregate the data into popular_reviewer -INSERT INTO popular_reviewer +INSERT INTO popular_reviewer SELECT day, topn_add_agg(user_id::text) FROM customer_reviews GROUP BY 1; -- Basic topn check on aggregated data -SELECT day, (topn(reviewers, 10)).* -FROM popular_reviewer -WHERE day >= '2018-06-20' and day <= '2018-06-30' +SELECT day, (topn(reviewers, 10)).* +FROM popular_reviewer +WHERE day >= '2018-06-20' and day <= '2018-06-30' ORDER BY 3 DESC, 1, 2 LIMIT 10; - day | item | frequency + day | item | frequency --------------------------------------------------------------------- 06-20-2018 | 1 | 248 06-20-2018 | 2 | 248 @@ -494,14 +494,14 @@ LIMIT 10; (10 rows) -- Union aggregated data for one week -SELECT (topn(agg, 10)).* +SELECT (topn(agg, 10)).* FROM ( SELECT topn_union_agg(reviewers) AS agg FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date )a ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 1240 2 | 1240 @@ -512,7 +512,7 @@ ORDER BY 2 DESC, 1; 6 | 992 (7 rows) -SELECT month, (topn(agg, 5)).* +SELECT month, (topn(agg, 5)).* FROM ( SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg FROM popular_reviewer @@ -521,7 +521,7 @@ FROM ( ORDER BY 1 )a ORDER BY 1, 3 DESC, 2; - month | item | frequency + month | item | frequency --------------------------------------------------------------------- 6 | 1 | 1054 6 | 2 | 1054 @@ -537,11 +537,11 @@ ORDER BY 1, 3 DESC, 2; -- TODO the following queries will be supported after we fix #2265 -- They work for PG9.6 but not for PG10 -SELECT (topn(topn_union_agg(reviewers), 10)).* +SELECT (topn(topn_union_agg(reviewers), 10)).* FROM popular_reviewer WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 1240 2 | 1240 @@ -555,7 +555,7 @@ ORDER BY 2 DESC, 1; SELECT (topn(topn_add_agg(user_id::text), 10)).* FROM customer_reviews ORDER BY 2 DESC, 1; - item | frequency + item | frequency --------------------------------------------------------------------- 1 | 7843 2 | 7843 diff --git 
a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index c654de960..78247223c 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -9,9 +9,9 @@ SET search_path TO disabled_object_propagation; -- verify the table gets created, which requires schema distribution to still work CREATE TABLE t1 (a int PRIMARY KEY , b int); SELECT create_distributed_table('t1','a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify types are not created, preventing distributed tables to be created unless created manually on the workers @@ -26,16 +26,16 @@ SELECT 1 FROM run_command_on_workers($$ CREATE TYPE disabled_object_propagation.tt1 AS (a int , b int); COMMIT; $$); - ?column? + ?column? --------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify enum types are not created, preventing distributed tables to be created unless created manually on the workers @@ -50,16 +50,16 @@ SELECT 1 FROM run_command_on_workers($$ CREATE TYPE disabled_object_propagation.tt2 AS ENUM ('a', 'b'); COMMIT; $$); - ?column? + ?column? --------------------------------------------------------------------- 1 1 (2 rows) SELECT create_distributed_table('t3', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed @@ -69,16 +69,16 @@ SET LOCAL citus.enable_object_propagation TO on; CREATE TYPE tt3 AS (a int, b int); CREATE TABLE t4 (a int PRIMARY KEY, b tt3); SELECT create_distributed_table('t4','a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE t4; -- as long as the table is using the type some operations are hard to force COMMIT; -- verify the type is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = 'disabled_object_propagation.tt3'::regtype::oid; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -97,7 +97,7 @@ SELECT row(nspname, typname, usename) JOIN pg_namespace ON (pg_namespace.oid = typnamespace) WHERE typname = 'tt3'; $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(disabled_object_propagation,tt3,postgres)") (localhost,57638,t,"(disabled_object_propagation,tt3,postgres)") @@ -112,7 +112,7 @@ SELECT run_command_on_workers($$ WHERE pg_type.typname = 'tt3' GROUP BY pg_type.typname; $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(tt3,""a int4, b int4"")") (localhost,57638,t,"(tt3,""a int4, b int4"")") diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 0ca0361b4..521147e15 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -3,7 +3,7 @@ CREATE USER collationuser; NOTICE: not propagating 
CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE USER collationuser;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -23,7 +23,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname + collname | nspname | rolname --------------------------------------------------------------------- german_phonebook | collation_tests | postgres (1 row) @@ -35,20 +35,20 @@ CREATE TABLE test_propagate(id int, t1 text COLLATE german_phonebook, INSERT INTO test_propagate VALUES (1, 'aesop', U&'\00E4sop'), (2, U&'Vo\1E9Er', 'Vossr'); SELECT create_distributed_table('test_propagate', 'id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Test COLLATE is pushed down SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b'; - id | t1 | t2 + id | t1 | t2 --------------------------------------------------------------------- 1 | aesop | äsop (1 row) SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C"; - id | t1 | t2 + id | t1 | t2 --------------------------------------------------------------------- 2 | Voẞr | Vossr (1 row) @@ -56,9 +56,9 @@ SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C"; -- Test range table with collated distribution column CREATE TABLE test_range(key text COLLATE german_phonebook, val int); SELECT create_distributed_table('test_range', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('test_range') AS new_shard_id @@ -76,7 +76,7 @@ SET client_min_messages TO debug; SELECT * FROM test_range WHERE key > 'Ab' AND key < U&'\00E4z'; DEBUG: Creating router plan DEBUG: Plan is router executable - key | val + key | val --------------------------------------------------------------------- äsop | 1 (1 row) @@ -88,7 +88,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname + collname | nspname | rolname --------------------------------------------------------------------- german_phonebook | collation_tests | postgres german_phonebook_unpropagated | collation_tests | postgres @@ -105,7 +105,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'german_phonebook%' ORDER BY 1,2,3; - collname | nspname | rolname + collname | nspname | rolname --------------------------------------------------------------------- german_phonebook2 | collation_tests2 | collationuser german_phonebook_unpropagated | collation_tests | postgres @@ -127,7 +127,7 @@ DROP SCHEMA collation_tests2 CASCADE; \c - - - :master_port DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") diff --git 
a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index dd0624558..8643ae290 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -1,6 +1,6 @@ CREATE SCHEMA collation_conflict; SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") @@ -20,9 +20,9 @@ CREATE COLLATION caseinsensitive ( ); CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -32,7 +32,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; - collname | nspname | rolname + collname | nspname | rolname --------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres (1 row) @@ -58,9 +58,9 @@ CREATE COLLATION caseinsensitive ( ); CREATE TABLE tblcoll(val text COLLATE caseinsensitive); SELECT create_reference_table('tblcoll'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -70,7 +70,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace JOIN pg_authid a ON a.oid = c.collowner WHERE collname like 'caseinsensitive%' ORDER BY 1,2,3; - collname | nspname | rolname + collname | nspname | rolname --------------------------------------------------------------------- caseinsensitive | collation_conflict | postgres caseinsensitive(citus_backup_0) | collation_conflict | postgres @@ -80,13 +80,13 @@ ORDER BY 1,2,3; SET search_path TO collation_conflict; -- now test worker_create_or_replace_object directly SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); - worker_create_or_replace_object + worker_create_or_replace_object --------------------------------------------------------------------- f (1 row) SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); - worker_create_or_replace_object + worker_create_or_replace_object --------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 6693c73dc..6e63d15a2 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -3,7 +3,7 @@ CREATE USER functionuser; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
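-- The pattern used throughout these files: DDL that Citus does not propagate
-- (CREATE USER here) is replayed on every worker via run_command_on_workers(),
-- which returns one (nodename, nodeport, success, result) row per worker:
SELECT * FROM run_command_on_workers($$SELECT current_user$$);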
SELECT run_command_on_workers($$CREATE USER functionuser;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -130,9 +130,9 @@ CREATE TABLE statement_table(id int2); SET citus.replication_model TO 'statement'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('statement_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create a table uses streaming-based replication (can be synced) @@ -140,15 +140,15 @@ CREATE TABLE streaming_table(id int); SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('streaming_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- make sure that none of the active and primary nodes hasmetadata -- at the start of the test select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or + bool_or --------------------------------------------------------------------- f (1 row) @@ -156,21 +156,21 @@ select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'pr -- if not paremeters are supplied, we'd see that function doesn't have -- distribution_argument_index and colocationid SELECT create_distributed_function('"add_mi''xed_param_names"(int, int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT distribution_argument_index is NULL, colocationid is NULL from citus.pg_dist_object WHERE objid = 'add_mi''xed_param_names(int, int)'::regprocedure; - ?column? | ?column? + ?column? | ?column? 
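-- Distributing a function without naming a distribution argument, as above:
-- the function is propagated to the workers, but its citus.pg_dist_object row
-- keeps NULL distribution_argument_index and colocationid, and no metadata
-- sync is triggered (hasmetadata stays false).
SELECT create_distributed_function('"add_mi''xed_param_names"(int, int)');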
--------------------------------------------------------------------- t | t (1 row) -- also show that we can use the function SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_names"(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 @@ -179,7 +179,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_n -- make sure that none of the active and primary nodes hasmetadata -- since the function doesn't have a parameter select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or + bool_or --------------------------------------------------------------------- f (1 row) @@ -202,54 +202,54 @@ HINT: Set citus.replication_model to 'streaming' before creating distributed ta END; -- try to co-locate with a table that uses streaming replication SELECT create_distributed_function('dup(int)', '$1', colocate_with := 'streaming_table'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.dup(42);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | (42,"42 is text") localhost | 57638 | t | (42,"42 is text") (2 rows) SELECT create_distributed_function('add(int,int)', '$1', colocate_with := 'streaming_table'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 (2 rows) SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) -- distribute aggregate SELECT create_distributed_function('sum2(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('my_rank("any")'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('agg_names(dup_result,dup_result)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- testing alter statements for a distributed function @@ -257,21 +257,21 @@ SELECT create_distributed_function('agg_names(dup_result,dup_result)'); -- ERROR: ROWS is not applicable when function does not return a set ALTER FUNCTION add(int,int) CALLED ON NULL INPUT IMMUTABLE SECURITY INVOKER PARALLEL UNSAFE LEAKPROOF COST 5; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION 
add(int,int) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) STRICT VOLATILE PARALLEL SAFE; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -279,49 +279,49 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); -- Test SET/RESET for alter function ALTER FUNCTION add(int,int) SET client_min_messages TO warning; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO error; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET client_min_messages TO debug; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET client_min_messages; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET "citus.setting;'" TO 'hello '' world'; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) RESET "citus.setting;'"; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) ALTER FUNCTION add(int,int) SET search_path TO 'sch'';ma', public; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -332,7 +332,7 @@ ALTER FUNCTION add(int,int) SET client_min_messages FROM CURRENT; ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -341,7 +341,7 @@ ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT SET client_min_messages F ERROR: unsupported ALTER FUNCTION ... SET ... 
FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -350,7 +350,7 @@ ALTER FUNCTION add(int,int) SET client_min_messages FROM CURRENT SECURITY DEFINE ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value. SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -358,20 +358,20 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); -- rename function and make sure the new name can be used on the workers while the old name can't ALTER FUNCTION add(int,int) RENAME TO add2; SELECT public.verify_function_is_same_on_workers('function_tests.add2(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 @@ -380,7 +380,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER B ALTER FUNCTION add2(int,int) RENAME TO add; ALTER AGGREGATE sum2(int) RENAME TO sum27; SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 @@ -390,7 +390,7 @@ ALTER AGGREGATE sum27(int) RENAME TO sum2; -- change the owner of the function and verify the owner has been changed on the workers ALTER FUNCTION add(int,int) OWNER TO functionuser; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) @@ -403,7 +403,7 @@ JOIN pg_user ON (usesysid = proowner) JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests') WHERE proname = 'add'; $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,add)") (localhost,57638,t,"(functionuser,function_tests,add)") @@ -416,7 +416,7 @@ JOIN pg_user ON (usesysid = proowner) 
JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests') WHERE proname = 'sum2'; $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"(functionuser,function_tests,sum2)") (localhost,57638,t,"(functionuser,function_tests,sum2)") @@ -426,20 +426,20 @@ $$); -- the new schema has the function. ALTER FUNCTION add(int,int) SET SCHEMA function_tests2; SELECT public.verify_function_is_same_on_workers('function_tests2.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist (2 rows) SELECT * FROM run_command_on_workers('SELECT function_tests2.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 5 localhost | 57638 | t | 5 @@ -454,13 +454,13 @@ AS 'select $1 * $2;' -- I know, this is not an add, but the output will tell us IMMUTABLE RETURNS NULL ON NULL INPUT; SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)'); - verify_function_is_same_on_workers + verify_function_is_same_on_workers --------------------------------------------------------------------- t (1 row) SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 6 localhost | 57638 | t | 6 @@ -477,7 +477,7 @@ DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extensio DROP FUNCTION add(int,int); -- call should fail as function should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist @@ -486,7 +486,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY DROP AGGREGATE function_tests2.sum2(int); -- call should fail as aggregate should have been dropped SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist @@ -498,10 +498,10 @@ ERROR: syntax error at or near "int" CONTEXT: invalid type name "val1 int" -- invalid distribution_arg_name SELECT create_distributed_function('add_with_param_names(int, int)', 
distribution_arg_name:='test'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='int'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- invalid distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)', '$0'); @@ -520,7 +520,7 @@ SELECT create_distributed_function('add_with_param_names(int, int)', '$1a'); ERROR: invalid input syntax for integer: "1a" -- non existing column name SELECT create_distributed_function('add_with_param_names(int, int)', 'aaa'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- NULL function SELECT create_distributed_function(NULL); @@ -532,21 +532,21 @@ ERROR: colocate_with parameter should not be NULL HINT: To use the default value, set colocate_with option to "default" -- empty string distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)', ''); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() -- The first distributed function syncs the metadata to nodes -- and metadata syncing is not supported within transaction blocks BEGIN; SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- make sure that none of the nodes have the function because we've rollbacked SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) @@ -554,28 +554,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_ -- make sure that none of the active and primary nodes hasmetadata select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_or + bool_or --------------------------------------------------------------------- t (1 row) -- valid distribution with distribution_arg_name SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- make sure that the primary nodes are now metadata synced select 
bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_and + bool_and --------------------------------------------------------------------- t (1 row) -- make sure that both of the nodes have the function because we've succeeded SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) @@ -583,16 +583,16 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_ -- valid distribution with distribution_arg_name -- case insensitive SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='VaL1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- valid distribution with distribution_arg_index SELECT create_distributed_function('add_with_param_names(int, int)','$1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- a function cannot be colocated with a table that is not "streaming" replicated @@ -600,9 +600,9 @@ SET citus.shard_replication_factor TO 2; CREATE TABLE replicated_table_func_test (a int); SET citus.replication_model TO "statement"; SELECT create_distributed_table('replicated_table_func_test', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test'); @@ -610,9 +610,9 @@ ERROR: cannot colocate function "add_with_param_names" and table "replicated_ta DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model. 
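-- What the error above points at: a function can only be colocated with a
-- table that uses the streaming replication model. The working shape, using
-- statements from this test:
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE replicated_table_func_test_2 (a bigint);
SELECT create_distributed_table('replicated_table_func_test_2', 'a');
SELECT create_distributed_function('add_with_param_names(int, int)', 'val1',
                                   colocate_with := 'replicated_table_func_test_2');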
HINT: When distributing tables make sure that citus.replication_model = 'streaming' SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) -- a function can be colocated with a different distribution argument type @@ -621,20 +621,20 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE replicated_table_func_test_2 (a bigint); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_2'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- colocate_with cannot be used without distribution key SELECT create_distributed_function('add_with_param_names(int, int)', colocate_with:='replicated_table_func_test_2'); -ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid +ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid HINT: To provide "colocate_with" option, the distribution argument parameter should also be provided -- a function cannot be colocated with a local table CREATE TABLE replicated_table_func_test_3 (a bigint); @@ -642,9 +642,9 @@ SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', col ERROR: relation replicated_table_func_test_3 is not distributed -- a function cannot be colocated with a reference table SELECT create_reference_table('replicated_table_func_test_3'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_3'); @@ -654,15 +654,15 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE replicated_table_func_test_4 (a int); SET citus.replication_model TO "streaming"; SELECT create_distributed_table('replicated_table_func_test_4', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- show that the colocationIds are the same @@ -670,7 +670,7 @@ SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_functi FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; - table_and_function_colocated + table_and_function_colocated --------------------------------------------------------------------- t (1 row) @@ -678,16 +678,16 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass -- now, re-distributed with the default colocation option, we should still see that the same colocation -- group preserved, because we're using the default shard creation settings SELECT 
create_distributed_function('add_with_param_names(int, int)', 'val1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_with_param_names(int, int)'::regprocedure; - table_and_function_colocated + table_and_function_colocated --------------------------------------------------------------------- t (1 row) @@ -697,31 +697,31 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass -- path, we rely on postgres for implicit coersions, and users for explicit coersions -- to coerce the values SELECT create_distributed_function('add_numeric(numeric, numeric)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_numeric(numeric, numeric)'::regprocedure; - table_and_function_colocated + table_and_function_colocated --------------------------------------------------------------------- t (1 row) SELECT create_distributed_function('add_text(text, text)', '$1', colocate_with:='replicated_table_func_test_4'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated FROM pg_dist_partition, citus.pg_dist_object as objects WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND objects.objid = 'add_text(text, text)'::regprocedure; - table_and_function_colocated + table_and_function_colocated --------------------------------------------------------------------- t (1 row) @@ -738,18 +738,18 @@ ERROR: cannot distribute the function "add_with_param_names" since there is no HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() -- sync metadata to workers for consistent results when clearing objects SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE test (id int, name text); SELECT create_distributed_table('test','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test VALUES (3,'three'); @@ -762,9 +762,9 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('increment(int)', '$1', colocate_with := 'test'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- call a distributed function inside a pl/pgsql function @@ -777,24 +777,24 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT test_func_calls_dist_func(); - test_func_calls_dist_func + test_func_calls_dist_func 
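-- The wrapper being exercised here: a plain plpgsql function that calls the
-- distributed increment() function. Its exact body is elided by the hunk
-- context above, so this reconstruction is illustrative only:
CREATE FUNCTION test_func_calls_dist_func() RETURNS void AS $$
BEGIN
    PERFORM increment(1);
END;
$$ LANGUAGE plpgsql;
SELECT test_func_calls_dist_func();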
+ test_func_calls_dist_func
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT test_func_calls_dist_func();
- test_func_calls_dist_func 
+ test_func_calls_dist_func
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- test an INSERT..SELECT via the coordinator just because it is kind of funky
 INSERT INTO test SELECT increment(3);
 SELECT * FROM test ORDER BY id;
- id | name 
+ id | name
 ---------------------------------------------------------------------
 3 | three
- 4 | 
+ 4 |
 (2 rows)
 
 DROP TABLE test;
@@ -803,10 +803,10 @@ DROP SCHEMA function_tests CASCADE;
 DROP SCHEMA function_tests2 CASCADE;
 -- clear objects
 SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
- stop_metadata_sync_to_node 
+ stop_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
- 
+
+
 (2 rows)
 
 -- This is hacky, but we should clean-up the resources as below
@@ -826,7 +826,7 @@ DROP SCHEMA function_tests2 CASCADE;
 \c - - - :master_port
 DROP USER functionuser;
 SELECT run_command_on_workers($$DROP USER functionuser$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP ROLE")
 (localhost,57638,t,"DROP ROLE")
diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out
index 73c503c0b..995668e64 100644
--- a/src/test/regress/expected/distributed_functions_conflict.out
+++ b/src/test/regress/expected/distributed_functions_conflict.out
@@ -2,7 +2,7 @@
 -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur
 CREATE SCHEMA proc_conflict;
 SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SCHEMA")
 (localhost,57638,t,"CREATE SCHEMA")
@@ -31,9 +31,9 @@ CREATE AGGREGATE existing_agg(int) (
 STYPE = int
 );
 SELECT create_distributed_function('existing_agg(int)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 \c - - - :worker_1_port
@@ -44,7 +44,7 @@ WITH data (val) AS (
 union all select 6
 )
 SELECT existing_agg(val) FROM data;
- existing_agg 
+ existing_agg
 ---------------------------------------------------------------------
 78
 (1 row)
@@ -57,7 +57,7 @@ WITH data (val) AS (
 union all select 6
 )
 SELECT existing_agg(val) FROM data;
- existing_agg 
+ existing_agg
 ---------------------------------------------------------------------
 78
 (1 row)
@@ -90,9 +90,9 @@ CREATE AGGREGATE existing_agg(int) (
 STYPE = int
 );
 SELECT create_distributed_function('existing_agg(int)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 \c - - - :worker_1_port
@@ -103,7 +103,7 @@ WITH data (val) AS (
 union all select 6
 )
 SELECT existing_agg(val) FROM data;
- existing_agg 
+ existing_agg
 ---------------------------------------------------------------------
 76
 (1 row)
@@ -116,7 +116,7 @@ WITH data (val) AS (
 union all select 6
 )
 SELECT existing_agg(val) FROM data;
- existing_agg 
+ existing_agg
 ---------------------------------------------------------------------
 76
 (1 row)
@@ -128,13 +128,13 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql STRICT IMMUTABLE;
 SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 f
 (1 row)
diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out
index 6e0f572d0..d819c4294 100644
--- a/src/test/regress/expected/distributed_procedure.out
+++ b/src/test/regress/expected/distributed_procedure.out
@@ -3,7 +3,7 @@ CREATE USER procedureuser;
 NOTICE:  not propagating CREATE ROLE/USER commands to worker nodes
 HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 SELECT run_command_on_workers($$CREATE USER procedureuser;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
@@ -24,7 +24,7 @@ $proc$;
 ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
 ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
 SELECT pg_reload_conf();
- pg_reload_conf 
+ pg_reload_conf
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -38,32 +38,32 @@ CREATE TABLE colocation_table(id text);
 SET citus.replication_model TO 'streaming';
 SET citus.shard_replication_factor TO 1;
 SELECT create_distributed_table('colocation_table','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_function('raise_info(text)', '$1', colocate_with := 'colocation_table');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT wait_until_metadata_sync();
- wait_until_metadata_sync 
+ wait_until_metadata_sync
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | t | CALL
 localhost | 57638 | t | CALL
 (2 rows)
 
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -73,14 +73,14 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
 -- ERROR: ROWS is not applicable when function does not return a set
 ALTER PROCEDURE raise_info(text) SECURITY INVOKER;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 ALTER PROCEDURE raise_info(text) SECURITY DEFINER;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -88,28 +88,28 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
 -- Test SET/RESET for alter procedure
 ALTER PROCEDURE raise_info(text) SET client_min_messages TO warning;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 ALTER PROCEDURE raise_info(text) SET client_min_messages TO error;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 ALTER PROCEDURE raise_info(text) SET client_min_messages TO debug;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 ALTER PROCEDURE raise_info(text) RESET client_min_messages;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -117,20 +117,20 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
 -- rename function and make sure the new name can be used on the workers while the old name can't
 ALTER PROCEDURE raise_info(text) RENAME TO raise_info2;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
 localhost | 57638 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
 (2 rows)
 
 SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | t | CALL
 localhost | 57638 | t | CALL
@@ -140,7 +140,7 @@ ALTER PROCEDURE raise_info2(text) RENAME TO raise_info;
 -- change the owner of the function and verify the owner has been changed on the workers
 ALTER PROCEDURE raise_info(text) OWNER TO procedureuser;
 SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -152,7 +152,7 @@ JOIN pg_user ON (usesysid = proowner)
 JOIN pg_namespace ON (pg_namespace.oid = pronamespace)
 WHERE proname = 'raise_info';
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(procedureuser,procedure_tests,raise_info)")
 (localhost,57638,t,"(procedureuser,procedure_tests,raise_info)")
@@ -162,20 +162,20 @@ $$);
 -- the new schema has the function.
 ALTER PROCEDURE raise_info(text) SET SCHEMA procedure_tests2;
 SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(text)');
- verify_function_is_same_on_workers 
+ verify_function_is_same_on_workers
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
 localhost | 57638 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
 (2 rows)
 
 SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | t | CALL
 localhost | 57638 | t | CALL
@@ -185,7 +185,7 @@ ALTER PROCEDURE procedure_tests2.raise_info(text) SET SCHEMA procedure_tests;
 DROP PROCEDURE raise_info(text);
 -- call should fail as procedure should have been dropped
 SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
- nodename | nodeport | success | result 
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
 localhost | 57638 | f | ERROR:  procedure procedure_tests.raise_info(unknown) does not exist
@@ -194,7 +194,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA procedure_tests CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
@@ -202,7 +202,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
 
 DROP SCHEMA procedure_tests2 CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
@@ -210,7 +210,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
 
 DROP USER procedureuser;
 SELECT run_command_on_workers($$DROP USER procedureuser;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP ROLE")
 (localhost,57638,t,"DROP ROLE")
diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out
index ccdc05328..33957dfb4 100644
--- a/src/test/regress/expected/distributed_types.out
+++ b/src/test/regress/expected/distributed_types.out
@@ -3,7 +3,7 @@ CREATE USER typeuser;
 NOTICE:  not propagating CREATE ROLE/USER commands to worker nodes
 HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 SELECT run_command_on_workers($$CREATE USER typeuser;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
@@ -17,14 +17,14 @@ SET citus.shard_count TO 4;
 CREATE TYPE tc1 AS (a int, b int);
 CREATE TABLE t1 (a int PRIMARY KEY, b tc1);
 SELECT create_distributed_table('t1','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t1 VALUES (1, (2,3)::tc1);
 SELECT * FROM t1;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | (2,3)
 (1 row)
@@ -37,14 +37,14 @@ INSERT INTO t1 VALUES (6, (7,8)::type_tests2.tc1_newname); -- insert with a cast
 CREATE TYPE te1 AS ENUM ('one', 'two', 'three');
 CREATE TABLE t2 (a int PRIMARY KEY, b te1);
 SELECT create_distributed_table('t2','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t2 VALUES (1, 'two');
 SELECT * FROM t2;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | two
 (1 row)
@@ -55,7 +55,7 @@ ALTER TYPE te1 RENAME TO te1_newname;
 ALTER TYPE te1_newname ADD VALUE 'four';
 UPDATE t2 SET b = 'four';
 SELECT * FROM t2;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | four
 (1 row)
@@ -68,14 +68,14 @@ BEGIN;
 CREATE TYPE tc2 AS (a int, b int);
 CREATE TABLE t3 (a int PRIMARY KEY, b tc2);
 SELECT create_distributed_table('t3','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t3 VALUES (4, (5,6)::tc2);
 SELECT * FROM t3;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 4 | (5,6)
 (1 row)
@@ -86,14 +86,14 @@ BEGIN;
 CREATE TYPE te2 AS ENUM ('yes', 'no');
 CREATE TABLE t4 (a int PRIMARY KEY, b te2);
 SELECT create_distributed_table('t4','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t4 VALUES (1, 'yes');
 SELECT * FROM t4;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | yes
 (1 row)
@@ -102,13 +102,13 @@ SELECT * FROM t4;
 COMMIT;
 -- verify order of enum labels
 SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;
- string_agg 
+ string_agg
 ---------------------------------------------------------------------
 yes,no
 (1 row)
 
 SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"yes,no")
 (localhost,57638,t,"yes,no")
@@ -124,9 +124,9 @@ CREATE TYPE te3 AS ENUM ('a','b');
 RESET citus.enable_ddl_propagation;
 CREATE TABLE t5 (a int PRIMARY KEY, b tc5[], c te3);
 SELECT create_distributed_table('t5','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- test adding an attribute to a type and a column to a table both for a non-distributed type
@@ -144,7 +144,7 @@ INSERT INTO t5 VALUES (1, NULL, 'a', 'd', (1,2,(4,5)::tc6c)::tc6);
 -- test renaming an attribute of a distrbuted type and read it by its new name to verify propagation
 ALTER TYPE tc6 RENAME ATTRIBUTE b TO d;
 SELECT (e::tc6).d FROM t5 ORDER BY 1;
- d 
+ d
 ---------------------------------------------------------------------
 2
 (1 row)
@@ -152,13 +152,13 @@ SELECT (e::tc6).d FROM t5 ORDER BY 1;
 -- change owner of supported types and check ownership on remote server
 ALTER TYPE te4 OWNER TO typeuser;
 SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 te4 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(te4,typeuser)")
 (localhost,57638,t,"(te4,typeuser)")
@@ -166,13 +166,13 @@
 
 ALTER TYPE tc6 OWNER TO typeuser;
 SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 tc6 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(tc6,typeuser)")
 (localhost,57638,t,"(tc6,typeuser)")
@@ -190,60 +190,60 @@ CREATE TYPE te6 AS ENUM ('a','b','c');
 RESET citus.enable_ddl_propagation;
 CREATE TABLE t6 (a int, b tc8, c te6);
 SELECT create_distributed_table('t6', 'a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 RESET ROLE;
 -- test ownership of all types
 SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 tc7 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(tc7,typeuser)")
 (localhost,57638,t,"(tc7,typeuser)")
 (2 rows)
 
 SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 te5 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(te5,typeuser)")
 (localhost,57638,t,"(te5,typeuser)")
 (2 rows)
 
 SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 tc8 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(tc8,typeuser)")
 (localhost,57638,t,"(tc8,typeuser)")
 (2 rows)
 
 SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;
- typname | usename 
+ typname | usename
 ---------------------------------------------------------------------
 te6 | typeuser
 (1 row)
 
 SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"(te6,typeuser)")
 (localhost,57638,t,"(te6,typeuser)")
@@ -257,12 +257,12 @@ DROP TYPE tc3, tc4, tc5 CASCADE;
 NOTICE:  drop cascades to column b of table t5
 -- test if the types are deleted
 SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;
- typname 
+ typname
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"")
 (localhost,57638,t,"")
@@ -301,9 +301,9 @@ CREATE TYPE distributed_enum_type AS ENUM ('a', 'c');
 -- enforce distribution of types in every case
 CREATE TABLE type_proc (a int, b distributed_composite_type, c distributed_enum_type);
 SELECT create_distributed_table('type_proc','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 DROP TABLE type_proc;
@@ -330,13 +330,13 @@ CREATE TYPE feature_flag_composite_type AS (a int, b int);
 CREATE TYPE feature_flag_enum_type AS ENUM ('a', 'b');
 -- verify types do not exist on workers
 SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
- count 
+ count
 ---------------------------------------------------------------------
 2
 (1 row)
 
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,0)
 (localhost,57638,t,0)
@@ -345,19 +345,19 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('
 -- verify they are still distributed when required
 CREATE TABLE feature_flag_table (a int PRIMARY KEY, b feature_flag_composite_type, c feature_flag_enum_type);
 SELECT create_distributed_table('feature_flag_table','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
- count 
+ count
 ---------------------------------------------------------------------
 2
 (1 row)
 
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,2)
 (localhost,57638,t,2)
@@ -368,7 +368,7 @@ RESET citus.enable_create_type_propagation;
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA type_tests CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
@@ -376,7 +376,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
 
 DROP SCHEMA type_tests2 CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
@@ -384,7 +384,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
 
 DROP USER typeuser;
 SELECT run_command_on_workers($$DROP USER typeuser;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP ROLE")
 (localhost,57638,t,"DROP ROLE")
diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out
index 1750ef2a0..077b9c6a2 100644
--- a/src/test/regress/expected/distributed_types_conflict.out
+++ b/src/test/regress/expected/distributed_types_conflict.out
@@ -1,7 +1,7 @@
 SET citus.next_shard_id TO 20020000;
 CREATE SCHEMA type_conflict;
 SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SCHEMA")
 (localhost,57638,t,"CREATE SCHEMA")
@@ -33,14 +33,14 @@ SET search_path TO type_conflict;
 WHERE pg_class.relname = 'local_table'
 AND attnum > 0
 ORDER BY attnum;
- relname | attname | typname 
+ relname | attname | typname
 ---------------------------------------------------------------------
 local_table | a | int4
 local_table | b | my_precious_type(citus_backup_0)
 (2 rows)
 
 SELECT * FROM local_table;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 42 | ("always bring a towel",t)
 (1 row)
@@ -49,37 +49,37 @@ SELECT * FROM local_table;
 SET search_path TO type_conflict;
 -- make sure worker_create_or_replace correctly generates new names while types are existing
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int, d int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int, d int);');
- worker_create_or_replace_object 
+ worker_create_or_replace_object
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -93,7 +93,7 @@ FROM pg_attribute
 JOIN pg_type AS atttype ON (atttypid = atttype.oid)
 WHERE pg_type.typname LIKE 'multi_conflicting_type%'
 GROUP BY pg_type.typname;
- typname | fields 
+ typname | fields
 ---------------------------------------------------------------------
 multi_conflicting_type | a int4, b int4, c int4, d int4
 multi_conflicting_type(citus_backup_0) | a int4, b int4
diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out
index 2a84e67de..fbfee78ac 100644
--- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out
+++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out
@@ -1,6 +1,6 @@
 SHOW server_version \gset
 SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
- version_above_eleven 
+ version_above_eleven
 ---------------------------------------------------------------------
 t
 (1 row)
@@ -14,14 +14,14 @@ BEGIN;
 CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no');
 CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit);
 SELECT create_distributed_table('t1','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t1 VALUES (1, 'yes');
 SELECT * FROM t1;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | yes
 (1 row)
@@ -32,13 +32,13 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
 ABORT;
 -- maybe should not be on the workers
 SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
- string_agg 
+ string_agg
 ---------------------------------------------------------------------
 yes,no
 (1 row)
 
 SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"yes,no")
 (localhost,57638,t,"yes,no")
@@ -49,13 +49,13 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
 COMMIT;
 -- maybe should be on the workers (pg12 and above)
 SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
- string_agg 
+ string_agg
 ---------------------------------------------------------------------
 yes,no,maybe
 (1 row)
 
 SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"yes,no,maybe")
 (localhost,57638,t,"yes,no,maybe")
@@ -65,7 +65,7 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA xact_enum_type CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
index 0a9db443e..398c616c5 100644
--- a/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
+++ b/src/test/regress/expected/distributed_types_xact_add_enum_value_0.out
@@ -1,6 +1,6 @@
 SHOW server_version \gset
 SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
- version_above_eleven 
+ version_above_eleven
 ---------------------------------------------------------------------
 f
 (1 row)
@@ -14,14 +14,14 @@ BEGIN;
 CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no');
 CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit);
 SELECT create_distributed_table('t1','a');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO t1 VALUES (1, 'yes');
 SELECT * FROM t1;
- a | b 
+ a | b
 ---------------------------------------------------------------------
 1 | yes
 (1 row)
@@ -33,13 +33,13 @@ ERROR:  ALTER TYPE ... ADD cannot run inside a transaction block
 ABORT;
 -- maybe should not be on the workers
 SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
- string_agg 
+ string_agg
 ---------------------------------------------------------------------
 yes,no
 (1 row)
 
 SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"yes,no")
 (localhost,57638,t,"yes,no")
@@ -51,13 +51,13 @@ ERROR:  ALTER TYPE ... ADD cannot run inside a transaction block
 COMMIT;
 -- maybe should be on the workers (pg12 and above)
 SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
- string_agg 
+ string_agg
 ---------------------------------------------------------------------
 yes,no
 (1 row)
 
 SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"yes,no")
 (localhost,57638,t,"yes,no")
@@ -67,7 +67,7 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA xact_enum_type CASCADE;
 SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
diff --git a/src/test/regress/expected/dml_recursive.out b/src/test/regress/expected/dml_recursive.out
index 0196aa2c9..0530f6093 100644
--- a/src/test/regress/expected/dml_recursive.out
+++ b/src/test/regress/expected/dml_recursive.out
@@ -3,23 +3,23 @@ SET search_path TO recursive_dml_queries, public;
 SET citus.next_shard_id TO 2370000;
 CREATE TABLE recursive_dml_queries.distributed_table (tenant_id text, dept int, info jsonb);
 SELECT create_distributed_table('distributed_table', 'tenant_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE recursive_dml_queries.second_distributed_table (tenant_id text, dept int, info jsonb);
 SELECT create_distributed_table('second_distributed_table', 'tenant_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE recursive_dml_queries.reference_table (id text, name text);
 SELECT create_reference_table('reference_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE recursive_dml_queries.local_table (id text, name text);
@@ -54,7 +54,7 @@ RETURNING reference_table.name;
 
 DEBUG:  generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table
 DEBUG:  Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name
- name 
+ name
 ---------------------------------------------------------------------
 new_user_50
 (1 row)
@@ -87,7 +87,7 @@ RETURNING second_distributed_table.tenant_id, second_distributed_table.dept;
 
 DEBUG:  generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC
 DEBUG:  Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept
- tenant_id | dept 
+ tenant_id | dept
 ---------------------------------------------------------------------
 12 | 18
 2 | 18
@@ -156,7 +156,7 @@ RETURNING distributed_table.*;
 
 DEBUG:  generating subplan 11_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM recursive_dml_queries.local_table
 DEBUG:  Plan 11 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
- tenant_id | dept | info 
+ tenant_id | dept | info
 ---------------------------------------------------------------------
 50 | 50 | {"f1": 50, "f2": 2500}
 (1 row)
@@ -179,7 +179,7 @@ RETURNING distributed_table.*;
 
 DEBUG:  generating subplan 12_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids
 DEBUG:  Plan 12 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
- tenant_id | dept | info 
+ tenant_id | dept | info
 ---------------------------------------------------------------------
 50 | 50 | {"f1": 50, "f2": 2500}
 (1 row)
@@ -212,7 +212,7 @@ foo_inner_1 JOIN LATERAL
 
 ) foo_inner_2 ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id)
 ORDER BY foo_inner_1.tenant_id;
- tenant_id 
+ tenant_id
 ---------------------------------------------------------------------
 14
 24
diff --git a/src/test/regress/expected/ensure_no_intermediate_data_leak.out b/src/test/regress/expected/ensure_no_intermediate_data_leak.out
index b4ed5abd5..2e4cb92b8 100644
--- a/src/test/regress/expected/ensure_no_intermediate_data_leak.out
+++ b/src/test/regress/expected/ensure_no_intermediate_data_leak.out
@@ -1,18 +1,18 @@
 ---------------------------------------------------------------------
--- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF 
--- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE 
--- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR 
+-- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF
+-- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE
+-- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR
 -- AND ON THE WORKERS. HOWEVER, WE HAVE SOME ISSUES AROUND
 -- WINDOWS SUPPORT, FAILURES IN TASK-TRACKER EXECUTOR
 -- SO WE DISABLE THIS TEST ON WINDOWS
 ---------------------------------------------------------------------
 SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%';
- pg_ls_dir 
+ pg_ls_dir
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT run_command_on_workers($$SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"")
 (localhost,57638,t,"")
diff --git a/src/test/regress/expected/escape_extension_name.out b/src/test/regress/expected/escape_extension_name.out
index 4968d02cd..45ca2a9a0 100644
--- a/src/test/regress/expected/escape_extension_name.out
+++ b/src/test/regress/expected/escape_extension_name.out
@@ -14,7 +14,7 @@ WHERE name = 'uuid-ossp'
 :uuid_present_command;
 -- show that the extension is created on both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,1)
 (localhost,57638,t,1)
@@ -25,7 +25,7 @@ DROP EXTENSION "uuid-ossp";
 RESET client_min_messages;
 -- show that the extension is dropped from both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,0)
 (localhost,57638,t,0)
@@ -33,7 +33,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname
 
 -- show that extension recreation on new nodes works also fine with extension names that require escaping
 SELECT 1 from master_remove_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -50,14 +50,14 @@ WHERE name = 'uuid-ossp'
 :uuid_present_command;
 -- and add the other node
 SELECT 1 from master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 
 -- show that the extension exists on both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,1)
 (localhost,57638,t,1)
diff --git a/src/test/regress/expected/escape_extension_name_0.out b/src/test/regress/expected/escape_extension_name_0.out
index e6c43039e..d27415e4a 100644
--- a/src/test/regress/expected/escape_extension_name_0.out
+++ b/src/test/regress/expected/escape_extension_name_0.out
@@ -12,14 +12,14 @@ FROM pg_available_extensions()
 WHERE name = 'uuid-ossp'
 \gset
 :uuid_present_command;
- uuid_ossp_present 
+ uuid_ossp_present
 ---------------------------------------------------------------------
 f
 (1 row)
 
 -- show that the extension is created on both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,0)
 (localhost,57638,t,0)
@@ -31,7 +31,7 @@ ERROR:  extension "uuid-ossp" does not exist
 RESET client_min_messages;
 -- show that the extension is dropped from both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,0)
 (localhost,57638,t,0)
@@ -39,7 +39,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname
 
 -- show that extension recreation on new nodes works also fine with extension names that require escaping
 SELECT 1 from master_remove_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -54,21 +54,21 @@ FROM pg_available_extensions()
 WHERE name = 'uuid-ossp'
 \gset
 :uuid_present_command;
- uuid_ossp_present 
+ uuid_ossp_present
 ---------------------------------------------------------------------
 f
 (1 row)
 
 -- and add the other node
 SELECT 1 from master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 
 -- show that the extension exists on both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,0)
 (localhost,57638,t,0)
diff --git a/src/test/regress/expected/expression_reference_join.out b/src/test/regress/expected/expression_reference_join.out
index 794a0e2ad..8076d9200 100644
--- a/src/test/regress/expected/expression_reference_join.out
+++ b/src/test/regress/expected/expression_reference_join.out
@@ -13,16 +13,16 @@ INSERT INTO test VALUES (2,2);
 
 SELECT create_reference_table('ref');
 NOTICE:  Copying data from local table...
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('test', 'x');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- PR 3180 implements expressions in join clauses to reference tables to support CHbenCHmark queries 7/8/9
@@ -33,7 +33,7 @@ FROM ref a
 
 WHERE t2.y * 2 = a.a
 ORDER BY 1,2,3;
- y | x | x | a | b 
+ y | x | x | a | b
 ---------------------------------------------------------------------
 2 | 1 | 1 | 4 | 4
 2 | 1 | 2 | 4 | 4
@@ -53,7 +53,7 @@ FROM ref b
 
 WHERE t2.y - a.a - b.b = 0
 ORDER BY 1,2,3;
- y | x | x | a | b | a | b 
+ y | x | x | a | b | a | b
 ---------------------------------------------------------------------
 (0 rows)
 
diff --git a/src/test/regress/expected/failure_1pc_copy_append.out b/src/test/regress/expected/failure_1pc_copy_append.out
index 5d4150341..bae1675cb 100644
--- a/src/test/regress/expected/failure_1pc_copy_append.out
+++ b/src/test/regress/expected/failure_1pc_copy_append.out
@@ -1,7 +1,7 @@
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- do not cache any connections
@@ -13,26 +13,26 @@ SET citus.next_shard_id TO 100400;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100;
 CREATE TABLE copy_test (key int, value int);
 SELECT create_distributed_table('copy_test', 'key', 'append');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT citus.clear_network_traffic();
- clear_network_traffic 
+ clear_network_traffic
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
 SELECT count(1) FROM copy_test;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 SELECT citus.dump_network_traffic();
- dump_network_traffic 
+ dump_network_traffic
 ---------------------------------------------------------------------
 (0,coordinator,"[initial message]")
 (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
@@ -58,16 +58,16 @@ SELECT citus.dump_network_traffic();
 
 ---- all of the following tests test behavior with 2 shard placements ----
 SHOW citus.shard_replication_factor;
- citus.shard_replication_factor 
+ citus.shard_replication_factor
 ---------------------------------------------------------------------
 2
 (1 row)
 
 ---- kill the connection when we try to create the shard ----
 SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -78,23 +78,23 @@ CONTEXT:  while executing command on localhost:xxxxx
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
 (2 rows)
 
 SELECT count(1) FROM copy_test;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 ---- kill the connection when we try to start a transaction ----
 SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -104,23 +104,23 @@ ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
 (2 rows)
 
 SELECT count(1) FROM copy_test;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 ---- kill the connection when we start the COPY ----
 SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -131,23 +131,23 @@ CONTEXT:  while executing command on localhost:xxxxx
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
 (2 rows)
 
 SELECT count(1) FROM copy_test;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 ---- kill the connection when we send the data ----
 SELECT citus.mitmproxy('conn.onCopyData().kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -155,16 +155,16 @@ ERROR:  failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
 (2 rows)
 
 SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT count(1) FROM copy_test;
@@ -172,16 +172,16 @@ WARNING:  connection error: localhost:xxxxx
 DETAIL:  server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 ---- cancel the connection when we send the data ----
 SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -189,7 +189,7 @@ ERROR:  canceling statement due to user request
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
@@ -199,9 +199,9 @@ SELECT count(1) FROM copy_test;
 ERROR:  canceling statement due to user request
 ---- kill the connection when we try to get the size of the table ----
 SELECT citus.mitmproxy('conn.onQuery(query="pg_table_size").kill()');
- mitmproxy 
+ mitmproxy
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
@@ -215,23 +215,23 @@ ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
 ORDER BY placementid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid 
+ logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
 (2 rows)
 
 SELECT count(1) FROM copy_test;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
 
 ---- kill the connection when we try to get the min, max of the table ----
citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -245,23 +245,23 @@ ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 (2 rows) SELECT count(1) FROM copy_test; - count + count --------------------------------------------------------------------- 4 (1 row) ---- kill the connection when we try to COMMIT ---- SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -273,7 +273,7 @@ CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass ORDER BY placementid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101 @@ -282,16 +282,16 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p (4 rows) SELECT count(1) FROM copy_test; - count + count --------------------------------------------------------------------- 8 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE copy_test; diff --git a/src/test/regress/expected/failure_1pc_copy_hash.out b/src/test/regress/expected/failure_1pc_copy_hash.out index 8e672cdc6..1a9d36355 100644 --- a/src/test/regress/expected/failure_1pc_copy_hash.out +++ b/src/test/regress/expected/failure_1pc_copy_hash.out @@ -1,7 +1,7 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- do not cache any connections @@ -14,26 +14,26 @@ SET citus.max_cached_conns_per_worker TO 0; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100; CREATE TABLE copy_test (key int, value int); SELECT create_distributed_table('copy_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic 
--------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT count(1) FROM copy_test; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT citus.dump_network_traffic(); - dump_network_traffic + dump_network_traffic --------------------------------------------------------------------- (0,coordinator,"[initial message]") (0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']") @@ -54,9 +54,9 @@ SELECT citus.dump_network_traffic(); -- ==== kill the connection when we try to start a transaction ==== -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -68,9 +68,9 @@ CONTEXT: COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we try to start the COPY ==== -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -82,9 +82,9 @@ COPY copy_test, line 1: "0, 0" -- ==== kill the connection when we first start sending data ==== -- the query should abort SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the client - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -92,9 +92,9 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- ==== kill the connection when the worker confirms it's received the data ==== -- the query should abort SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -102,29 +102,29 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx -- ==== kill the connection when we try to send COMMIT ==== -- the query should succeed, and the placement should be marked inactive SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(1) FROM copy_test; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT 
citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -135,21 +135,21 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -- the shard is marked invalid SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) AND shardstate = 3; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(1) FROM copy_test; - count + count --------------------------------------------------------------------- 8 (1 row) @@ -169,24 +169,24 @@ ERROR: missing data for column "value" CONTEXT: COPY copy_test, line 5: "10" -- kill the connection if the coordinator sends COMMIT. It doesn't, so nothing changes SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9 && echo 10' WITH CSV; ERROR: missing data for column "value" CONTEXT: COPY copy_test, line 5: "10" SELECT * FROM copy_test ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- (0 rows) -- ==== clean up some more to prepare for tests with only one replica ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE copy_test; @@ -194,7 +194,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_por SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 @@ -203,7 +203,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( -- ==== okay, run some tests where there's only one active shard ==== COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -213,9 +213,9 @@ SELECT * FROM copy_test; -- the worker is unreachable SELECT citus.mitmproxy('conn.killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -227,13 +227,13 @@ CONTEXT: COPY copy_test, line 1: "0, 0" ERROR: could not connect to any active placements CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -243,9 +243,9 @@ 
SELECT * FROM copy_test; -- the first message fails SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -255,13 +255,13 @@ COPY copy_test, line 1: "0, 0" ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -271,9 +271,9 @@ SELECT * FROM copy_test; -- the COPY message fails SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -283,13 +283,13 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx COPY copy_test, line 1: "0, 0" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -299,21 +299,21 @@ SELECT * FROM copy_test; -- the COPY data fails SELECT citus.mitmproxy('conn.onCopyData().killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -323,9 +323,9 @@ SELECT * FROM copy_test; -- the COMMIT fails SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -337,13 +337,13 @@ CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -355,7 +355,7 @@ SELECT * FROM copy_test; SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 @@ -363,9 +363,9 @@ 
SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( -- the COMMIT makes it through but the connection dies before we get a response SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV; @@ -377,22 +377,22 @@ CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass ) ORDER BY nodeport, placementid; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- 100400 | 1 | 0 | localhost | 9060 | 100 100400 | 3 | 0 | localhost | 57637 | 101 (2 rows) SELECT * FROM copy_test; - key | value + key | value --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -406,9 +406,9 @@ SELECT * FROM copy_test; -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE copy_test; diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index 8ca5e31da..473df0f4a 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -5,16 +5,16 @@ -- tested as they don't create network activity -- SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.next_shard_id TO 200000; -- verify we have all worker nodes present SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 9060 localhost | 57637 @@ -22,7 +22,7 @@ ORDER BY 1, 2; -- verify there are no tables that could prevent add/remove node operations SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- (0 rows) @@ -30,23 +30,23 @@ CREATE SCHEMA add_remove_node; SET SEARCH_PATH=add_remove_node; CREATE TABLE user_table(user_id int, user_name text); SELECT create_reference_table('user_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE event_table(user_id int, event_id int, event_name text); SELECT create_distributed_table('event_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate 
--------------------------------------------------------------------- 200000 | 1 200000 | 1 @@ -54,14 +54,14 @@ ORDER BY placementid; SELECT master_disable_node('localhost', :worker_2_proxy_port); NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back. - master_disable_node + master_disable_node --------------------------------------------------------------------- - + (1 row) SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -70,16 +70,16 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); @@ -89,15 +89,15 @@ ERROR: server closed the connection unexpectedly before or while processing the request. CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -106,16 +106,16 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) -- fail create schema command SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); @@ -126,7 +126,7 @@ CONTEXT: while executing command on localhost:xxxxx -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -135,16 +135,16 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) -- fail activate node by failing reference table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); @@ -153,7 +153,7 @@ ERROR: canceling statement due to user request -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 
2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -162,15 +162,15 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- master_remove_node fails when there are shards on that worker @@ -179,15 +179,15 @@ ERROR: you cannot remove the primary node of a node group which has shard place -- drop event table and re-run remove DROP TABLE event_table; SELECT master_remove_node('localhost', :worker_2_proxy_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- verify node is removed SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -196,7 +196,7 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) @@ -205,22 +205,22 @@ ORDER BY placementid; -- it does not create any network activity, therefore failures can not -- be injected through the network SELECT master_add_inactive_node('localhost', :worker_2_proxy_port); - master_add_inactive_node + master_add_inactive_node --------------------------------------------------------------------- 3 (1 row) SELECT master_remove_node('localhost', :worker_2_proxy_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) @@ -228,9 +228,9 @@ ORDER BY placementid; -- test that master_add_node replicates a reference table -- to the newly added node.
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); @@ -242,7 +242,7 @@ CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -251,15 +251,15 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); @@ -268,7 +268,7 @@ ERROR: canceling statement due to user request -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57637 (1 row) @@ -277,21 +277,21 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 (1 row) -- reset cluster to original state SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx - master_add_node + master_add_node --------------------------------------------------------------------- 6 (1 row) @@ -299,7 +299,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx -- verify node is added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 9060 localhost | 57637 @@ -309,7 +309,7 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 200000 | 1 @@ -317,15 +317,15 @@ ORDER BY placementid; -- fail master_add_node by failing copy out operation SELECT master_remove_node('localhost', :worker_1_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_add_node('localhost', :worker_1_port); @@ -335,20 +335,20 @@ CONTEXT: while executing command on localhost:xxxxx -- verify node is not added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port 
--------------------------------------------------------------------- localhost | 9060 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_add_node('localhost', :worker_1_port); NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx - master_add_node + master_add_node --------------------------------------------------------------------- 8 (1 row) @@ -356,7 +356,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx -- verify node is added SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 9060 localhost | 57637 @@ -366,7 +366,7 @@ SELECT shardid, shardstate FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid) WHERE s.logicalrelid = 'user_table'::regclass ORDER BY placementid; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 200000 | 1 200000 | 1 @@ -377,7 +377,7 @@ DROP SCHEMA add_remove_node CASCADE; NOTICE: drop cascades to table add_remove_node.user_table SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') ORDER BY nodeport; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 9060 | t | DROP SCHEMA localhost | 57637 | t | DROP SCHEMA diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 573f85811..778fcad8e 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -6,9 +6,9 @@ -- - timeout -- SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE SCHEMA fail_connect; @@ -23,9 +23,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add primary key constraint on distribution column (or group of columns @@ -38,17 +38,17 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const -- into connection establishment problems SET citus.node_connection_timeout TO 400; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE r1 ( @@ -61,26 +61,26 @@ INSERT INTO r1 (id, name) VALUES (3,'baz'); SELECT create_reference_table('r1'); NOTICE: Copying data from local table... 
- create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- we cannot control which replica of the reference table will be queried and there is -- only one specific client we can control the connection for. --- by using round-robin task_assignment_policy we can force to hit both machines. +-- by using round-robin task_assignment_policy we can force to hit both machines. -- and in the end, dumping the network traffic shows that the connection establishment -- is initiated to the node behind the proxy SET client_min_messages TO ERROR; @@ -88,13 +88,13 @@ SET citus.task_assignment_policy TO 'round-robin'; -- suppress the warning since we can't control which shard is chosen first. Failure of this -- test would be if one of the queries does not return the result but an error. SELECT name FROM r1 WHERE id = 2; - name + name --------------------------------------------------------------------- bar (1 row) SELECT name FROM r1 WHERE id = 2; - name + name --------------------------------------------------------------------- bar (1 row) @@ -102,70 +102,70 @@ SELECT name FROM r1 WHERE id = 2; -- verify a connection attempt was made to the intercepted node, this would have caused the -- connection to have been delayed and thus caused a timeout SELECT citus.dump_network_traffic(); - dump_network_traffic + dump_network_traffic --------------------------------------------------------------------- (0,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) --- similar test with the above but this time on a +-- similar test with the above but this time on a -- distributed table instead of a reference table -- and with citus.force_max_query_parallelization set SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- suppress the warning since we can't control which shard is chosen first. Failure of this -- test would be if one of the queries does not return the result but an error.
SELECT count(*) FROM products; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM products; - count + count --------------------------------------------------------------------- 0 (1 row) --- use OFFSET 1 to prevent printing the line where source +-- use OFFSET 1 to prevent printing the line where source -- is the worker SELECT citus.dump_network_traffic() ORDER BY 1 OFFSET 1; - dump_network_traffic + dump_network_traffic --------------------------------------------------------------------- (1,coordinator,"[initial message]") (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.shard_replication_factor TO 1; CREATE TABLE single_replicatated(key int); SELECT create_distributed_table('single_replicatated', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- this time the table is single replicated and we're still using -- the max parallelization flag, so the query should fail SET citus.force_max_query_parallelization TO ON; SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM single_replicatated; @@ -175,47 +175,47 @@ SET citus.force_max_query_parallelization TO OFF; -- to see that connection establishment failures could -- mark placement INVALID SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; -SELECT +SELECT count(*) as invalid_placement_count -FROM - pg_dist_shard_placement -WHERE - shardstate = 3 AND +FROM + pg_dist_shard_placement +WHERE + shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); - invalid_placement_count + invalid_placement_count --------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.delay(500)'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) INSERT INTO products VALUES (100, '100', 100); COMMIT; -SELECT +SELECT count(*) as invalid_placement_count -FROM - pg_dist_shard_placement -WHERE - shardstate = 3 AND +FROM + pg_dist_shard_placement +WHERE + shardstate = 3 AND shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass); - invalid_placement_count + invalid_placement_count --------------------------------------------------------------------- 1 (1 row) -- show that INSERT went through SELECT count(*) FROM products WHERE product_no = 100; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -225,14 +225,14 @@ RESET client_min_messages; SELECT get_global_active_transactions(); WARNING: could not establish connection after 400 ms WARNING: connection error: localhost:xxxxx - get_global_active_transactions + get_global_active_transactions --------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.node_connection_timeout TO DEFAULT; diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 0e2fce4c3..b867005af
100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -5,9 +5,9 @@ CREATE SCHEMA copy_distributed_table; SET search_path TO 'copy_distributed_table'; SET citus.next_shard_id TO 1710000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- With one placement COPY should error out and placement should stay healthy. @@ -16,23 +16,23 @@ SET citus.shard_count to 4; SET citus.max_cached_conns_per_worker to 0; CREATE TABLE test_table(id int, value_1 int); SELECT create_distributed_table('test_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='copy_distributed_table.test_table'::regclass AND shardstate != 1; -- Just kill the connection after sending the first query to the worker. SELECT citus.mitmproxy('conn.kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; @@ -44,46 +44,46 @@ CONTEXT: COPY test_table, line 1: "1,2" ERROR: could not connect to any active placements CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- Now, kill the connection while copying the data SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -91,109 +91,109 @@ SELECT count(*) FROM test_table; -- Similar to the above one, but now cancel the connection -- instead of killing it. 
SELECT citus.mitmproxy('conn.onCopyData().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- kill the connection after worker sends command complete message SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- similar to the above one, but cancel the connection on command complete SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- kill the connection on PREPARE TRANSACTION SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -202,27 +202,27 @@ SELECT count(*) FROM test_table; SET client_min_messages TO ERROR; -- kill on command complete on COMMIT PREPARED, command should succeed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT
* FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -230,9 +230,9 @@ SELECT count(*) FROM test_table; TRUNCATE TABLE test_table; -- kill on ROLLBACK, command could be rolled back SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -241,19 +241,19 @@ ROLLBACK; WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -264,15 +264,15 @@ NOTICE: drop cascades to view unhealthy_shard_count SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table_2 FROM stdin delimiter ','; @@ -297,9 +297,9 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. CONTEXT: COPY test_table_2, line 5: "9,10" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -308,7 +308,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate + logicalrelid | shardid | shardstate --------------------------------------------------------------------- test_table_2 | 1710004 | 3 test_table_2 | 1710004 | 1 @@ -324,17 +324,17 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) --- Kill the connection when we try to start the COPY +-- Kill the connection when we try to start the COPY -- The query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table_2 FROM stdin delimiter ','; @@ -344,9 +344,9 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -355,7 +355,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid =
'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate + logicalrelid | shardid | shardstate --------------------------------------------------------------------- test_table_2 | 1710008 | 1 test_table_2 | 1710008 | 1 @@ -371,26 +371,26 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate DROP TABLE test_table_2; CREATE TABLE test_table_2(id int, value_1 int); SELECT create_distributed_table('test_table_2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- When we kill on copying data, it will be rolled back and placements won't be labeled as invalid. -- Note that now we sent data to shard xxxxx, yet it is not marked as invalid. -- You can check the issue about this behaviour: https://github.com/citusdata/citus/issues/1933 SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \COPY test_table_2 FROM stdin delimiter ','; ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate @@ -399,7 +399,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate ON pdsd.shardid = pds.shardid WHERE pds.logicalrelid = 'test_table_2'::regclass ORDER BY shardid, nodeport; - logicalrelid | shardid | shardstate + logicalrelid | shardid | shardstate --------------------------------------------------------------------- test_table_2 | 1710012 | 1 test_table_2 | 1710012 | 1 diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index a253b446d..e34d24f3f 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -1,121 +1,121 @@ --- --- Failure tests for COPY to reference tables --- +-- +-- Failure tests for COPY to reference tables +-- CREATE SCHEMA copy_reference_failure; SET search_path TO 'copy_reference_failure'; SET citus.next_shard_id TO 130000; -- we don't want to see the prepared transaction numbers in the warnings SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table(id int, value_1 int); SELECT create_reference_table('test_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='copy_reference_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT
citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: failure on connection marked as essential: localhost:xxxxx CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- kill as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' @@ -125,111 +125,111 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the coordinator sends COPY command SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM 
test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- kill as soon as the worker sends CopyComplete SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: failed to COPY to shard xxxxx on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- cancel as soon as the worker sends CopyComplete SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) --- kill the connection when we try to start the COPY +-- kill the connection when we try to start the COPY -- the query should abort SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' @@ -239,74 +239,74 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- killing on PREPARE should be fine, everything should be rolled back SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) \copy test_table FROM STDIN DELIMITER ',' ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) -- cancelling on PREPARE should be fine, everything should be rolled back SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy
 -- cancelling on PREPARE should be fine, everything should be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 \copy test_table FROM STDIN DELIMITER ','
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT * FROM unhealthy_shard_count;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT count(*) FROM test_table;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -314,33 +314,33 @@ SELECT count(*) FROM test_table;
 -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
 -- and all the workers committed
 SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 \copy test_table FROM STDIN DELIMITER ','
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- we shouldn't have any prepared transactions in the workers
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT * FROM unhealthy_shard_count;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT count(*) FROM test_table;
- count
+ count
 ---------------------------------------------------------------------
     3
 (1 row)
@@ -348,36 +348,36 @@ SELECT count(*) FROM test_table;
 TRUNCATE test_table;
 -- kill as soon as the coordinator sends COMMIT
 SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 \copy test_table FROM STDIN DELIMITER ','
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
--- Since we kill connections to one worker after commit arrives but the
+-- Since we kill connections to one worker after commit arrives but the
 -- other worker connections are healthy, we cannot commit on 1 worker
 -- which has 1 active shard placements, but the other does. That's why
 -- we expect to see 1 recovered prepared transactions.
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     1
 (1 row)
 SELECT * FROM unhealthy_shard_count;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT count(*) FROM test_table;
- count
+ count
 ---------------------------------------------------------------------
     3
 (1 row)
@@ -386,9 +386,9 @@ TRUNCATE test_table;
 -- finally, test failing on ROLLBACK just after the coordinator
 -- sends the ROLLBACK so the command can be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
@@ -398,30 +398,30 @@ ROLLBACK;
 WARNING: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT * FROM unhealthy_shard_count;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT count(*) FROM test_table;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
--- but now kill just after the worker sends response to
+-- but now kill just after the worker sends response to
 -- ROLLBACK command, command should have been rollbacked
 -- both on the distributed table and the placements
 SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
@@ -431,25 +431,25 @@ ROLLBACK;
 WARNING: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT * FROM unhealthy_shard_count;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT count(*) FROM test_table;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
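Every test in the next file follows the same injection/verification cycle; a condensed sketch of one iteration, using the statements as they appear in the hunks below:

SELECT citus.mitmproxy('conn.kill()');                 -- inject the failure
SELECT create_distributed_table('test_table', 'id');   -- expected to error out
SELECT citus.mitmproxy('conn.allow()');                -- restore normal traffic
SELECT count(*) FROM pg_dist_shard
WHERE logicalrelid = 'create_distributed_table_non_empty_failure.test_table'::regclass;  -- expect 0 leaked shards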
diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
index e6649c5ac..9402dde3e 100644
--- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out
+++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
@@ -1,6 +1,6 @@
---
--- Failure tests for COPY to reference tables
---
+--
+-- Failure tests for COPY to reference tables
+--
 -- We have to keep two copies of this failure test
 -- because if the shards are created via the executor
 -- cancellations are processed, otherwise they are not
@@ -8,9 +8,9 @@
 CREATE SCHEMA create_distributed_table_non_empty_failure;
 SET search_path TO 'create_distributed_table_non_empty_failure';
 SET citus.next_shard_id TO 11000000;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- we'll start with replication factor 1 and 2pc
@@ -20,9 +20,9 @@
 CREATE TABLE test_table(id int, value_1 int);
 INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- in the first test, kill the first connection we sent from the coordinator
 SELECT citus.mitmproxy('conn.kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -31,31 +31,31 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- in the first test, cancel the first connection we sent from the coordinator
 SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- kill as soon as the coordinator sends CREATE SCHEMA
 SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -64,13 +64,13 @@ ERROR: server closed the connection unexpectedly
 before or while processing the request.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,1)
@@ -81,28 +81,28 @@
 -- does not check for interrupts until GetRemoteCommandResult is called.
 -- Since we already sent the command at this stage, the schemas get created in workers
 SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,1)
 (2 rows)
 SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,"DROP SCHEMA")
 (localhost,57637,t,"DROP SCHEMA")
@@ -110,18 +110,18 @@
 -- this triggers a schema creation which prevents further transactions around dependency propagation
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE TYPE schema_proc AS (a int);
 DROP TYPE schema_proc;
 -- kill as soon as the coordinator sends begin
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -130,19 +130,19 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,1)
 (localhost,57637,t,1)
@@ -150,30 +150,30 @@
 -- cancel as soon as the coordinator sends begin
 -- if the shards are created via the executor, the table creation will fail
--- otherwise shards will be created because we ignore cancel requests during the shard creation
+-- otherwise shards will be created because we ignore cancel requests during the shard creation
 -- Interrupts are hold in CreateShardsWithRoundRobinPolicy
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,1)
 (localhost,57637,t,1)
@@ -184,9 +184,9 @@
 CREATE TABLE test_table(id int, value_1 int);
 INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- kill as soon as the coordinator sends CREATE TABLE
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -195,16 +195,16 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- kill as soon as the coordinator sends COPY
 SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -213,55 +213,55 @@ ERROR: server closed the connection unexpectedly
 before or while processing the request.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- kill when the COPY is completed, it should be rollbacked properly
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 NOTICE: Copying data from local table...
 ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
--- cancel as soon as the coordinator sends COPY, table
+-- cancel as soon as the coordinator sends COPY, table
 -- should not be created and rollbacked properly
 SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- cancel when the COPY is completed, it should be rollbacked properly
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 NOTICE: Copying data from local table...
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -271,16 +271,16 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- we don't want to see the prepared transaction numbers in the warnings
 SET client_min_messages TO ERROR;
 SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -288,27 +288,27 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- immediately cancel when we see prepare transaction to see if the command
 -- successfully rollbacked the created shards
 SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     1
 (1 row)
@@ -316,31 +316,31 @@ SELECT recover_prepared_transactions();
 -- kill as soon as the coordinator sends COMMIT
 -- shards should be created and kill should not affect
 SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     4
 (1 row)
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     2
 (1 row)
@@ -355,25 +355,25 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- cancel as soon as the coordinator sends COMMIT
 -- shards should be created and kill should not affect
 SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     4
 (1 row)
@@ -384,49 +384,49 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- kill as soon as the coordinator sends ROLLBACK
 -- the command can be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- cancel as soon as the coordinator sends ROLLBACK
--- should be rollbacked
+-- should be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -435,24 +435,24 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- testing for co-located tables.
 CREATE TABLE colocated_table(id int, value_1 int);
 SELECT create_distributed_table('colocated_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- Now, cancel the connection just after transaction is opened on
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -461,9 +461,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
@@ -472,13 +472,13 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
@@ -488,15 +488,15 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -505,9 +505,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
@@ -516,13 +516,13 @@ ERROR: server closed the connection unexpectedly
 before or while processing the request.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
@@ -532,15 +532,15 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -549,9 +549,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_command").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
@@ -560,13 +560,13 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
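The hunks so far exercised the default two-phase commit protocol; the file then repeats the same probes under 1PC. As the context lines below show, the switch is a single session GUC:

SET citus.multi_shard_commit_protocol TO '1pc';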
@@ -574,9 +574,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
 -- Now run the same tests with 1pc
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP TABLE colocated_table;
@@ -587,9 +587,9 @@
 CREATE TABLE test_table(id int, value_1 int);
 INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 SET citus.multi_shard_commit_protocol TO '1pc';
 SELECT citus.mitmproxy('conn.kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -598,19 +598,19 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
@@ -618,27 +618,27 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
 -- in the first test, cancel the first connection we sent from the coordinator
 SELECT citus.mitmproxy('conn.cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
@@ -646,18 +646,18 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
 -- this triggers a schema creation which prevents further transactions around dependency propagation
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE TYPE schema_proc AS (a int);
 DROP TYPE schema_proc;
 -- kill as soon as the coordinator sends begin
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -666,19 +666,19 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,1)
 (localhost,57637,t,1)
@@ -686,30 +686,30 @@
 -- cancel as soon as the coordinator sends begin
 -- if the shards are created via the executor, the table creation will fail
--- otherwise shards will be created because we ignore cancel requests during the shard creation
+-- otherwise shards will be created because we ignore cancel requests during the shard creation
 -- Interrupts are hold in CreateShardsWithRoundRobinPolicy
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,1)
 (localhost,57637,t,1)
@@ -720,9 +720,9 @@
 CREATE TABLE test_table(id int, value_1 int);
 INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- kill as soon as the coordinator sends CREATE TABLE
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -731,16 +731,16 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- kill as soon as the coordinator sends COPY
 SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
@@ -749,53 +749,53 @@ ERROR: server closed the connection unexpectedly
 before or while processing the request.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- kill when the COPY is completed, it should be rollbacked properly
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
--- cancel as soon as the coordinator sends COPY, table
+-- cancel as soon as the coordinator sends COPY, table
 -- should not be created and rollbacked properly
 SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- cancel when the COPY is completed, it should be rollbacked properly
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -803,49 +803,49 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- kill as soon as the coordinator sends ROLLBACK
 -- the command can be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- cancel as soon as the coordinator sends ROLLBACK
--- should be rollbacked
+-- should be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -853,21 +853,21 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- kill as soon as the coordinator sends COMMIT
 -- the command can be COMMITed
 SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 COMMIT;
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     4
 (1 row)
@@ -878,27 +878,27 @@ INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 -- cancel as soon as the coordinator sends COMMIT
 -- should be COMMITed
 SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 COMMIT;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     4
 (1 row)
@@ -908,24 +908,24 @@
 CREATE TABLE test_table(id int, value_1 int);
 INSERT INTO test_table VALUES (1,1),(2,2),(3,3),(4,4);
 CREATE TABLE colocated_table(id int, value_1 int);
 SELECT create_distributed_table('colocated_table', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- Now, cancel the connection just after transaction is opened on
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -934,9 +934,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
@@ -945,7 +945,7 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -954,15 +954,15 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -971,9 +971,9 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
 -- workers. Note that, when there is a colocated table, interrupts
 -- are not held and we can cancel in the middle of the execution
 SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
@@ -982,22 +982,22 @@ ERROR: server closed the connection unexpectedly
 before or while processing the request.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
- run_command_on_workers
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,9060,t,0)
 (localhost,57637,t,0)
 (2 rows)
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP SCHEMA create_distributed_table_non_empty_failure CASCADE;
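The next file covers CREATE INDEX CONCURRENTLY, which cannot run inside a transaction block, so a mid-command failure can leave an INVALID index behind rather than rolling back. The cleanup path the HINTs in these hunks recommend is, in sketch form:

DROP INDEX CONCURRENTLY IF EXISTS idx_index_test;                       -- remove the half-built index
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);    -- then retry the original command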
diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out
index 0e1f0968d..1e53e7d4f 100644
--- a/src/test/regress/expected/failure_create_index_concurrently.out
+++ b/src/test/regress/expected/failure_create_index_concurrently.out
@@ -3,9 +3,9 @@
 -- test create index concurrently command
 -- failure.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SET citus.shard_count = 4; -- two per worker
@@ -13,16 +13,16 @@
 CREATE SCHEMA index_schema;
 SET SEARCH_PATH=index_schema;
 CREATE TABLE index_test(id int, value_1 int, value_2 int);
 SELECT create_distributed_table('index_test', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- kill the connection when create command is issued
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
@@ -30,15 +30,15 @@ ERROR: CONCURRENTLY-enabled index command failed
 DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
 HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- verify index is not created
 SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
 WHERE nodeport = :worker_2_proxy_port;
- nodename | nodeport | success | result
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 9060 | t | 0
 (1 row)
@@ -46,16 +46,16 @@ WHERE nodeport = :worker_2_proxy_port;
 DROP TABLE index_test;
 CREATE TABLE index_test(id int, value_1 int, value_2 int);
 SELECT create_reference_table('index_test');
- create_reference_table
+ create_reference_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- kill the connection when create command is issued
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
@@ -63,26 +63,26 @@ ERROR: CONCURRENTLY-enabled index command failed
 DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
 HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP TABLE index_test;
 CREATE TABLE index_test(id int, value_1 int, value_2 int);
 SELECT create_distributed_table('index_test', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- cancel the connection when create command is issued
 -- network traffic may differ between execution during cancellation
 -- therefore dump_network_traffic() calls are not made
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
@@ -90,24 +90,24 @@ ERROR: CONCURRENTLY-enabled index command failed
 DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
 HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP TABLE index_test;
 CREATE TABLE index_test(id int, value_1 int, value_2 int);
 SELECT create_reference_table('index_test');
- create_reference_table
+ create_reference_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- cancel the connection when create command is issued
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
@@ -115,25 +115,25 @@ ERROR: CONCURRENTLY-enabled index command failed
 DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
 HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP TABLE index_test;
 CREATE TABLE index_test(id int, value_1 int, value_2 int);
 SELECT create_distributed_table('index_test', 'id');
- create_distributed_table
+ create_distributed_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
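After each failed command, the verification used throughout this file queries the worker behind the proxy (port 9060) directly for leftover indexes; a sketch of that check:

SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
WHERE nodeport = :worker_2_proxy_port;   -- result column shows how many index copies survive on that worker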
 -- kill the connection when create command is issued
 SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP INDEX CONCURRENTLY IF EXISTS idx_index_test;
@@ -141,15 +141,15 @@ ERROR: CONCURRENTLY-enabled index command failed
 DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
 HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- verify index is not dropped at worker 2
 SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
 WHERE nodeport = :worker_2_proxy_port;
- nodename | nodeport | success | result
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 9060 | t | 4
 (1 row)
@@ -160,7 +160,7 @@ NOTICE: drop cascades to table index_schema.index_test
 -- verify index is not at worker 2 upon cleanup
 SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
 WHERE nodeport = :worker_2_proxy_port;
- nodename | nodeport | success | result
+ nodename | nodeport | success | result
 ---------------------------------------------------------------------
 localhost | 9060 | t | 0
 (1 row)
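failure_create_reference_table.out applies the same probes to reference tables, whose single shard is placed on every node; after each injected failure the placement catalog should stay empty. In sketch form:

SELECT create_reference_table('ref_table');     -- attempted under an injected failure
SELECT count(*) FROM pg_dist_shard_placement;   -- expect 0 leaked placements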
diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out
index 7ced28939..a9d15187c 100644
--- a/src/test/regress/expected/failure_create_reference_table.out
+++ b/src/test/regress/expected/failure_create_reference_table.out
@@ -5,9 +5,9 @@
 CREATE SCHEMA failure_reference_table;
 SET search_path TO 'failure_reference_table';
 SET citus.next_shard_id TO 10000000;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 -- this is merely used to get the schema creation propagated. Without there are failures
@@ -19,9 +19,9 @@
 INSERT INTO ref_table VALUES(1),(2),(3);
 -- Kill on sending first query to worker node, should error
 -- out and not create any placement
 SELECT citus.mitmproxy('conn.onQuery().kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
@@ -30,16 +30,16 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Kill after creating transaction on worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
@@ -48,31 +48,31 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Cancel after creating transaction on worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Kill after copying data to worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
@@ -81,54 +81,54 @@ DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Cancel after copying data to worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Kill after copying data to worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 -- Cancel after copying data to worker node
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
@@ -138,41 +138,41 @@
 SET client_min_messages TO ERROR;
 -- Kill after preparing transaction. Since we don't commit after preparing, we recover
 -- prepared transaction afterwards.
 SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
- count
+ count
 ---------------------------------------------------------------------
     0
 (1 row)
 SELECT recover_prepared_transactions();
- recover_prepared_transactions
+ recover_prepared_transactions
 ---------------------------------------------------------------------
     1
 (1 row)
 -- Kill after commiting prepared, this should succeed
 SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT create_reference_table('ref_table');
- create_reference_table
+ create_reference_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
- shardid | nodeport | shardstate
+ shardid | nodeport | shardstate
 ---------------------------------------------------------------------
 10000008 | 9060 | 1
 10000008 | 57637 | 1
@@ -180,9 +180,9 @@ SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shard
 SET client_min_messages TO NOTICE;
 SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 DROP TABLE ref_table;
@@ -192,9 +192,9 @@
 CREATE TABLE ref_table(id int);
 INSERT INTO ref_table VALUES(1),(2),(3);
 -- Test in transaction
 SELECT citus.mitmproxy('conn.onQuery().kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
@@ -207,46 +207,46 @@ ERROR: failure on connection marked as essential: localhost:xxxxx
 COMMIT;
 -- kill on ROLLBACK, should be rollbacked
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
- create_reference_table
+ create_reference_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 WARNING: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
+ shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 (0 rows)
 -- cancel when the coordinator send ROLLBACK, should be rollbacked. We ignore cancellations
 -- during the ROLLBACK.
 SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')');
- mitmproxy
+ mitmproxy
 ---------------------------------------------------------------------
-
+
 (1 row)
 BEGIN;
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
- create_reference_table
+ create_reference_table
 ---------------------------------------------------------------------
-
+
 (1 row)
 ROLLBACK;
 SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
+ shardid | shardstate | shardlength | nodename | nodeport | placementid
 ---------------------------------------------------------------------
 (0 rows)
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id'); @@ -94,19 +94,19 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -114,9 +114,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id'); @@ -125,19 +125,19 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -148,9 +148,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table', 'id'); @@ -160,19 +160,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
COMMIT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -182,27 +182,27 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers. Note that, cancel requests will be ignored during -- shard creation. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -213,15 +213,15 @@ CREATE TABLE test_table(id int, value_1 int); -- Kill and cancel the connection with colocate_with option while sending the create table command CREATE TABLE temp_table(id int, value_1 int); SELECT create_distributed_table('temp_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); @@ -230,46 +230,46 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
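-- [editorial sketch, not part of the expected output] Every block in this suite follows the
-- same arm/act/verify/reset pattern against the mitmproxy that sits between the coordinator
-- and one worker. A minimal sketch of the pattern, assuming the citus.mitmproxy() helper this
-- harness installs and a hypothetical table named sketch_table:
--   SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN").kill()'); -- arm: kill on the next BEGIN
--   SELECT create_distributed_table('sketch_table', 'id');         -- act: expected to ERROR
--   SELECT count(*) FROM pg_dist_shard;                            -- verify: no metadata leaked
--   SELECT citus.mitmproxy('conn.allow()');                        -- reset: restore traffic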
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -277,55 +277,55 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) (2 rows) SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - 
run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -338,11 +338,11 @@ DROP SCHEMA failure_create_table; CREATE SCHEMA failure_create_table; CREATE TABLE test_table(id int, value_1 int); -- Test inside transaction --- Kill connection before sending query to the worker +-- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -353,19 +353,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -377,9 +377,9 @@ CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- Now, kill the connection while creating transaction on workers in transaction. SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -390,19 +390,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -413,9 +413,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- shard creation again in transaction if we're not relying on the -- executor. 
So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -423,25 +423,25 @@ SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -456,9 +456,9 @@ CREATE TABLE test_table(id int, value_1 int); SET citus.multi_shard_commit_protocol TO "1pc"; -- Kill connection before sending query to the worker with 1pc. SELECT citus.mitmproxy('conn.kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -469,19 +469,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -489,9 +489,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill connection while sending create table command with 1pc. SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -502,19 +502,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -526,9 +526,9 @@ CREATE TYPE schema_proc AS (a int); DROP TYPE schema_proc; -- Now, kill the connection while opening transactions on workers with 1pc. Transaction will be opened due to BEGIN. 
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -539,19 +539,19 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -561,9 +561,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- workers with 1pc. Note that, cancel requests will be ignored during -- shard creation unless the executor is used. So, we'll have two output files SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -571,25 +571,25 @@ SELECT create_distributed_table('test_table','id'); ERROR: canceling statement due to user request COMMIT; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -602,16 +602,16 @@ CREATE SCHEMA failure_create_table; SET citus.multi_shard_commit_protocol TO "2pc"; CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Kill connection before sending query to the worker SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); @@ -620,25 +620,25 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
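-- [editorial sketch, not part of the expected output] The cancel() variants splice the
-- coordinator backend's own pid into the proxy rule, so the proxy can fire a cancellation at
-- exactly that backend when the matched packet crosses it; the session then fails with
-- "canceling statement due to user request". A minimal sketch of how the rule string is
-- assembled (sketch_table is hypothetical):
--   SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')');
--   SELECT create_distributed_table('sketch_table', 'id'); -- act: expected to be canceled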
SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -646,28 +646,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Kill the connection after worker sends "PREPARE TRANSACTION" ack SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) @@ -675,34 +675,34 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W -- Cancel the connection after sending prepare transaction in master_create_worker_shards SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); ERROR: canceling statement due to user request -- Show that there is no pending transaction SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 1 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_shard; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,9060,t,0) (localhost,57637,t,0) diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 5c962dbcb..6ae86cfd9 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -7,23 +7,23 @@ SELECT pg_backend_pid() as pid \gset CREATE 
TABLE users_table (user_id int, user_name text); CREATE TABLE events_table(user_id int, event_id int, event_type int); SELECT create_distributed_table('users_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('events_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE users_table_local AS SELECT * FROM users_table; -- kill at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -35,19 +35,19 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally @@ -55,9 +55,9 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -69,19 +69,19 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly @@ -89,9 +89,9 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
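-- [editorial note, not part of the expected output] "push" and "pull" in these comments name
-- the two directions intermediate results travel for this CTE: the coordinator first COPYs
-- the materialized cte out to the workers (push), then reads subquery rows back from them
-- (pull). Each kill/cancel pair targets one of those wire steps by matching its first bytes:
--   conn.onQuery(query="^COPY")                            -- the push of the intermediate result
--   conn.onQuery(query="SELECT user_id FROM ...")          -- one of the pulls
--   conn.onQuery(query="SELECT DISTINCT users_table.user") -- the other pull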
-- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -103,19 +103,19 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly @@ -123,9 +123,9 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -137,26 +137,26 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- cancel at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -168,26 +168,26 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- cancel at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte AS ( @@ -199,33 +199,33 @@ WITH cte AS ( ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT - count(*) -FROM +SELECT + count(*) +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id; ERROR: canceling statement due to user request -- 
distributed update tests SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- insert some rows INSERT INTO users_table VALUES (1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E'); INSERT INTO events_table VALUES (1,1,1), (1,2,1), (1,3,1), (2,1, 4), (3, 4,1), (5, 1, 2), (5, 2, 1), (5, 2,2); SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -239,7 +239,7 @@ WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURN INSERT INTO users_table SELECT * FROM cte_delete; -- verify contents are the same SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -250,9 +250,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -263,13 +263,13 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -280,9 +280,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- kill connection during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -293,13 +293,13 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -310,9 +310,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during deletion SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -320,13 +320,13 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -337,9 +337,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- cancel during insert SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); - mitmproxy + mitmproxy 
--------------------------------------------------------------------- - + (1 row) WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) @@ -347,13 +347,13 @@ INSERT INTO users_table SELECT * FROM cte_delete; ERROR: canceling statement due to user request -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM users_table ORDER BY 1, 2; - user_id | user_name + user_id | user_name --------------------------------------------------------------------- 1 | A 2 | B @@ -364,9 +364,9 @@ SELECT * FROM users_table ORDER BY 1, 2; -- test sequential delete/insert SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -380,9 +380,9 @@ DETAIL: server closed the connection unexpectedly END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP SCHEMA cte_failure CASCADE; diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index e3f442665..8b31e291c 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -1,8 +1,8 @@ --- --- Test DDL command propagation failures +-- +-- Test DDL command propagation failures -- Different dimensions we're testing: -- Replication factor, 1PC-2PC, sequential-parallel modes --- +-- CREATE SCHEMA ddl_failure; SET citus.force_max_query_parallelization TO ON; SET search_path TO 'ddl_failure'; @@ -11,9 +11,9 @@ SET citus.max_cached_conns_per_worker TO 0; -- we don't want to see the prepared transaction numbers in the warnings SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.next_shard_id TO 100800; @@ -23,17 +23,17 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -42,32 +42,32 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -76,31 +76,31 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -110,54 +110,54 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
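-- [editorial sketch, not part of the expected output] worker_apply_shard_ddl_command is the
-- per-placement wrapper the coordinator ships for DDL: it rewrites the command against the
-- shard-suffixed relation before executing it on the worker. A hedged sketch of the shape of
-- those calls, using the shard id visible in this test for illustration:
--   SELECT worker_apply_shard_ddl_command(100800, 'ddl_failure',
--          'ALTER TABLE test_table ADD COLUMN new_column INT');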
-- show that we've never committed the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request -- show that we've never committed the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- since we've killed the connection just after -- the coordinator sends the COMMIT, the command should be applied -- to the distributed table and the shards on the other worker --- however, there is no way to recover the failure on the shards +-- however, there is no way to recover the failure on the shards -- that live in the failed worker, since we're running 1PC SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") @@ -173,35 +173,35 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- cancel as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) --- interrupts are held during COMMIT/ROLLBACK, so the command +-- interrupts are held during COMMIT/ROLLBACK, so the command -- should have been applied without any issues since cancel is ignored SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT
run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -211,14 +211,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- the following tests rely on the column not existing, so drop it manually ALTER TABLE test_table DROP COLUMN new_column; --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- COMMIT command, so we'll have lots of warnings but the command -- should have been committed both on the distributed table and the placements SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -235,20 +235,20 @@ CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO ERROR; SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -256,21 +256,21 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- now cancel just after the worker sends response to +-- now cancel just after the worker sends response to -- but Postgres doesn't accept interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- the remaining tests rely on the table having new_column @@ -279,9 +279,9 @@ ALTER TABLE test_table ADD COLUMN new_column INT; -- fail just after the coordinator sends the ROLLBACK -- so the command can be rolled back SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -292,45 +292,45 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open CONTEXT:
while executing command on localhost:xxxxx --- now cancel just after the worker sends response to +-- now cancel just after the worker sends response to -- but Postgres doesn't accept interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rolled back both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -338,14 +338,14 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- now, let's test with 2PC +-- now, let's test with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; @@ -354,32 +354,32 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request.
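-- [editorial sketch, not part of the expected output] This part of the suite runs under 2PC:
-- each commit is split into PREPARE TRANSACTION on every worker followed by COMMIT PREPARED,
-- and recover_prepared_transactions() later finishes or rolls back any transaction whose
-- second phase was lost to a kill. A hedged sketch of the worker-side sequence (the gid
-- format is illustrative):
--   BEGIN;
--   ALTER TABLE test_table_100800 DROP COLUMN new_column;
--   PREPARE TRANSACTION 'citus_0_1234_5_0';
--   COMMIT PREPARED 'citus_0_1234_5_0'; -- the step the proxy kills or cancels below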
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; @@ -388,31 +388,31 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) -- kill as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; @@ -421,50 +421,50 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) -- cancel as soon as the coordinator sends worker_apply_shard_ddl_command SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) -- killing on PREPARE should be fine, everything should be rolled back SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -475,13 +475,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is rolled back SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -491,27 +491,27 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- cancelling on PREPARE should be fine, everything should be rolled back SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; -
array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -522,13 +522,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is rolled back SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -539,26 +539,26 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- killing on command complete of COMMIT PREPARED, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table DROP COLUMN new_column; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") @@ -568,13 +568,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") @@ -584,28 +584,28 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER
TABLE test_table ADD COLUMN new_column INT; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- some of the placements would be missing the new column -- since we've not commited the prepared transactions SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,value}") (localhost,9060,100802,t,"{key,value}") @@ -616,13 +616,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- we should be able to recover the transaction and -- see that the command is committed SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -634,29 +634,29 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- ROLLBACK should have failed on the distributed table and the placements SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,new_column,value} (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -664,33 +664,33 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD (localhost,57637,100803,t,"{key,new_column,value}") (4 rows) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()'); - mitmproxy + mitmproxy 
--------------------------------------------------------------------- - + (1 row) BEGIN; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- make sure that the transaction is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1; - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,9060,100800,t,"{key,new_column,value}") (localhost,9060,100802,t,"{key,new_column,value}") @@ -706,17 +706,17 @@ SET citus.shard_replication_factor = 2; DROP TABLE test_table; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -725,32 +725,32 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; ERROR: canceling statement due to user request SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; - array_agg + array_agg --------------------------------------------------------------------- {key,value} (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; @@ -759,31 +759,31 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
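-- The failure-injection pattern exercised throughout this file, spelled out
-- once as a minimal sketch (the column name below is illustrative, not part
-- of the suite): arm a one-shot fault on the proxied worker connection, run
-- the DDL, then re-open traffic with conn.allow() before the next test.
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
ALTER TABLE test_table ADD COLUMN sketch_column INT; -- expected to fail
SELECT citus.mitmproxy('conn.allow()');              -- restore normal traffic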
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- cancel as soon as the coordinator sends begin
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: canceling statement due to user request
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- kill as soon as the coordinator sends worker_apply_shard_ddl_command
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
@@ -792,55 +792,55 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- cancel as soon as the coordinator sends worker_apply_shard_ddl_command
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: canceling statement due to user request
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- killing on PREPARE should be fine, everything should be rollbacked
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- we should be able to recover the transaction and
-- see that the command is rollbacked on all workers
-- note that in this case recover_prepared_transactions()
--- sends ROLLBACK PREPARED to the workers given that 
+-- sends ROLLBACK PREPARED to the workers given that
-- the transaction has not been commited on any placement yet
SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 4
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,value}")
 (localhost,9060,100805,t,"{key,value}")
@@ -855,26 +855,26 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
-- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
-- and all the workers committed
SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,new_column,value}
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,new_column,value}")
 (localhost,9060,100805,t,"{key,new_column,value}")
@@ -888,13 +888,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
-- we shouldn't have any prepared transactions in the workers
SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 0
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,new_column,value}")
 (localhost,9060,100805,t,"{key,new_column,value}")
@@ -908,28 +908,28 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
-- kill as soon as the coordinator sends COMMIT
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table DROP COLUMN new_column;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- some of the placements would be missing the new column
-- since we've not commited the prepared transactions
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,new_column,value}")
 (localhost,9060,100805,t,"{key,new_column,value}")
@@ -944,13 +944,13 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
-- we should be able to recover the transaction and
-- see that the command is committed
SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 4
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,value}")
 (localhost,9060,100805,t,"{key,value}")
@@ -966,29 +966,29 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
-- fail just after the coordinator sends the ROLLBACK
-- so the command can be rollbacked
SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
ALTER TABLE test_table ADD COLUMN new_column INT;
ROLLBACK;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- ROLLBACK should have failed on the distributed table and the placements
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,value}")
 (localhost,9060,100805,t,"{key,value}")
@@ -1000,33 +1000,33 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
 (localhost,57637,100807,t,"{key,value}")
(8 rows)

--- but now kill just after the worker sends response to 
+-- but now kill just after the worker sends response to
-- ROLLBACK command, so we'll have lots of warnings but the command
-- should have been rollbacked both on the distributed table and the placements
SELECT citus.mitmproxy('conn.onCommandComplete(command="ROLLBACK").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
ALTER TABLE test_table ADD COLUMN new_column INT;
ROLLBACK;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- make sure that the transaction is rollbacked
SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 0
(1 row)

SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
- run_command_on_placements 
+ run_command_on_placements
---------------------------------------------------------------------
 (localhost,9060,100804,t,"{key,value}")
 (localhost,9060,100805,t,"{key,value}")
@@ -1042,9 +1042,9 @@ SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORD
SET citus.multi_shard_modify_mode TO 'sequential';
-- kill as soon as the coordinator sends begin
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
@@ -1053,31 +1053,31 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- cancel as soon as the coordinator sends begin
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: canceling statement due to user request
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
- array_agg 
+ array_agg
---------------------------------------------------------------------
 {key,value}
(1 row)

-- kill as soon as the coordinator sends worker_apply_shard_ddl_command
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
@@ -1087,9 +1087,9 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
@@ -1099,9 +1099,9 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

ALTER TABLE test_table ADD COLUMN new_column INT;
diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out
index ce83c58de..b92b49b36 100644
--- a/src/test/regress/expected/failure_insert_select_pushdown.out
+++ b/src/test/regress/expected/failure_insert_select_pushdown.out
@@ -4,9 +4,9 @@
-- performs failure/cancellation test for insert/select pushed down to shards.
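-- Note on the trigger regexes in this file: because source and destination
-- share a distribution column, the INSERT ... SELECT is pushed down and each
-- worker receives a shard-qualified INSERT under this schema, so a pattern
-- anchored on the schema name is enough to intercept it. A minimal sketch:
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()');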
--
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

CREATE SCHEMA insert_select_pushdown;
@@ -17,20 +17,20 @@ SELECT pg_backend_pid() as pid \gset
CREATE TABLE events_table(user_id int, event_id int, event_type int);
CREATE TABLE events_summary(user_id int, event_id int, event_count int);
SELECT create_distributed_table('events_table', 'user_id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_distributed_table('events_summary', 'user_id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2);
-SELECT count(*) FROM events_summary; 
- count 
+SELECT count(*) FROM events_summary;
+ count
---------------------------------------------------------------------
 0
(1 row)

@@ -38,9 +38,9 @@ SELECT count(*) FROM events_summary;
-- insert/select from one distributed table to another
-- kill worker query
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
@@ -50,51 +50,51 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_summary;
- count 
+ count
---------------------------------------------------------------------
 0
(1 row)

-- cancel worker query
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_summary;
- count 
+ count
---------------------------------------------------------------------
 0
(1 row)

-- test self insert/select
SELECT count(*) FROM events_table;
- count 
+ count
---------------------------------------------------------------------
 8
(1 row)

-- kill worker query
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_table SELECT * FROM events_table;
@@ -104,44 +104,44 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
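-- The cancel() variants in this file rely on the coordinator backend pid
-- captured earlier with \gset; mitmproxy then issues a cancellation request
-- against that backend instead of dropping the connection. The wiring:
SELECT pg_backend_pid() AS pid \gset
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')');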
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_table;
- count 
+ count
---------------------------------------------------------------------
 8
(1 row)

-- cancel worker query
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_table SELECT * FROM events_table;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_table;
- count 
+ count
---------------------------------------------------------------------
 8
(1 row)

RESET SEARCH_PATH;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DROP SCHEMA insert_select_pushdown CASCADE;
diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out
index 85a357ea5..b46a73c27 100644
--- a/src/test/regress/expected/failure_insert_select_via_coordinator.out
+++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out
@@ -14,32 +14,32 @@ CREATE TABLE events_summary(event_id int, event_type int, event_count int);
CREATE TABLE events_reference(event_type int, event_count int);
CREATE TABLE events_reference_distributed(event_type int, event_count int);
SELECT create_distributed_table('events_table', 'user_id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_distributed_table('events_summary', 'event_id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_reference_table('events_reference');
- create_reference_table 
+ create_reference_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_distributed_table('events_reference_distributed', 'event_type');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2);
-SELECT count(*) FROM events_summary; 
- count 
+SELECT count(*) FROM events_summary;
+ count
---------------------------------------------------------------------
 0
(1 row)

@@ -47,9 +47,9 @@ SELECT count(*) FROM events_summary;
-- insert/select from one distributed table to another
-- kill coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
@@ -59,9 +59,9 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)
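-- Unlike the pushdown file, these INSERT ... SELECT commands run via the
-- coordinator: rows are pulled from the source shards with COPY and then
-- pushed into the destination shards with another COPY, so each phase can be
-- targeted separately. The broad pull-phase trigger, for comparison:
-- SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');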
INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
@@ -71,31 +71,31 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
ERROR: canceling statement due to user request
-- cancel data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_summary;
- count 
+ count
---------------------------------------------------------------------
 0
(1 row)

@@ -103,9 +103,9 @@ SELECT count(*) FROM events_summary;
-- insert into reference table from a distributed table
-- kill coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
@@ -115,9 +115,9 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
@@ -127,31 +127,31 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
ERROR: canceling statement due to user request
-- cancel data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_reference;
- count 
+ count
---------------------------------------------------------------------
 0
(1 row)

@@ -161,9 +161,9 @@ SELECT count(*) FROM events_reference;
INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
-- kill coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
@@ -173,9 +173,9 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
@@ -185,40 +185,40 @@ ERROR: server closed the connection unexpectedly
CONTEXT: while executing command on localhost:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: canceling statement due to user request
-- cancel data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT count(*) FROM events_reference_distributed;
- count 
+ count
---------------------------------------------------------------------
 0
(1 row)

RESET SEARCH_PATH;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DROP SCHEMA coordinator_insert_select CASCADE;
diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out
index abc9c7896..89f48536a 100644
--- a/src/test/regress/expected/failure_multi_dml.out
+++ b/src/test/regress/expected/failure_multi_dml.out
@@ -1,7 +1,7 @@
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SET citus.shard_count = 2;
@@ -10,25 +10,25 @@ SET citus.next_shard_id TO 103400;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100;
CREATE TABLE dml_test (id integer, name text);
SELECT create_distributed_table('dml_test', 'id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

COPY dml_test FROM STDIN WITH CSV;
SELECT citus.clear_network_traffic();
- clear_network_traffic 
+ clear_network_traffic
---------------------------------------------------------------------
- 
+
(1 row)

---- test multiple statements spanning multiple shards,
---- at each significant point. These transactions are 2pc
-- fail at DELETE
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -48,7 +48,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes performed in failed transaction
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -58,9 +58,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at DELETE
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -77,7 +77,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes performed in failed transaction
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -87,9 +87,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- fail at INSERT
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -107,7 +107,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes before failed INSERT
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -117,9 +117,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at INSERT
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -134,7 +134,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes before failed INSERT
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -144,9 +144,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- fail at UPDATE
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -163,7 +163,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes after failed UPDATE
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -173,9 +173,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at UPDATE
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -189,7 +189,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
--- shouldn't see any changes after failed UPDATE
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -199,9 +199,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- fail at PREPARE TRANSACTION
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- this transaction block will be sent to the coordinator as a remote command to hide the
@@ -221,31 +221,31 @@ COMMIT;
'],
false
);
- master_run_on_worker 
+ master_run_on_worker
---------------------------------------------------------------------
 (localhost,57636,t,BEGIN)
(1 row)

SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
- shardid 
+ shardid
---------------------------------------------------------------------
(0 rows)

SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 0
(1 row)

-- shouldn't see any changes after failed PREPARE
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -255,9 +255,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at PREPARE TRANSACTION
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- we'll test for the txn side-effects to ensure it didn't run
@@ -270,25 +270,25 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
- shardid 
+ shardid
---------------------------------------------------------------------
(0 rows)

SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 0
(1 row)

-- shouldn't see any changes after failed PREPARE
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -298,9 +298,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- fail at COMMIT
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- hide the error message (it has the PID)...
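-- The two checks repeated throughout this file are worth a standalone sketch:
-- shardstate = 3 marks a placement that a failure left unhealthy, and
-- recover_prepared_transactions() resolves any prepared (2PC) transaction a
-- failure left behind on the workers.
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
SELECT recover_prepared_transactions();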
@@ -315,25 +315,25 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
SET client_min_messages TO DEFAULT;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
- shardid 
+ shardid
---------------------------------------------------------------------
(0 rows)

SELECT recover_prepared_transactions();
- recover_prepared_transactions 
+ recover_prepared_transactions
---------------------------------------------------------------------
 1
(1 row)

-- should see changes, because of txn recovery
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 3 | gamma
 4 | Delta
@@ -342,9 +342,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at COMMITs are ignored by Postgres
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -356,7 +356,7 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
-- should see changes, because cancellation is ignored
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 3 | gamma
 4 | Delta
@@ -370,18 +370,18 @@ SET citus.shard_count = 1;
SET citus.shard_replication_factor = 2; -- two placements
CREATE TABLE dml_test (id integer, name text);
SELECT create_distributed_table('dml_test', 'id');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

COPY dml_test FROM STDIN WITH CSV;
---- test multiple statements against a single shard, but with two placements
-- fail at COMMIT (actually COMMIT this time, as no 2pc in use)
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -398,7 +398,7 @@ WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
--- should see all changes, but they only went to one placement (other is unhealthy)
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 3 | gamma
 4 | Delta
@@ -406,15 +406,15 @@ SELECT * FROM dml_test ORDER BY id ASC;
(3 rows)

SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
- shardid 
+ shardid
---------------------------------------------------------------------
 103402
(1 row)

SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- drop table and recreate as reference table
@@ -423,17 +423,17 @@ SET citus.shard_count = 2;
SET citus.shard_replication_factor = 1;
CREATE TABLE dml_test (id integer, name text);
SELECT create_reference_table('dml_test');
- create_reference_table 
+ create_reference_table
---------------------------------------------------------------------
- 
+
(1 row)

COPY dml_test FROM STDIN WITH CSV;
-- fail at COMMIT (by failing to PREPARE)
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -447,7 +447,7 @@ ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
--- shouldn't see any changes after failed COMMIT
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -457,9 +457,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- cancel at COMMIT (by cancelling on PREPARE)
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").cancel(' || pg_backend_pid() || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

BEGIN;
@@ -472,7 +472,7 @@ COMMIT;
ERROR: canceling statement due to user request
--- shouldn't see any changes after cancelled PREPARE
SELECT * FROM dml_test ORDER BY id ASC;
- id | name 
+ id | name
---------------------------------------------------------------------
 1 | Alpha
 2 | Beta
@@ -482,9 +482,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
-- allow connection to allow DROP
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DROP TABLE dml_test;
diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out
index f13fe7bfe..a5ab9b357 100644
--- a/src/test/regress/expected/failure_multi_row_insert.out
+++ b/src/test/regress/expected/failure_multi_row_insert.out
@@ -10,23 +10,23 @@ SET citus.next_shard_id TO 301000;
SET citus.shard_replication_factor TO 1;
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

CREATE TABLE distributed_table(key int, value int);
CREATE TABLE reference_table(value int);
SELECT create_distributed_table('distributed_table', 'key');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_reference_table('reference_table');
- create_reference_table 
+ create_reference_table
---------------------------------------------------------------------
- 
+
(1 row)

-- we'll test failure cases of the following cases:
@@ -37,9 +37,9 @@ SELECT create_reference_table('reference_table');
-- (e) multi-row INSERT to a reference table
-- Failure and cancellation on multi-row INSERT that hits the same shard with the same value
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
@@ -52,9 +52,9 @@ DETAIL: server closed the connection unexpectedly
-- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6);
-- Failure and cancellation on multi-row INSERT that hits the same shard with different values
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,7), (5,8);
@@ -67,9 +67,9 @@ DETAIL: server closed the connection unexpectedly
-- INSERT INTO distributed_table VALUES (1,9), (5,10);
-- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,11), (6,12);
@@ -78,18 +78,18 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,13), (6,14);
ERROR: canceling statement due to user request
-- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker, happening on the second query
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,15), (6,16);
@@ -98,18 +98,18 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,17), (6,18);
ERROR: canceling statement due to user request
-- Failure and cancellation multi-row INSERT that hits multiple shards in multiple workers
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (2,19),(1,20);
@@ -118,54 +118,54 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (2,21), (1,22);
ERROR: canceling statement due to user request
-- one test for the reference tables for completeness
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO reference_table VALUES (1), (2), (3), (4);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2);
ERROR: canceling statement due to user request
-- cancel the second insert over the same connection
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2);
ERROR: canceling statement due to user request
-- we've either failed or cancelled all queries, so should be empty
SELECT * FROM distributed_table;
- key | value 
+ key | value
---------------------------------------------------------------------
(0 rows)

SELECT * FROM reference_table;
- value 
+ value
---------------------------------------------------------------------
(0 rows)

SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

RESET SEARCH_PATH;
diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out
index eaefb686e..cebd7f8c6 100644
--- a/src/test/regress/expected/failure_multi_shard_update_delete.out
+++ b/src/test/regress/expected/failure_multi_shard_update_delete.out
@@ -9,30 +9,30 @@ SET citus.shard_replication_factor TO 1;
-- do not cache any connections
SET citus.max_cached_conns_per_worker TO 0;
SELECT citus.mitmproxy('conn.allow()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

CREATE TABLE t1(a int PRIMARY KEY, b int, c int);
CREATE TABLE r1(a int, b int PRIMARY KEY);
CREATE TABLE t2(a int REFERENCES t1(a) ON DELETE CASCADE, b int REFERENCES r1(b) ON DELETE CASCADE, c int);
SELECT create_distributed_table('t1', 'a');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_reference_table('r1');
- create_reference_table 
+ create_reference_table
---------------------------------------------------------------------
- 
+
(1 row)

SELECT create_distributed_table('t2', 'a');
- create_distributed_table 
+ create_distributed_table
---------------------------------------------------------------------
- 
+
(1 row)

-- insert some data
@@ -41,13 +41,13 @@ INSERT INTO t1 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
INSERT INTO t2 VALUES (1, 1, 1), (1, 2, 1), (2, 1, 2), (2, 2, 4), (3, 1, 3), (3, 2, 3), (3, 3, 3);
SELECT pg_backend_pid() as pid \gset
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

SHOW citus.multi_shard_commit_protocol ;
- citus.multi_shard_commit_protocol 
+ citus.multi_shard_commit_protocol
---------------------------------------------------------------------
 2pc
(1 row)

@@ -56,9 +56,9 @@ SHOW citus.multi_shard_commit_protocol ;
-- delete using a filter on non-partition column filter
-- test both kill and cancellation
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- issue a multi shard delete
@@ -69,16 +69,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- verify nothing is deleted
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

-- kill just one connection
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DELETE FROM t2 WHERE b = 2;
@@ -88,16 +88,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
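-- The trigger regex picks the blast radius here: "DELETE FROM" matches the
-- DELETE sent to every shard, while a shard-qualified name such as
-- multi_shard.t2_201005 kills only that single placement's connection, as in
-- this sketch (shard id 201005 comes from this run's next_shard_id setting):
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');
DELETE FROM t2 WHERE b = 2; -- one shard fails, so the whole command aborts
SELECT citus.mitmproxy('conn.allow()');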
-- verify nothing is deleted
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

-- cancellation
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- issue a multi shard delete
@@ -105,23 +105,23 @@ DELETE FROM t2 WHERE b = 2;
ERROR: canceling statement due to user request
-- verify nothing is deleted
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

-- cancel just one connection
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DELETE FROM t2 WHERE b = 2;
ERROR: canceling statement due to user request
-- verify nothing is deleted
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

@@ -132,15 +132,15 @@ SELECT count(*) FROM t2;
-- delete using a filter on non-partition column filter
-- test both kill and cancellation
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
- b2 | c4 
+ b2 | c4
---------------------------------------------------------------------
 3 | 1
(1 row)

SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- issue a multi shard update
@@ -151,16 +151,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- verify nothing is updated
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
- b2 | c4 
+ b2 | c4
---------------------------------------------------------------------
 3 | 1
(1 row)

-- kill just one connection
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

UPDATE t2 SET c = 4 WHERE b = 2;
@@ -170,16 +170,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- verify nothing is updated
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
- b2 | c4 
+ b2 | c4
---------------------------------------------------------------------
 3 | 1
(1 row)

-- cancellation
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- issue a multi shard update
@@ -187,23 +187,23 @@ UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: canceling statement due to user request
-- verify nothing is updated
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
- b2 | c4 
+ b2 | c4
---------------------------------------------------------------------
 3 | 1
(1 row)

-- cancel just one connection
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: canceling statement due to user request
-- verify nothing is updated
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
- b2 | c4 
+ b2 | c4
---------------------------------------------------------------------
 3 | 1
(1 row)

@@ -214,9 +214,9 @@ SET citus.multi_shard_commit_protocol TO '1PC';
-- delete using a filter on non-partition column filter
-- test both kill and cancellation
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

-- issue a multi shard delete
@@ -227,16 +227,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
-- verify nothing is deleted
SELECT count(*) FROM t2;
- count 
+ count
---------------------------------------------------------------------
 7
(1 row)

-- kill just one connection
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');
- mitmproxy 
+ mitmproxy
---------------------------------------------------------------------
- 
+
(1 row)

DELETE FROM t2 WHERE b = 2;
@@ -246,16 +246,16 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
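-- The preceding DELETE cases and the UPDATE cases that follow repeat the
-- earlier kill/cancel matrix under one-phase commit; the only moving part is
-- the commit-protocol GUC, roughly:
SET citus.multi_shard_commit_protocol TO '1PC';
-- ... re-run the same DELETE/UPDATE failure cases ...
RESET citus.multi_shard_commit_protocol;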
-- verify nothing is deleted SELECT count(*) FROM t2; - count + count --------------------------------------------------------------------- 7 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- issue a multi shard delete @@ -263,23 +263,23 @@ DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count + count --------------------------------------------------------------------- 7 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DELETE FROM t2 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is deleted SELECT count(*) FROM t2; - count + count --------------------------------------------------------------------- 7 (1 row) @@ -290,15 +290,15 @@ SELECT count(*) FROM t2; -- delete using a filter on non-partition column filter -- test both kill and cancellation SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 + b2 | c4 --------------------------------------------------------------------- 3 | 1 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- issue a multi shard update @@ -309,16 +309,16 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 + b2 | c4 --------------------------------------------------------------------- 3 | 1 (1 row) -- kill just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; @@ -328,16 +328,16 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
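From here the file re-runs the same kill/cancel matrix under 1PC commit; the point is that even without two-phase commit, a failure before COMMIT must still roll back atomically. The toggle is just a GUC, roughly:

SET citus.multi_shard_commit_protocol TO '1PC';  -- re-run the DELETE/UPDATE failure matrix
-- ... injected failures and verifications, as above ...
RESET citus.multi_shard_commit_protocol;         -- back to the default 2PC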
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 + b2 | c4 --------------------------------------------------------------------- 3 | 1 (1 row) -- cancellation SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- issue a multi shard update @@ -345,23 +345,23 @@ UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 + b2 | c4 --------------------------------------------------------------------- 3 | 1 (1 row) -- cancel just one connection SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE t2 SET c = 4 WHERE b = 2; ERROR: canceling statement due to user request -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; - b2 | c4 + b2 | c4 --------------------------------------------------------------------- 3 | 1 (1 row) @@ -377,22 +377,22 @@ RESET citus.multi_shard_commit_protocol; -- it is safe to remove them without reducing any -- test coverage SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- check counts before delete SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 + b2 --------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DELETE FROM r1 WHERE a = 2; @@ -402,15 +402,15 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 + b2 --------------------------------------------------------------------- 3 (1 row) SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DELETE FROM t2 WHERE b = 2; @@ -420,28 +420,28 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b2 + b2 --------------------------------------------------------------------- 3 (1 row) -- test update with subquery pull SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c + a | b | c --------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 @@ -453,9 +453,9 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; (7 rows) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE t3 SET c = q.c FROM ( @@ -468,13 +468,13 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c + a | b | c --------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 @@ -487,9 +487,9 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; -- kill update part SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE multi_shard.t3_201009").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE t3 SET c = q.c FROM ( @@ -502,13 +502,13 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM t3 ORDER BY 1, 2, 3; - a | b | c + a | b | c --------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 @@ -524,31 +524,31 @@ SELECT * FROM t3 ORDER BY 1, 2, 3; -- use a different set of table SET citus.shard_replication_factor to 2; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE t3; CREATE TABLE t3 AS SELECT * FROM t2; SELECT create_distributed_table('t3', 'a'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) -- prevent update of one replica of one shard SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE t3 SET b = 2 WHERE b = 1; @@ -558,7 +558,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
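The subquery-pull UPDATE above has two network phases, which is why the tests can target either one: the coordinator first ships the materialized subquery result to the workers (the ^COPY kill), then runs the per-shard UPDATEs (the UPDATE multi_shard.t3_201009 kill). The exact subquery is truncated in this output; a hypothetical query of the same shape would be:

UPDATE t3
SET c = q.c
FROM (SELECT b, max(c) AS c FROM t2 GROUP BY b) q  -- hypothetical subquery that cannot be pushed down
WHERE t3.b = q.b;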
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -566,13 +566,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- fail only one update verify transaction is rolled back correctly BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -580,7 +580,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 0 | 6 (1 row) @@ -594,13 +594,13 @@ DETAIL: server closed the connection unexpectedly END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -612,7 +612,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -620,7 +620,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- switch to 1PC SET citus.multi_shard_commit_protocol TO '1PC'; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -632,7 +632,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
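The transaction test above is the heart of the file: one statement succeeds, the next is killed, and ending the transaction must undo both. A condensed sketch, assuming the proxy is still killing UPDATE multi_shard.t3_201013 as armed above:

BEGIN;
UPDATE t2 SET b = 2 WHERE b = 1;   -- succeeds; b1|b2 flips to 0|6 inside the transaction
UPDATE t3 SET b = 2 WHERE b = 1;   -- one placement killed by the proxy
END;                               -- the whole transaction aborts
SELECT count(*) FILTER (WHERE b = 1) AS b1,
       count(*) FILTER (WHERE b = 2) AS b2
FROM t2;                           -- back to 3 | 3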
-- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -640,13 +640,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- fail only one update verify transaction is rolled back correctly BEGIN; SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -654,7 +654,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO UPDATE t2 SET b = 2 WHERE b = 1; -- verify update is performed on t2 SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 0 | 6 (1 row) @@ -668,21 +668,21 @@ DETAIL: server closed the connection unexpectedly END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; - b1 | b2 + b1 | b2 --------------------------------------------------------------------- 3 | 3 (1 row) SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) RESET SEARCH_PATH; diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 54046c3c1..a4aeb7704 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -9,39 +9,39 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE t1 (id int PRIMARY KEY); SELECT create_distributed_table('t1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x); -- Initial metadata status SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) -- Failure to set groupid in the worker SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); @@ -51,17 +51,17 @@ ERROR: server closed the 
connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- Failure to drop all tables in pg_dist_partition SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); @@ -71,17 +71,17 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- Failure to truncate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); @@ -91,17 +91,17 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- Failure to populate pg_dist_node in the worker SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); @@ -111,26 +111,26 @@ ERROR: server closed the connection unexpectedly CONTEXT: while executing command on localhost:xxxxx -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) -- Verify we can sync metadata after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- t (1 row) @@ -138,9 +138,9 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_placement").kill()'); - mitmproxy + mitmproxy 
--------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('t2', 'id'); @@ -149,9 +149,9 @@ ERROR: server closed the connection unexpectedly before or while processing the request. CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('t2', 'id'); @@ -160,7 +160,7 @@ ERROR: canceling statement due to user request SELECT count(*) > 0 AS is_table_distributed FROM pg_dist_partition WHERE logicalrelid='t2'::regclass; - is_table_distributed + is_table_distributed --------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 3f9d39ee4..a397f8dfd 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -1,35 +1,35 @@ SET citus.next_shard_id TO 100500; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE ref_table (key int, value int); SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \copy ref_table FROM stdin delimiter ','; SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SELECT COUNT(*) FROM ref_table; - count + count --------------------------------------------------------------------- 4 (1 row) -- verify behavior of single INSERT; should fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) INSERT INTO ref_table VALUES (5, 6); @@ -38,16 +38,16 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=5; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify behavior of UPDATE ... RETURNING; should not execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE ref_table SET key=7 RETURNING value; @@ -56,16 +56,16 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT COUNT(*) FROM ref_table WHERE key=7; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify fix to #2214; should raise error and fail to execute SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -77,23 +77,23 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
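failure_ref_tables checks the strictest case: a reference table has a placement on every node, so there is no healthy replica to fall back to, and a single killed connection must fail the whole statement without marking any placement invalid. The verification idiom, as used above:

SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
INSERT INTO ref_table VALUES (5, 6);                                -- fails outright
SELECT count(*) FROM ref_table WHERE key = 5;                       -- 0: nothing written
SELECT count(*) FROM pg_dist_shard_placement WHERE shardstate = 3;  -- 0: no placement marked bad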
COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; - count + count --------------------------------------------------------------------- 0 (1 row) -- all shards should still be healthy SELECT COUNT(*) FROM pg_dist_shard_placement WHERE shardstate = 3; - count + count --------------------------------------------------------------------- 0 (1 row) -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE ref_table; diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index c43dc832c..a2a113e9a 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -4,9 +4,9 @@ -- the placement commands fail. Otherwise, we might mark the placement -- as invalid and continue with a WARNING. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 2; @@ -18,9 +18,9 @@ CREATE TABLE artists ( name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- add some data @@ -30,9 +30,9 @@ INSERT INTO artists VALUES (3, 'Claude Monet'); INSERT INTO artists VALUES (4, 'William Kurelek'); -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -54,16 +54,16 @@ RELEASE SAVEPOINT s1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM artists WHERE id IN (4, 5); - id | name + id | name --------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -86,16 +86,16 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx ROLLBACK; SELECT * FROM artists WHERE id IN (4, 5); - id | name + id | name --------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at ROLLBACK SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -110,16 +110,16 @@ CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); - id | name + id | name --------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second RELEASE SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -142,16 +142,16 @@ ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx COMMIT; SELECT * FROM artists WHERE id IN (4, 5); - id | name + id | name --------------------------------------------------------------------- 4 | William Kurelek (1 row) -- fail at second ROLLBACK SELECT 
citus.mitmproxy('conn.onQuery(query="^ROLLBACK").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -168,15 +168,15 @@ CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id IN (4, 5); - id | name + id | name --------------------------------------------------------------------- 4 | William Kurelek (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- Release after rollback @@ -191,14 +191,14 @@ ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- Recover from errors @@ -219,7 +219,7 @@ WARNING: connection not open COMMIT; ERROR: could not make changes to shard xxxxx on any node SELECT * FROM artists WHERE id=6; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -232,16 +232,16 @@ CREATE TABLE researchers ( SET citus.shard_count = 1; SET citus.shard_replication_factor = 2; -- single shard, on both workers SELECT create_distributed_table('researchers', 'lab_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- simply fail at SAVEPOINT SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -261,7 +261,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- (0 rows) @@ -269,16 +269,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at rollback SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -293,7 +293,7 @@ COMMIT; ERROR: failure on connection marked as essential: localhost:xxxxx -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- (0 rows) @@ -301,16 +301,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- fail at release SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()'); - 
mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -329,7 +329,7 @@ ERROR: connection not open COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- (0 rows) @@ -337,16 +337,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardstate = 3 AND shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass ) RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- (0 rows) TRUNCATE researchers; -- clean up SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE artists; diff --git a/src/test/regress/expected/failure_setup.out b/src/test/regress/expected/failure_setup.out index b9e2a708d..4cbb4b0a4 100644 --- a/src/test/regress/expected/failure_setup.out +++ b/src/test/regress/expected/failure_setup.out @@ -1,18 +1,18 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- add the workers SELECT master_add_node('localhost', :worker_1_port); - master_add_node + master_add_node --------------------------------------------------------------------- 1 (1 row) SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker - master_add_node + master_add_node --------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index 254dad7fa..65c5ffbef 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -1,29 +1,29 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 2; SET citus.shard_replication_factor = 2; CREATE TABLE mod_test (key int, value text); SELECT create_distributed_table('mod_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify behavior of single INSERT; should mark shard as failed SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) INSERT INTO mod_test VALUES (2, 6); @@ -32,7 +32,7 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
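failure_single_mod runs with shard_replication_factor = 2, so a killed placement produces a WARNING rather than an ERROR: the write lands on the healthy replica and the failed placement is marked invalid (shardstate 3), then repaired by hand. The repeating idiom, condensed from the blocks around it:

INSERT INTO mod_test VALUES (2, 6);            -- WARNING on the proxied placement only
SELECT count(*) FROM mod_test WHERE key = 2;   -- 1: the healthy replica took the row
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (SELECT shardid FROM pg_dist_shard
                  WHERE logicalrelid = 'mod_test'::regclass)
  AND shardstate = 3
RETURNING placementid;                         -- put the failed placement back in rotation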
SELECT COUNT(*) FROM mod_test WHERE key=2; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -42,7 +42,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- 125 (1 row) @@ -50,16 +50,16 @@ WHERE shardid IN ( TRUNCATE mod_test; -- verify behavior of UPDATE ... RETURNING; should mark as failed SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) INSERT INTO mod_test VALUES (2, 6); SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; @@ -67,13 +67,13 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key + key --------------------------------------------------------------------- 2 (1 row) SELECT COUNT(*) FROM mod_test WHERE value='ok'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -83,7 +83,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- 125 (1 row) @@ -92,9 +92,9 @@ TRUNCATE mod_test; -- verify behavior of multi-statement modifications to a single shard -- should succeed but mark a placement as failed SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -108,7 +108,7 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. 
COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -118,7 +118,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass ) AND shardstate = 3 RETURNING placementid; - placementid + placementid --------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index c0519d91e..d5087779f 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -1,30 +1,30 @@ SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 2; SET citus.shard_replication_factor = 2; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- put data in shard for which mitm node is first placement INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM select_test WHERE key = 3; @@ -32,7 +32,7 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value + key | value --------------------------------------------------------------------- 3 | test data (1 row) @@ -42,16 +42,16 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value + key | value --------------------------------------------------------------------- 3 | test data (1 row) -- kill after first SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -61,7 +61,7 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. - key | value + key | value --------------------------------------------------------------------- 3 | test data 3 | more data @@ -73,7 +73,7 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
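failure_single_select shows the read-side counterpart: with two placements, a dropped connection only costs a WARNING and the query is retried on the other replica, so the rows still come back. Roughly:

SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
SELECT * FROM select_test WHERE key = 3;  -- WARNING: connection error ... then the row is returned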
- key | value + key | value --------------------------------------------------------------------- 3 | test data 3 | more data @@ -91,9 +91,9 @@ TRUNCATE select_test; -- put data in shard for which mitm node is first placement INSERT INTO select_test VALUES (3, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM select_test WHERE key = 3; @@ -102,9 +102,9 @@ SELECT * FROM select_test WHERE key = 3; ERROR: canceling statement due to user request -- cancel after first SELECT; txn should fail and nothing should be marked as invalid SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -117,7 +117,7 @@ SELECT DISTINCT shardstate FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass ); - shardstate + shardstate --------------------------------------------------------------------- 1 (1 row) @@ -126,15 +126,15 @@ TRUNCATE select_test; -- cancel the second query -- error after second SELECT; txn should fail SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; - key | value + key | value --------------------------------------------------------------------- 3 | more data (1 row) @@ -145,15 +145,15 @@ ERROR: canceling statement due to user request COMMIT; -- error after second SELECT; txn should work (though placement marked bad) SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; - key | value + key | value --------------------------------------------------------------------- 3 | more data (1 row) @@ -164,7 +164,7 @@ WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
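The kill and cancel variants above probe different layers: .kill() severs the TCP connection, a failure Citus can survive by falling back to another placement, while .cancel(pid) delivers a cancellation to the coordinator backend itself, so the statement always aborts and no placement is blamed. The shardstate probe makes that visible:

SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
SELECT * FROM select_test WHERE key = 3;   -- ERROR: canceling statement due to user request
SELECT DISTINCT shardstate FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid FROM pg_dist_shard
                  WHERE logicalrelid = 'select_test'::regclass);  -- 1: every placement still healthy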
- key | value + key | value --------------------------------------------------------------------- 3 | more data 3 | even more data @@ -172,13 +172,13 @@ DETAIL: server closed the connection unexpectedly COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -195,21 +195,21 @@ SET citus.shard_count = 2; SET citus.shard_replication_factor = 1; CREATE TABLE select_test (key int, value text); SELECT create_distributed_table('select_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.max_cached_conns_per_worker TO 1; -- allow connection to be cached INSERT INTO select_test VALUES (1, 'test data'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM select_test WHERE key = 1; - key | value + key | value --------------------------------------------------------------------- 1 | test data (1 row) @@ -221,13 +221,13 @@ DETAIL: server closed the connection unexpectedly before or while processing the request. -- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM select_test WHERE key = 1; - key | value + key | value --------------------------------------------------------------------- 1 | test data (1 row) diff --git a/src/test/regress/expected/failure_test_helpers.out b/src/test/regress/expected/failure_test_helpers.out index e25562a67..a66749dff 100644 --- a/src/test/regress/expected/failure_test_helpers.out +++ b/src/test/regress/expected/failure_test_helpers.out @@ -5,7 +5,7 @@ ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1; ALTER SYSTEM SET citus.recover_2pc_interval TO -1; ALTER SYSTEM set citus.enable_statistics_collection TO false; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index c1b55392b..28cbe1b46 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -1,6 +1,6 @@ --- --- Test TRUNCATE command failures --- +-- +-- Test TRUNCATE command failures +-- CREATE SCHEMA truncate_failure; SET search_path TO 'truncate_failure'; SET citus.next_shard_id TO 120000; @@ -11,9 +11,9 @@ SET citus.max_cached_conns_per_worker TO 0; -- use a predictable number of connections per task SET citus.force_max_query_parallelization TO on; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- we'll start with replication factor 1, 1PC and parallel mode @@ -22,25 +22,25 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + 
create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='truncate_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -49,56 +49,56 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -107,55 +107,55 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
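Every TRUNCATE failure in this file is followed by the same three-step verification, built on the unhealthy_shard_count view defined above:

SELECT citus.mitmproxy('conn.allow()');  -- stop injecting
SELECT * FROM unhealthy_shard_count;     -- 0: no placement left in a bad state
SELECT count(*) FROM test_table;         -- 20: the TRUNCATE rolled back, data intact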
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -164,76 +164,76 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends COMMIT -- One shard should not get truncated but the other should --- since it is sent from another connection. +-- since it is sent from another connection. 
-- Thus, we should see a partially successful truncate -- Note: This is the result of using 1pc and there is no way to recover from it SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 8 (1 row) @@ -242,29 +242,29 @@ SELECT count(*) FROM test_table; TRUNCATE test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- cancel as soon as the coordinator sends COMMIT --- interrupts are held during COMMIT/ROLLBACK, so the command +-- interrupts are held during COMMIT/ROLLBACK, so the command -- should have been applied without any issues since cancel is ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -273,13 +273,13 @@ SELECT count(*) FROM test_table; TRUNCATE test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); SET client_min_messages TO WARNING; --- now kill just after the worker sends response to +-- now kill just after the worker sends response to -- COMMIT command, so we'll have lots of warnings but the command -- should have been committed both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -296,50 +296,50 @@ CONTEXT: while executing command on localhost:xxxxx WARNING: could not commit transaction for shard xxxxx on any active node WARNING: could not commit transaction for shard xxxxx on any active node SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) SET client_min_messages TO ERROR; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); --- now cancel just after the worker sends response to +-- now cancel just after the worker sends response to -- but Postgres doesn't accept interrupts during COMMIT and ROLLBACK -- so should not cancel at all, so not an effective test but adding in -- case Citus messes up this behaviour SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy 
--------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -349,43 +349,43 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); CREATE TABLE reference_table(i int UNIQUE); INSERT INTO reference_table SELECT x FROM generate_series(1,20) as f(x); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table ADD CONSTRAINT foreign_key FOREIGN KEY (value) REFERENCES reference_table(i); -- immediately kill when we see prepare transaction to see if the command -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -393,43 +393,43 @@ SELECT count(*) FROM reference_table; -- immediately cancel when we see prepare transaction to see if the command -- still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- immediately kill when we see cascading TRUNCATE on the hash table to see +-- immediately kill when we see cascading TRUNCATE on the hash table to see -- rollbacked properly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; @@ -438,59 +438,59 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while 
processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- immediately cancel when we see cascading TRUNCATE on the hash table to see +-- immediately cancel when we see cascading TRUNCATE on the hash table to see -- if the command still cascaded to referencing table or failed successfuly SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -499,34 +499,34 @@ SELECT count(*) FROM reference_table; -- to see if the command still cascaded to referencing table or -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 1 (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -535,45 +535,45 @@ SELECT count(*) FROM test_table; -- to see if the command still cascaded to referencing table or -- failed successfuly SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE reference_table CASCADE; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT * FROM unhealthy_shard_count; - count + count 
--------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- now, lets test with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -582,56 +582,56 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -640,55 +640,55 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
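For context, every scenario in this file repeats one failure-injection cycle: arm the mitmproxy sidecar with a trigger, run the statement that is expected to fail, disarm the proxy, then check that no shard placement was left unhealthy and that the data survived. A minimal sketch of that cycle, assuming the citus.mitmproxy() helper and the unhealthy_shard_count view defined earlier in this test:

SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").kill()');  -- arm the proxy
TRUNCATE test_table;                     -- expected to ERROR (connection lost)
SELECT citus.mitmproxy('conn.allow()');  -- disarm the proxy
SELECT * FROM unhealthy_shard_count;     -- expect 0: no placement marked invalid
SELECT count(*) FROM test_table;         -- expect the pre-TRUNCATE row count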
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -697,68 +697,68 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -766,34 +766,34 @@ SELECT * FROM unhealthy_shard_count; -- we should be able to revocer 
the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancelling on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -801,13 +801,13 @@ SELECT * FROM unhealthy_shard_count; -- we should be able to revocer the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -815,27 +815,27 @@ SELECT count(*) FROM test_table; -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -843,30 +843,30 @@ SELECT count(*) FROM test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) --- Since we kill connections to one worker after commit arrives but the +-- Since we kill connections to one worker after commit arrives but the -- other worker connections are healthy, we cannot commit on 1 worker -- which has 2 active shard placements, but the other does. That's why -- we expect to see 2 recovered prepared transactions. 
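The two dangling transactions come from the worker whose connections were killed after it had already executed PREPARE TRANSACTION. A sketch of how such leftovers can be inspected by hand, assuming direct psql access to a worker; pg_prepared_xacts is standard PostgreSQL, and recover_prepared_transactions() is the Citus UDF invoked next:

-- on the affected worker: two-phase transactions left behind by the kill
SELECT gid, prepared, owner FROM pg_prepared_xacts;
-- on the coordinator: commit or abort them from the transaction record,
-- returning the number of transactions handled
SELECT recover_prepared_transactions();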
SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -876,52 +876,52 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -934,25 +934,25 @@ SET citus.shard_replication_factor = 2; DROP TABLE test_table CASCADE; CREATE TABLE test_table (key int, value int); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -CREATE VIEW unhealthy_shard_count AS - SELECT count(*) - FROM pg_dist_shard_placement pdsp - JOIN - pg_dist_shard pds - ON pdsp.shardid=pds.shardid +CREATE VIEW unhealthy_shard_count AS + SELECT count(*) + FROM pg_dist_shard_placement pdsp + JOIN + pg_dist_shard pds + ON pdsp.shardid=pds.shardid WHERE logicalrelid='truncate_failure.test_table'::regclass AND shardstate != 1; --- in the first test, kill just in the first +-- in the first test, kill just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -961,56 +961,56 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
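The unhealthy_shard_count view recreated above encodes the invariant these tests assert: in pg_dist_shard_placement, shardstate 1 is a healthy placement, and a failed write demotes a placement to shardstate 3 (the failure_vacuum output further down shows such a row). An equivalent ad-hoc check, for reference:

SELECT pds.logicalrelid, pdsp.shardid, pdsp.shardstate
FROM pg_dist_shard_placement pdsp
JOIN pg_dist_shard pds ON pdsp.shardid = pds.shardid
WHERE pdsp.shardstate != 1;  -- any row means a placement was invalidated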
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- cancel just in the first +-- cancel just in the first -- response we get from the worker SELECT citus.mitmproxy('conn.onAuthenticationOk().cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -1019,55 +1019,55 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends begin SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- kill as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; @@ -1076,68 +1076,68 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
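A note on the trigger grammar used in these hunks: onQuery matches a regular expression against the statement text the worker receives, so the "^" anchor fires only on statements that start with the pattern; .after(n) lets the first n matches pass before acting; .kill() drops the connection, while .cancel(pid) sends a query cancellation for the given coordinator backend instead. Combined, as already used earlier in this file:

SELECT citus.mitmproxy(
  'conn.onQuery(query="^TRUNCATE TABLE").after(2).cancel(' || pg_backend_pid() || ')'
);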
SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- cancel as soon as the coordinator sends TRUNCATE TABLE command SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test_table").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) -- killing on PREPARE should be fine, everything should be rollbacked SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1145,13 +1145,13 @@ SELECT * FROM unhealthy_shard_count; -- we should be able to revocer the transaction and -- see that the command is rollbacked SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) @@ -1159,33 +1159,33 @@ SELECT count(*) FROM test_table; -- killing on command complete of COMMIT PREPARE, we should see that the command succeeds -- and all the workers committed SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) -- we shouldn't have any prepared transactions in the workers SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1193,37 +1193,37 @@ SELECT count(*) FROM test_table; INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- kill as soon as the coordinator sends COMMIT SELECT 
citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) TRUNCATE test_table; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) --- Since we kill connections to one worker after commit arrives but the +-- Since we kill connections to one worker after commit arrives but the -- other worker connections are healthy, we cannot commit on 1 worker -- which has 4 active shard placements (2 shards, replication factor=2), -- but the other does. That's why we expect to see 4 recovered prepared -- transactions. SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1233,64 +1233,64 @@ INSERT INTO test_table SELECT x,x FROM generate_series(1,20) as f(x); -- fail just after the coordinator sends the ROLLBACK -- so the command can be rollbacked SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) --- but now kill just after the worker sends response to +-- but now kill just after the worker sends response to -- ROLLBACK command, so we'll have lots of warnings but the command -- should have been rollbacked both on the distributed table and the placements SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) BEGIN; TRUNCATE test_table; ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SELECT * FROM unhealthy_shard_count; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 20 (1 row) diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index cb32dab8c..66801ec22 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -3,9 +3,9 @@ -- get WARNINGs instead of ERRORs. 
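The 1PC setting is what the assertions below hinge on: with no prepare phase there is nothing to recover, so when the final COMMIT is killed on one placement Citus can only warn, mark that placement invalid, and let the command stand on the healthy replica. Condensed from the hunks that follow:

SET citus.multi_shard_commit_protocol TO '1pc';
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
ANALYZE vacuum_test;  -- completes with a WARNING; the placement that missed
                      -- COMMIT is left with shardstate 3 (invalid)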
SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 1; @@ -13,21 +13,21 @@ SET citus.shard_replication_factor = 2; -- one shard per worker SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test; @@ -36,9 +36,9 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; @@ -47,17 +47,17 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; -- ANALYZE transactions being critical is an open question, see #2430 -- show that we marked as INVALID on COMMIT FAILURE -SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND +SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -68,46 +68,46 @@ WHERE shardid IN ( ); -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE other_vacuum_test (key int, value int); SELECT create_distributed_table('other_vacuum_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); - mitmproxy + mitmproxy 
--------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test, other_vacuum_test; @@ -116,18 +116,18 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: canceling statement due to user request -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE vacuum_test, other_vacuum_test; diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out index c2c93811c..c13096f6d 100644 --- a/src/test/regress/expected/failure_vacuum_1.out +++ b/src/test/regress/expected/failure_vacuum_1.out @@ -3,9 +3,9 @@ -- get WARNINGs instead of ERRORs. SET citus.next_shard_id TO 12000000; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 1; @@ -13,21 +13,21 @@ SET citus.shard_replication_factor = 2; -- one shard per worker SET citus.multi_shard_commit_protocol TO '1pc'; CREATE TABLE vacuum_test (key int, value int); SELECT create_distributed_table('vacuum_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.clear_network_traffic(); - clear_network_traffic + clear_network_traffic --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test; @@ -36,9 +36,9 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; @@ -47,17 +47,17 @@ DETAIL: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
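failure_vacuum_1.out is an alternative expected file for the same test: the regression harness accepts whichever variant matches, and this one covers servers that reject multi-relation VACUUM (accepted from PostgreSQL 11 onward), where the two-table form fails at parse time before the proxy rule can matter:

VACUUM vacuum_test, other_vacuum_test;
-- PostgreSQL 11+: runs, so the mitmproxy trigger decides the outcome
-- PostgreSQL 10:  ERROR: syntax error at or near ","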
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; -- ANALYZE transactions being critical is an open question, see #2430 -- show that we marked as INVALID on COMMIT FAILURE -SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND +SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 12000000 | 3 (1 row) @@ -68,63 +68,63 @@ WHERE shardid IN ( ); -- the same tests with cancel SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test; ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; ERROR: canceling statement due to user request -- cancel during COMMIT should be ignored SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) ANALYZE vacuum_test; SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) CREATE TABLE other_vacuum_test (key int, value int); SELECT create_distributed_table('other_vacuum_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: syntax error at or near "," SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) VACUUM vacuum_test, other_vacuum_test; ERROR: syntax error at or near "," -- ==== Clean up, we're done here ==== SELECT citus.mitmproxy('conn.allow()'); - mitmproxy + mitmproxy --------------------------------------------------------------------- - + (1 row) DROP TABLE vacuum_test, other_vacuum_test; diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out index f46f63d85..6b6713e3c 100644 --- a/src/test/regress/expected/fast_path_router_modify.out +++ b/src/test/regress/expected/fast_path_router_modify.out @@ -2,31 +2,31 @@ CREATE SCHEMA fast_path_router_modify; SET search_path TO fast_path_router_modify; SET citus.next_shard_id TO 1840000; -- all the tests in this file is intended for testing fast-path --- router planner, so we're explicitly enabling itin this file. --- We've bunch of other tests that triggers non-fast-path-router +-- router planner, so we're explicitly enabling itin this file. 
+-- We have a bunch of other tests that trigger the non-fast-path-router
 -- planner (note this is already true by default)
 SET citus.enable_fast_path_router_planner TO true;
 SET citus.shard_replication_factor TO 1;
 CREATE TABLE modify_fast_path(key int, value_1 int, value_2 text);
 SELECT create_distributed_table('modify_fast_path', 'key');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SET citus.shard_replication_factor TO 2;
 CREATE TABLE modify_fast_path_replication_2(key int, value_1 int, value_2 text);
 SELECT create_distributed_table('modify_fast_path_replication_2', 'key');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE modify_fast_path_reference(key int, value_1 int, value_2 text);
 SELECT create_reference_table('modify_fast_path_reference');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- show the output
@@ -110,7 +110,7 @@ DELETE FROM modify_fast_path WHERE key = 1 RETURNING *;
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
 DETAIL: distribution column value: 1
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -127,7 +127,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT key, value_1, value_2 FROM (SELECT intermediate_result.key, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value_1 integer, value_2 text)) t2
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -137,7 +137,7 @@ DEBUG: Distributed planning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
 DETAIL: distribution column value: 1
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -146,7 +146,7 @@ DEBUG: Distributed planning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
 DETAIL: distribution column value: 1
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -154,7 +154,7 @@ SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR UPDATE;
 DEBUG: Distributed planning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -162,7 +162,7 @@ SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR SHARE;
 DEBUG: Distributed planning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- key | value_1 | value_2 
+ key | value_1 | value_2
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -193,12 +193,12 @@ DEBUG: Distributed planning for a fast-path router query
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
 -- joins are not supported via fast-path
-UPDATE modify_fast_path 
- SET value_1 = 1 
- FROM modify_fast_path_reference 
- WHERE 
-
modify_fast_path.key = modify_fast_path_reference.key AND - modify_fast_path.key = 1 AND +UPDATE modify_fast_path + SET value_1 = 1 + FROM modify_fast_path_reference + WHERE + modify_fast_path.key = modify_fast_path_reference.key AND + modify_fast_path.key = 1 AND modify_fast_path_reference.key = 1; DEBUG: Creating router plan DEBUG: Plan is router executable @@ -252,9 +252,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 1 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(2,2); @@ -268,9 +268,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 2 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(3,3); @@ -284,9 +284,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 3 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(4,4); @@ -300,9 +300,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 4 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(5,5); @@ -316,9 +316,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 5 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(6,6); @@ -335,9 +335,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) SELECT modify_fast_path_plpsql(6,6); @@ -351,9 +351,9 @@ DEBUG: Plan is router executable DETAIL: distribution column value: 6 CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement - modify_fast_path_plpsql + modify_fast_path_plpsql --------------------------------------------------------------------- - + (1 row) RESET client_min_messages; diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out index b2d1c03a1..73df1c3f8 100644 --- 
a/src/test/regress/expected/foreign_key_restriction_enforcement.out +++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out @@ -10,30 +10,30 @@ SET citus.next_placement_id TO 2380000; SET citus.shard_replication_factor TO 1; CREATE TABLE transitive_reference_table(id int PRIMARY KEY); SELECT create_reference_table('transitive_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE unrelated_dist_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('unrelated_dist_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES reference_table(id) ON UPDATE CASCADE; @@ -47,13 +47,13 @@ SET client_min_messages TO DEBUG1; -- case 1.1: SELECT to a reference table is followed by a parallel SELECT to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -61,13 +61,13 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -76,31 +76,31 @@ ROLLBACK; -- case 1.2: SELECT to a reference table is followed by a multiple router SELECTs to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -108,31 +108,31 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 15; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 16; - count + count --------------------------------------------------------------------- 1 (1 
row) SELECT count(*) FROM on_update_fkey_table WHERE id = 17; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE id = 18; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -141,7 +141,7 @@ ROLLBACK; -- case 1.3: SELECT to a reference table is followed by a multi-shard UPDATE to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -150,7 +150,7 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -160,7 +160,7 @@ ROLLBACK; -- case 1.4: SELECT to a reference table is followed by a multiple sing-shard UPDATE to a distributed table BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -172,7 +172,7 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -185,7 +185,7 @@ ROLLBACK; -- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -197,7 +197,7 @@ DEBUG: validating foreign key constraint "fkey" ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -210,7 +210,7 @@ ROLLBACK; -- case 1.6: SELECT to a reference table is followed by an unrelated DDL BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -221,7 +221,7 @@ DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SE ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -234,7 +234,7 @@ ROLLBACK; -- the foreign key column BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -245,7 +245,7 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -258,13 +258,13 @@ ROLLBACK; -- the foreign key column after a parallel query has been executed BEGIN; SELECT count(*) FROM unrelated_dist_table; - count + count --------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -276,13 +276,13 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; - count + count --------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -296,13 +296,13 @@ ROLLBACK; -- the foreign key column, and a parallel query has already been executed BEGIN; SELECT 
count(*) FROM unrelated_dist_table; - count + count --------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -314,13 +314,13 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM unrelated_dist_table; - count + count --------------------------------------------------------------------- 1001 (1 row) SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -333,7 +333,7 @@ ROLLBACK; -- case 1.8: SELECT to a reference table is followed by a COPY BEGIN; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -342,7 +342,7 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -355,13 +355,13 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -372,13 +372,13 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -390,25 +390,25 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -419,25 +419,25 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -549,7 +549,7 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -560,7 +560,7 @@ BEGIN; DEBUG: switching to sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -570,7 +570,7 @@ ROLLBACK; BEGIN; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -579,7 +579,7 @@ ROLLBACK; BEGIN; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE int; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -706,13 +706,13 @@ ROLLBACK; -- case 4.1: SELECT to a dist table is follwed by a SELECT to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -720,13 +720,13 @@ BEGIN; ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -735,7 +735,7 @@ ROLLBACK; -- case 4.2: SELECT to a dist table is follwed by a DML to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -747,7 +747,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -760,7 +760,7 @@ ROLLBACK; -- case 4.3: SELECT to a dist table is follwed by an unrelated DDL to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -771,7 +771,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -783,7 +783,7 @@ ROLLBACK; -- case 4.4: SELECT to a dist table is follwed by a DDL to a reference table BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -797,7 +797,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -814,7 +814,7 @@ ROLLBACK; SET client_min_messages to LOG; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -825,7 +825,7 @@ ERROR: cannot execute DDL on reference relation "reference_table" because there ROLLBACK; BEGIN; SELECT 
count(*) FROM on_update_fkey_table WHERE value_1 = 99; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -838,7 +838,7 @@ ROLLBACK; -- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE id = 9; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -848,7 +848,7 @@ NOTICE: truncate cascades to table "on_update_fkey_table" ROLLBACK; BEGIN; SELECT count(*) FROM on_update_fkey_table WHERE id = 9; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -863,7 +863,7 @@ RESET client_min_messages; BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -872,7 +872,7 @@ ROLLBACK; BEGIN; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; SELECT count(*) FROM transitive_reference_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -1033,16 +1033,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- make sure that the output isn't too verbose @@ -1054,16 +1054,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); @@ -1083,23 +1083,23 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('tt4', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- make sure that the output isn't too verbose @@ -1112,16 +1112,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT 
create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1139,16 +1139,16 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1162,16 +1162,16 @@ COMMIT; BEGIN; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1189,16 +1189,16 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1211,7 +1211,7 @@ ROLLBACK; -- setting the mode to sequential should fail BEGIN; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) @@ -1243,9 +1243,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1267,9 +1267,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... 
- create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1287,28 +1287,28 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- and maybe some other test CREATE INDEX i1 ON test_table_1(id); ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0); SELECT count(*) FROM test_table_2; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM test_table_1; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1327,18 +1327,18 @@ CREATE TABLE reference_table(id int PRIMARY KEY); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table" DEBUG: building index "reference_table_pkey" on table "reference_table" serially SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table" DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially SELECT create_distributed_table('distributed_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE @@ -1363,7 +1363,7 @@ DEBUG: generating subplan 170_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx. DEBUG: Plan 170 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('170_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode - id | value_1 | id + id | value_1 | id --------------------------------------------------------------------- (0 rows) @@ -1384,7 +1384,7 @@ DEBUG: generating subplan 174_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx. 
DEBUG: Plan 174 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('174_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) DEBUG: switching to sequential query execution mode DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1417,7 +1417,7 @@ BEGIN; DEBUG: generating subplan 181_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id DEBUG: generating subplan 181_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id DEBUG: Plan 181 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('181_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('181_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id)) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1429,7 +1429,7 @@ BEGIN; DELETE FROM reference_table RETURNING id; DEBUG: generating subplan 184_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id - id + id --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out index 91a835518..9c3463ba5 100644 --- a/src/test/regress/expected/foreign_key_to_reference_table.out +++ b/src/test/regress/expected/foreign_key_to_reference_table.out @@ -28,18 +28,18 @@ SELECT d $$ )).RESULT::json )::json )).* ; CREATE TABLE referenced_table(id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- we still do not support update/delete operations through foreign constraints if the foreign key includes the distribution column -- All should fail CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL; @@ -53,9 +53,9 @@ DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation when di DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; @@ -69,9 +69,9 @@ DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation when di DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL; @@ -88,16 +88,16 @@ ROLLBACK; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id, test_column)); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column) ON UPDATE SET DEFAULT; @@ -111,9 +111,9 @@ DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operatio DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column) ON UPDATE CASCADE; @@ -130,21 +130,21 @@ ROLLBACK; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000043 | fkey_reference_table.referencing_table_7000043 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000044 | fkey_reference_table.referencing_table_7000044 | fkey_reference_table.referenced_table_7000042 @@ -159,13 +159,13 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- referencing_table_id_fkey_7000051 | fkey_reference_table.referencing_table_7000051 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000052 | fkey_reference_table.referencing_table_7000052 | fkey_reference_table.referenced_table_7000042 @@ -180,14 +180,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000059 | fkey_reference_table.referencing_table_7000059 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000060 | fkey_reference_table.referencing_table_7000060 | fkey_reference_table.referenced_table_7000042 @@ -203,14 +203,14 @@ DROP TABLE referencing_table; BEGIN; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- referencing_table_id_fkey_7000067 | fkey_reference_table.referencing_table_7000067 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000068 | fkey_reference_table.referencing_table_7000068 | fkey_reference_table.referenced_table_7000042 @@ -225,14 +225,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET NULL; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000075 | fkey_reference_table.referencing_table_7000075 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000076 | fkey_reference_table.referencing_table_7000076 | fkey_reference_table.referenced_table_7000042 @@ -247,14 +247,14 @@ 
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000083 | fkey_reference_table.referencing_table_7000083 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000084 | fkey_reference_table.referencing_table_7000084 | fkey_reference_table.referenced_table_7000042 @@ -269,14 +269,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY(id) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000091 | fkey_reference_table.referencing_table_7000091 | fkey_reference_table.referenced_table_7000042 fkey_ref_7000092 | fkey_reference_table.referencing_table_7000092 | fkey_reference_table.referenced_table_7000042 @@ -292,9 +292,9 @@ DROP TABLE referencing_table; -- check if we can add the foreign key while adding the column CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD COLUMN referencing int REFERENCES referenced_table(id) ON UPDATE CASCADE; @@ -302,7 +302,7 @@ ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names HINT: You can issue each command separately such as ALTER TABLE referencing_table ADD COLUMN referencing data_type; ALTER TABLE referencing_table ADD CONSTRAINT constraint_name FOREIGN KEY (referencing) REFERENCES referenced_table(id) ON UPDATE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- (0 rows) @@ -311,9 +311,9 @@ DROP TABLE referencing_table; SET citus.shard_replication_factor TO 2; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 
row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); @@ -321,7 +321,7 @@ ERROR: cannot create foreign key constraint DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1". HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- (0 rows) @@ -329,9 +329,9 @@ DROP TABLE referencing_table; -- should fail when we add the column as well CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD COLUMN referencing_col int REFERENCES referenced_table(id) ON DELETE SET NULL; @@ -339,7 +339,7 @@ ERROR: cannot create foreign key constraint DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1". HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- (0 rows) @@ -348,13 +348,13 @@ SET citus.shard_replication_factor TO 1; -- simple create_distributed_table should work in/out transactions on tables with foreign key to reference tables CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- referencing_table_id_fkey_7000123 | fkey_reference_table.referencing_table_7000123 | fkey_reference_table.referenced_table_7000042 referencing_table_id_fkey_7000124 | fkey_reference_table.referencing_table_7000124 | fkey_reference_table.referenced_table_7000042 @@ -371,21 +371,21 @@ DROP TABLE referenced_table; BEGIN; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid 
LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- referencing_table_id_fkey_7000132 | fkey_reference_table.referencing_table_7000132 | fkey_reference_table.referenced_table_7000131 referencing_table_id_fkey_7000133 | fkey_reference_table.referencing_table_7000133 | fkey_reference_table.referenced_table_7000131 @@ -402,32 +402,32 @@ DROP TABLE referencing_table; -- distribution column or from distributed tables to reference tables. CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- (0 rows) DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table SELECT * FROM table_fkeys_in_workers WHERE name LIKE 'fkey_ref%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- (0 rows) @@ -437,15 +437,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(id); @@ -470,7 +470,7 @@ DELETE FROM referenced_table WHERE id = 501; TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -483,15 +483,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int, test_column int, PRIMARY KEY(id)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table 
--------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referencing_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- self referencing foreign key @@ -506,15 +506,15 @@ CREATE SCHEMA referencing_schema; CREATE TABLE referenced_schema.referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_schema.referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_schema.referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_schema.referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_schema.referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_schema.referenced_table(id) ON DELETE CASCADE; @@ -522,7 +522,7 @@ INSERT INTO referenced_schema.referenced_table SELECT x, x from generate_series( INSERT INTO referencing_schema.referencing_table SELECT x, x from generate_series(1,1000) as f(x); DELETE FROM referenced_schema.referenced_table WHERE id > 800; SELECT count(*) FROM referencing_schema.referencing_table; - count + count --------------------------------------------------------------------- 800 (1 row) @@ -535,15 +535,15 @@ RESET client_min_messages; CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT 1); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE SET DEFAULT; @@ -551,7 +551,7 @@ INSERT INTO referenced_table SELECT x, x FROM generate_series(1,1000) AS f(x); INSERT INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column > 800; SELECT count(*) FROM referencing_table WHERE ref_id = 1; - count + count --------------------------------------------------------------------- 201 (1 row) @@ -563,15 +563,15 @@ CREATE TYPE fkey_reference_table.composite AS (key1 int, key2 int); CREATE TABLE referenced_table(test_column composite, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, referencing_composite composite); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (referencing_composite) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -579,7 +579,7 @@ INSERT INTO referenced_table SELECT (x+1, x+1)::composite FROM generate_series(1 INSERT INTO referencing_table SELECT 
x, (x+1, x+1)::composite FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE (test_column).key1 > 900; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 899 (1 row) @@ -595,15 +595,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -611,7 +611,7 @@ INSERT INTO referenced_table(test_column2) SELECT x FROM generate_series(1,1000) INSERT INTO referencing_table SELECT x, x FROM generate_series(1,1000) AS f(x); DELETE FROM referenced_table WHERE test_column2 > 10; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -627,15 +627,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column int PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -658,15 +658,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int); CREATE TABLE referencing_table(id int, ref_id SERIAL); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; @@ -687,15 +687,15 @@ DROP TABLE referencing_table CASCADE; CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON DELETE SET DEFAULT; @@ -709,15 +709,15 @@ DROP TABLE referencing_table CASCADE; 
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column)); CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column) ON UPDATE CASCADE; @@ -728,7 +728,7 @@ ON CONFLICT (test_column) DO UPDATE SET test_column = -1 * EXCLUDED.test_column; SELECT * FROM referencing_table WHERE ref_id < 0 ORDER BY 1; - id | ref_id + id | ref_id --------------------------------------------------------------------- 1 | -1 2 | -2 @@ -746,9 +746,9 @@ INSERT INTO referenced_table VALUES (1,1), (2,2), (3,3); INSERT INTO referencing_table VALUES (1,1), (2,2), (3,3); SELECT create_reference_table('referenced_table'); NOTICE: Copying data from local table... - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); @@ -773,27 +773,27 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000226 | fkey_reference_table.referencing_table_7000226 | fkey_reference_table.referenced_table_7000224 fkey_ref_7000227 | fkey_reference_table.referencing_table_7000227 | fkey_reference_table.referenced_table_7000224 @@ -830,21 +830,21 @@ DETAIL: Key (id)=(X) is not present in table "referenced_table_xxxxxxx". 
-- should succeed INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(600,900) AS f(x); SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 201 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -859,25 +859,25 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 16 (1 row) @@ -899,21 +899,21 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(test_column2)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -921,7 +921,7 @@ BEGIN; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000246 | fkey_reference_table.referencing_table_7000246 | fkey_reference_table.referenced_table_7000244 fkey_ref_7000247 | fkey_reference_table.referencing_table_7000247 | fkey_reference_table.referenced_table_7000244 @@ -955,21 +955,21 @@ ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign k -- should succeed INSERT INTO referencing_table SELECT x, x+501 FROM generate_series(0,1000) AS f(x); SELECT count(*) FROM 
referencing_table; - count + count --------------------------------------------------------------------- 1001 (1 row) DELETE FROM referenced_table WHERE test_column < 700; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 301 (1 row) DELETE FROM referenced_table2 WHERE test_column2 > 800; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -985,27 +985,27 @@ CREATE TABLE referenced_table2(test_column int, test_column2 int, PRIMARY KEY(te CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE); BEGIN; SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (ref_id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 16 (1 row) @@ -1028,21 +1028,21 @@ CREATE TABLE referenced_table(test_column int, test_column2 int UNIQUE, PRIMARY CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int); CREATE TABLE referencing_table2(id int, ref_id int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -1052,7 +1052,7 @@ ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFE ALTER TABLE referencing_table2 ADD CONSTRAINT fkey_ref_to_dist FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE; COMMIT; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000265 | fkey_reference_table.referencing_table_7000265 | fkey_reference_table.referenced_table_7000264 fkey_ref_7000266 | fkey_reference_table.referencing_table_7000266 | fkey_reference_table.referenced_table_7000264 @@ -1095,20 +1095,20 @@ DETAIL: Key (id)=(X) is not present in table "referencing_table_xxxxxxx". 
INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,300) AS f(x); DELETE FROM referenced_table WHERE test_column < 200; SELECT count(*) FROM referencing_table; - count + count --------------------------------------------------------------------- 201 (1 row) SELECT count(*) FROM referencing_table2; - count + count --------------------------------------------------------------------- 101 (1 row) DELETE FROM referencing_table WHERE id > 200; SELECT count(*) FROM referencing_table2; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -1125,28 +1125,28 @@ CREATE TABLE referenced_table(test_column int, test_column2 int UNIQUE, PRIMARY CREATE TABLE referencing_table(id int PRIMARY KEY, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE); CREATE TABLE referencing_table2(id int, ref_id int, FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column2) ON DELETE CASCADE, FOREIGN KEY (id) REFERENCES referencing_table(id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 24 (1 row) @@ -1167,26 +1167,26 @@ CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(tes CREATE TABLE referencing_table(id int, ref_id int, ref_id2 int, PRIMARY KEY(id, ref_id)); CREATE TABLE referencing_referencing_table(id int, ref_id int, FOREIGN KEY (id, ref_id) REFERENCES referencing_table(id, ref_id) ON DELETE CASCADE); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id, ref_id2) REFERENCES referenced_table(test_column, test_column2) ON DELETE CASCADE; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.referencing%' ORDER BY 1,2,3; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref_7000299 | fkey_reference_table.referencing_table_7000299 | fkey_reference_table.referenced_table_7000298 fkey_ref_7000300 | fkey_reference_table.referencing_table_7000300 | fkey_reference_table.referenced_table_7000298 @@ -1211,7 +1211,7 @@ INSERT INTO referencing_table SELECT x, x+1, x+2 FROM 
generate_series(1,999) AS INSERT INTO referencing_referencing_table SELECT x, x+1 FROM generate_series(1,999) AS f(x); DELETE FROM referenced_table WHERE test_column > 800; SELECT max(ref_id) FROM referencing_referencing_table; - max + max --------------------------------------------------------------------- 800 (1 row) @@ -1227,23 +1227,23 @@ DROP TABLE referencing_referencing_table; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_table_1(id)); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES test_table_2(id)); SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE test_table_1 CASCADE; @@ -1255,16 +1255,16 @@ ROLLBACK; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); @@ -1278,16 +1278,16 @@ COMMIT; BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_2(id int PRIMARY KEY); SELECT create_reference_table('test_table_2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_1 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_2(id); @@ -1305,9 +1305,9 @@ BEGIN; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; SELECT create_reference_table('test_table_1'); NOTICE: Copying data from local table... 
- create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); @@ -1321,15 +1321,15 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX i1 ON test_table_1(id); @@ -1341,26 +1341,26 @@ COMMIT; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 8 (1 row) ALTER TABLE test_table_2 DROP CONSTRAINT test_table_2_value_1_fkey; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1371,15 +1371,15 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ADD CONSTRAINT foreign_key FOREIGN KEY(value_1) REFERENCES test_table_1(id); @@ -1390,7 +1390,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1401,21 +1401,21 @@ ERROR: table "test_table_1" does not exist CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey 
CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1426,22 +1426,22 @@ BEGIN; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_1 DROP CONSTRAINT test_table_1_pkey CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1451,20 +1451,20 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 DROP COLUMN value_1; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1475,21 +1475,21 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 DROP COLUMN value_1; COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1499,21 +1499,21 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_1 DROP COLUMN id CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1524,22 +1524,22 @@ CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_1 DROP COLUMN id CASCADE; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1549,15 +1549,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY, id2 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1,1), (2,2), (3,3); @@ -1572,7 +1572,7 @@ ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int; ERROR: integer out of range CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 8 (1 row) @@ -1585,15 +1585,15 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE bigint; @@ -1601,7 +1601,7 @@ BEGIN; NOTICE: drop cascades to constraint test_table_2_value_1_fkey on table test_table_2 COMMIT; SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1611,15 +1611,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY 
KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1627,7 +1627,7 @@ INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3); TRUNCATE test_table_1 CASCADE; NOTICE: truncate cascades to table "test_table_2" SELECT * FROM test_table_2; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) @@ -1636,15 +1636,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1654,7 +1654,7 @@ BEGIN; NOTICE: truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) @@ -1664,15 +1664,15 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); BEGIN; SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1681,7 +1681,7 @@ BEGIN; NOTICE: truncate cascades to table "test_table_2" COMMIT; SELECT * FROM test_table_2; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) @@ -1690,27 +1690,27 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3); TRUNCATE test_table_2 CASCADE; SELECT * FROM test_table_2; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; - id + id --------------------------------------------------------------------- 1 2 @@ -1722,15 +1722,15 @@ DROP TABLE test_table_1, test_table_2; CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE 
test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES (1),(2),(3); @@ -1739,12 +1739,12 @@ BEGIN; TRUNCATE test_table_2 CASCADE; COMMIT; SELECT * FROM test_table_2; - id | value_1 + id | value_1 --------------------------------------------------------------------- (0 rows) SELECT * FROM test_table_1; - id + id --------------------------------------------------------------------- 1 2 @@ -1758,21 +1758,21 @@ CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int); SELECT create_reference_table('test_table_1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -1796,15 +1796,15 @@ select create_reference_table('referencing_table'); ERROR: distributing partitioned tables in only supported for hash-distributed tables -- partitioned tables are supported as hash distributed table SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- add foreign constraints in between partitions @@ -1820,7 +1820,7 @@ CONTEXT: while executing command on localhost:xxxxx -- should succeed on partitioning_test_0 INSERT INTO referencing_table VALUES (0, 1); SELECT * FROM referencing_table; - id | value_1 + id | value_1 --------------------------------------------------------------------- 0 | 1 (1 row) @@ -1840,7 +1840,7 @@ BEGIN; TRUNCATE referencing_table, referenced_table; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; - id | value_1 | x + id | value_1 | x --------------------------------------------------------------------- (0 rows) @@ -1849,7 +1849,7 @@ BEGIN; TRUNCATE referenced_table, referencing_table; ALTER TABLE referencing_table ADD COLUMN x INT; SELECT * FROM referencing_table; - id | value_1 | x + id | value_1 | x --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/full_join.out b/src/test/regress/expected/full_join.out index 1ba772019..5c37b2e52 100644 --- a/src/test/regress/expected/full_join.out +++ b/src/test/regress/expected/full_join.out @@ -8,21 +8,21 @@ CREATE TABLE test_table_1(id int, val1 int); CREATE TABLE test_table_2(id bigint, val1 int); CREATE TABLE test_table_3(id int, val1 bigint); SELECT create_distributed_table('test_table_1', 'id'); - 
create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_3', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES(1,1),(2,2),(3,3); @@ -30,7 +30,7 @@ INSERT INTO test_table_2 VALUES(2,2),(3,3),(4,4); INSERT INTO test_table_3 VALUES(1,1),(3,3),(4,5); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id + id --------------------------------------------------------------------- 1 2 @@ -40,10 +40,10 @@ SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id | val1 | val1 + id | val1 | val1 --------------------------------------------------------------------- 1 | 1 | 1 - 2 | 2 | + 2 | 2 | 3 | 3 | 3 4 | | 5 (4 rows) @@ -55,13 +55,13 @@ SELECT * FROM (SELECT test_table_1.id FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2 USING(id) ORDER BY 1; - id + id --------------------------------------------------------------------- 1 2 3 - - + + (5 rows) -- Join subqueries using multiple columns @@ -71,18 +71,18 @@ SELECT * FROM (SELECT test_table_1.id, test_table_1.val1 FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2 USING(id, val1) ORDER BY 1; - id | val1 + id | val1 --------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 - | - | + | + | (5 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1; - id | val1 + id | val1 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -97,7 +97,7 @@ WHERE id::bigint < 55 GROUP BY id ORDER BY 2 ASC LIMIT 3; - count | avg_value | not_null + count | avg_value | not_null --------------------------------------------------------------------- 1 | 2 | t 1 | 6 | t @@ -108,7 +108,7 @@ SELECT max(val1) FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; - max + max --------------------------------------------------------------------- 1 2 @@ -121,7 +121,7 @@ SELECT max(val1) FROM test_table_1 LEFT JOIN test_table_3 USING(id, val1) GROUP BY test_table_1.id ORDER BY 1; - max + max --------------------------------------------------------------------- 1 2 @@ -138,36 +138,36 @@ INSERT INTO test_table_2 VALUES(7, NULL); INSERT INTO test_table_3 VALUES(7, NULL); -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1; - id | val1 | val1 + id | val1 | val1 --------------------------------------------------------------------- 1 | 1 | 1 - 2 | 2 | + 2 | 2 | 3 | 3 | 3 4 | | 5 - 7 | | + 7 | | (5 rows) -- Get the same result (with multiple id) SELECT * FROM test_table_1 FULL JOIN test_table_3 ON (test_table_1.id = test_table_3.id) ORDER BY 1; - id | val1 | id | val1 + id | val1 | id | val1 --------------------------------------------------------------------- 1 | 1 | 1 | 1 - 2 | 2 | | + 2 | 2 | | 3 | 3 | 3 | 3 - 7 | | 7 | + 7 | | 7 | | | 4 | 5 (5 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN 
test_table_3 USING(id, val1) ORDER BY 1; - id | val1 + id | val1 --------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 4 | 5 - 7 | - 7 | + 7 | + 7 | (6 rows) -- In order to make the same test with different data types use text-varchar pair @@ -178,22 +178,22 @@ DROP TABLE test_table_3; CREATE TABLE test_table_1(id int, val1 text); CREATE TABLE test_table_2(id int, val1 varchar(30)); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('test_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES(1,'val_1'),(2,'val_2'),(3,'val_3'), (4, NULL); INSERT INTO test_table_2 VALUES(2,'val_2'),(3,'val_3'),(4,'val_4'), (5, NULL); -- Simple full outer join SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; - id + id --------------------------------------------------------------------- 1 2 @@ -204,13 +204,13 @@ SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; -- Get all columns as the result of the full join SELECT * FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1; - id | val1 | val1 + id | val1 | val1 --------------------------------------------------------------------- - 1 | val_1 | + 1 | val_1 | 2 | val_2 | val_2 3 | val_3 | val_3 4 | | val_4 - 5 | | + 5 | | (5 rows) -- Join subqueries using multiple columns @@ -220,28 +220,28 @@ SELECT * FROM (SELECT test_table_2.id, test_table_2.val1 FROM test_table_1 FULL JOIN test_table_2 using(id)) as j2 USING(id, val1) ORDER BY 1,2; - id | val1 + id | val1 --------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 4 | val_4 - 4 | - 5 | - | - | + 4 | + 5 | + | + | (8 rows) -- Full join using multiple columns SELECT * FROM test_table_1 FULL JOIN test_table_2 USING(id, val1) ORDER BY 1,2; - id | val1 + id | val1 --------------------------------------------------------------------- 1 | val_1 2 | val_2 3 | val_3 4 | val_4 - 4 | - 5 | + 4 | + 5 | (6 rows) DROP SCHEMA full_join CASCADE; diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out index efbc2f080..7f97be065 100644 --- a/src/test/regress/expected/intermediate_result_pruning.out +++ b/src/test/regress/expected/intermediate_result_pruning.out @@ -6,30 +6,30 @@ SET citus.next_shard_id TO 1480000; SET citus.shard_replication_factor = 1; CREATE TABLE table_1 (key int, value text); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_2 (key int, value text); SELECT create_distributed_table('table_2', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_3 (key int, value text); SELECT create_distributed_table('table_3', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE ref_table (key int, value text); SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table 
--------------------------------------------------------------------- - + (1 row) -- load some data @@ -51,7 +51,7 @@ DEBUG: generating subplan 5_1 for CTE some_values_1: SELECT key FROM intermedia DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) DEBUG: Subplan 5_1 will be sent to localhost:xxxxx DEBUG: Subplan 5_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -68,7 +68,7 @@ FROM DEBUG: generating subplan 7_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Subplan 7_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -86,7 +86,7 @@ DEBUG: generating subplan 9_1 for CTE some_values_1: SELECT key, random() AS ra DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) DEBUG: Subplan 9_1 will be sent to localhost:xxxxx DEBUG: Subplan 9_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -106,7 +106,7 @@ DEBUG: generating subplan 11_2 for CTE some_values_2: SELECT key, random() AS r DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Subplan 11_1 will be sent to localhost:xxxxx DEBUG: Subplan 11_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -127,7 +127,7 @@ DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 14_1 will be sent to localhost:xxxxx DEBUG: Subplan 14_1 will be sent to localhost:xxxxx DEBUG: Subplan 14_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 1 (1 row) @@ -149,7 +149,7 @@ DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 17_1 will be sent to localhost:xxxxx DEBUG: Subplan 17_1 will be sent to localhost:xxxxx DEBUG: Subplan 17_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 1 (1 row) @@ -171,7 +171,7 @@ DEBUG: Plan 20 query after replacing 
subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 20_1 will be sent to localhost:xxxxx DEBUG: Subplan 20_1 will be sent to localhost:xxxxx DEBUG: Subplan 20_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -193,7 +193,7 @@ DEBUG: generating subplan 23_2 for CTE some_values_2: SELECT some_values_1.key, DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1) DEBUG: Subplan 23_1 will be sent to localhost:xxxxx DEBUG: Subplan 23_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -214,7 +214,7 @@ DEBUG: Subplan 26_1 will be sent to localhost:xxxxx DEBUG: Subplan 26_1 will be sent to localhost:xxxxx DEBUG: Subplan 26_2 will be sent to localhost:xxxxx DEBUG: Subplan 26_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 1 (1 row) @@ -237,7 +237,7 @@ DEBUG: Subplan 29_1 will be sent to localhost:xxxxx DEBUG: Subplan 29_1 will be sent to localhost:xxxxx DEBUG: Subplan 29_2 will be sent to localhost:xxxxx DEBUG: Subplan 29_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -255,7 +255,7 @@ DEBUG: generating subplan 32_1 for CTE some_values_1: SELECT key, random() AS r DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key)) DEBUG: Subplan 32_1 will be sent to localhost:xxxxx DEBUG: Subplan 32_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -268,7 +268,7 @@ SELECT count(*) FROM (some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key) WHERE table_2.key = 1; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -290,7 +290,7 @@ DEBUG: generating subplan 35_2 for CTE some_values_2: SELECT some_values_1.key, DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 DEBUG: Subplan 35_1 will be sent to localhost:xxxxx DEBUG: Subplan 35_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -320,7 +320,7 @@ DEBUG: Subplan 38_1 will be sent to localhost:xxxxx DEBUG: Subplan 38_1 will be sent to localhost:xxxxx DEBUG: Subplan 39_1 will be sent to 
localhost:xxxxx DEBUG: Subplan 39_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -349,7 +349,7 @@ DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS co DEBUG: Subplan 42_1 will be sent to localhost:xxxxx DEBUG: Subplan 43_1 will be sent to localhost:xxxxx DEBUG: Subplan 43_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -373,7 +373,7 @@ DEBUG: Subplan 46_2 will be sent to localhost:xxxxx DEBUG: Subplan 46_2 will be sent to localhost:xxxxx DEBUG: Subplan 46_3 will be sent to localhost:xxxxx DEBUG: Subplan 46_3 will be sent to localhost:xxxxx - key | key | value + key | key | value --------------------------------------------------------------------- (0 rows) @@ -389,7 +389,7 @@ DEBUG: generating subplan 50_2 for CTE some_values_2: SELECT key, random() AS r DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) DEBUG: Subplan 50_1 will be sent to localhost:xxxxx DEBUG: Subplan 50_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -406,7 +406,7 @@ DEBUG: generating subplan 53_2 for CTE some_values_2: SELECT key, random() AS r DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE false DEBUG: Subplan 53_1 will be sent to localhost:xxxxx DEBUG: Subplan 53_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -428,7 +428,7 @@ DEBUG: generating subplan 56_2 for CTE some_values_3: SELECT key, random() AS r DEBUG: Plan 56 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_3 DEBUG: Subplan 56_1 will be sent to localhost:xxxxx DEBUG: Subplan 56_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -488,7 +488,7 @@ DEBUG: Subplan 59_4 will be sent to localhost:xxxxx DEBUG: Subplan 59_5 will be sent to localhost:xxxxx DEBUG: Subplan 59_5 will be sent to localhost:xxxxx DEBUG: Subplan 59_6 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -544,7 +544,7 @@ DEBUG: Subplan 66_3 will be sent to localhost:xxxxx DEBUG: Subplan 66_4 will be sent to localhost:xxxxx 
DEBUG: Subplan 66_5 will be sent to localhost:xxxxx DEBUG: Subplan 66_6 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -559,7 +559,7 @@ DEBUG: generating subplan 73_2 for subquery SELECT key FROM intermediate_result DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('73_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('73_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer) DEBUG: Subplan 73_1 will be sent to localhost:xxxxx DEBUG: Subplan 73_2 will be sent to localhost:xxxxx - key + key --------------------------------------------------------------------- (0 rows) @@ -589,7 +589,7 @@ DEBUG: Subplan 76_1 will be sent to localhost:xxxxx DEBUG: Subplan 77_1 will be sent to localhost:xxxxx DEBUG: Subplan 77_2 will be sent to localhost:xxxxx DEBUG: Subplan 76_2 will be sent to localhost:xxxxx - key + key --------------------------------------------------------------------- (0 rows) @@ -619,7 +619,7 @@ DEBUG: Subplan 81_1 will be sent to localhost:xxxxx DEBUG: Subplan 82_1 will be sent to localhost:xxxxx DEBUG: Subplan 82_2 will be sent to localhost:xxxxx DEBUG: Subplan 81_2 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -638,7 +638,7 @@ DEBUG: generating subplan 86_1 for subquery SELECT key, random() AS random FROM DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) DEBUG: Subplan 86_1 will be sent to localhost:xxxxx DEBUG: Subplan 86_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 14 (1 row) @@ -655,7 +655,7 @@ WHERE DEBUG: generating subplan 88_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2 DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key) DEBUG: Subplan 88_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 4 (1 row) @@ -682,7 +682,7 @@ DEBUG: Subplan 90_1 will be sent to localhost:xxxxx DEBUG: Subplan 90_2 will be sent to localhost:xxxxx DEBUG: Subplan 92_1 will be sent to localhost:xxxxx DEBUG: Subplan 92_1 will be sent to localhost:xxxxx - key | value + key | value --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -712,7 +712,7 @@ DEBUG: Subplan 94_1 will be sent to localhost:xxxxx DEBUG: Subplan 94_2 will be sent to localhost:xxxxx DEBUG: Subplan 96_1 will be sent to localhost:xxxxx DEBUG: Subplan 96_1 will be sent to localhost:xxxxx - key | value + key | value 
--------------------------------------------------------------------- 3 | 3 4 | 4 @@ -737,7 +737,7 @@ DEBUG: Plan 99 query after replacing subqueries and CTEs: DELETE FROM intermedi DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data DEBUG: Subplan 98_1 will be sent to localhost:xxxxx DEBUG: Subplan 99_1 will be sent to localhost:xxxxx - key | value + key | value --------------------------------------------------------------------- 6 | 6 (1 row) @@ -837,37 +837,37 @@ SET client_min_messages TO DEFAULT; CREATE TABLE range_partitioned(range_column text, data int); SET client_min_messages TO DEBUG1; SELECT create_distributed_table('range_partitioned', 'range_column', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1480013 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1480014 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1480015 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1480016 (1 row) SELECT master_create_empty_shard('range_partitioned'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1480017 (1 row) @@ -888,7 +888,7 @@ WHERE DEBUG: generating subplan 120_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned DEBUG: Plan 120 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('120_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) DEBUG: Subplan 120_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -905,7 +905,7 @@ DEBUG: generating subplan 122_1 for subquery SELECT data FROM intermediate_resu DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))) DEBUG: Subplan 122_1 will be sent to localhost:xxxxx DEBUG: Subplan 122_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -925,7 +925,7 @@ DEBUG: generating subplan 124_1 for CTE some_data: SELECT data FROM intermediat DEBUG: Plan 124 query after replacing subqueries and 
CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('124_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data))) DEBUG: Subplan 124_1 will be sent to localhost:xxxxx DEBUG: Subplan 124_1 will be sent to localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out index 68d813c94..b5b13525a 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -9,13 +9,13 @@ CREATE OR REPLACE FUNCTION pg_catalog.store_intermediate_result_on_node(nodename -- in the same transaction we can read a result BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 5 (1 row) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -27,7 +27,7 @@ SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 in COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -37,15 +37,15 @@ ERROR: result "squares" does not exist BEGIN; CREATE TABLE interesting_squares (user_id text, interested_in text); SELECT create_distributed_table('interesting_squares', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO interesting_squares VALUES ('jon', '2'), ('jon', '5'), ('jack', '3'); -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - broadcast_intermediate_result + broadcast_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -55,7 +55,7 @@ SELECT x, x2 FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in) WHERE user_id = 'jon' ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 2 | 4 5 | 25 @@ -65,7 +65,7 @@ END; BEGIN; -- put an intermediate result on all workers SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - broadcast_intermediate_result + broadcast_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -75,7 +75,7 @@ SELECT x, x2 FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in) ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 2 | 4 3 | 9 @@ -110,7 +110,7 @@ SET client_min_messages TO DEFAULT; -- try to read the file as text, will fail because of 
binary encoding BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -121,7 +121,7 @@ END; -- try to read the file with wrong encoding BEGIN; SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -139,7 +139,7 @@ INSERT INTO stored_squares VALUES ('jon', '(5,25)'::intermediate_results.square_ -- composite types change the format to text BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 4 (1 row) @@ -149,13 +149,13 @@ ERROR: COPY file signature not recognized COMMIT; BEGIN; SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 4 (1 row) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type); - s + s --------------------------------------------------------------------- (2,4) (3,9) @@ -167,7 +167,7 @@ COMMIT; BEGIN; -- put an intermediate result in text format on all workers SELECT broadcast_intermediate_result('stored_squares', 'SELECT square, metadata FROM stored_squares'); - broadcast_intermediate_result + broadcast_intermediate_result --------------------------------------------------------------------- 4 (1 row) @@ -178,7 +178,7 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb) ) squares ON ((s).x = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; - user_id | interested_in | s | m + user_id | interested_in | s | m --------------------------------------------------------------------- jon | 2 | (2,4) | {"value": 2} jon | 5 | (5,25) | {"value": 5} @@ -190,7 +190,7 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb) ) squares ON ((s).x = interested_in) ORDER BY 1,2; - user_id | interested_in | s | m + user_id | interested_in | s | m --------------------------------------------------------------------- jack | 3 | (3,9) | {"value": 3} jon | 2 | (2,4) | {"value": 2} @@ -201,39 +201,39 @@ END; BEGIN; -- accurate row count estimates for primitive types SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,632) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 632 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..4.55 rows=632 width=8) (1 row) -- less accurate results for variable types SELECT create_intermediate_result('hellos', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 63 (1 row) EXPLAIN (COSTS ON) 
SELECT * FROM read_intermediate_result('hellos', 'binary') AS res (x int, y text); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.32 rows=30 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_result res (cost=0.00..0.01 rows=1 width=32) (1 row) @@ -245,7 +245,7 @@ TO PROGRAM $$psql -h localhost -p 57636 -U postgres -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$ WITH (FORMAT text); SELECT * FROM squares ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -271,19 +271,19 @@ BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'), create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(4,6) s'), create_intermediate_result('squares_3', 'SELECT s, s*s FROM generate_series(7,10) s'); - create_intermediate_result | create_intermediate_result | create_intermediate_result + create_intermediate_result | create_intermediate_result | create_intermediate_result --------------------------------------------------------------------- 3 | 3 | 4 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); - count + count --------------------------------------------------------------------- 0 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -291,7 +291,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS (3 rows) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares_3']::text[], 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -308,7 +308,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares COMMIT; -- in separate transactions, the result is no longer available SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,5) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 5 (1 row) @@ -318,7 +318,7 @@ ERROR: result "squares_1" does not exist -- error behaviour, and also check that results are deleted on rollback BEGIN; SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 3 (1 row) @@ -335,13 +335,13 @@ ERROR: null array element not allowed in this context ROLLBACK TO SAVEPOINT s1; -- after rollbacks we should be able to run vail read_intermediate_results still. 
SELECT count(*) FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int); - count + count --------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int); - count + count --------------------------------------------------------------------- 0 (1 row) @@ -356,7 +356,7 @@ SELECT broadcast_intermediate_result('stored_squares_1', 'SELECT s, s*s, ROW(1::text, 2) FROM generate_series(1,3) s'), broadcast_intermediate_result('stored_squares_2', 'SELECT s, s*s, ROW(2::text, 3) FROM generate_series(4,6) s'); - broadcast_intermediate_result | broadcast_intermediate_result + broadcast_intermediate_result | broadcast_intermediate_result --------------------------------------------------------------------- 3 | 3 (1 row) @@ -367,7 +367,7 @@ SELECT * FROM interesting_squares JOIN ( read_intermediate_results(ARRAY['stored_squares_1', 'stored_squares_2'], 'binary') AS res (x int, x2 int, z intermediate_results.square_type) ) squares ON (squares.x::text = interested_in) WHERE user_id = 'jon' ORDER BY 1,2; - user_id | interested_in | x | x2 | z + user_id | interested_in | x | x2 | z --------------------------------------------------------------------- jon | 2 | 2 | 4 | (1,2) jon | 5 | 5 | 25 | (2,3) @@ -379,13 +379,13 @@ BEGIN; -- almost accurate row count estimates for primitive types SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,632) s'), create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(633,1024) s'); - create_intermediate_result | create_intermediate_result + create_intermediate_result | create_intermediate_result --------------------------------------------------------------------- 632 | 392 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2'], 'binary') AS res (x int, x2 int); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..7.37 rows=1024 width=8) (1 row) @@ -393,26 +393,26 @@ EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 's -- less accurate results for variable types SELECT create_intermediate_result('hellos_1', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$), create_intermediate_result('hellos_2', $$SELECT s, 'hello-'||s FROM generate_series(64,129) s$$); - create_intermediate_result | create_intermediate_result + create_intermediate_result | create_intermediate_result --------------------------------------------------------------------- 63 | 66 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['hellos_1', 'hellos_2'], 'binary') AS res (x int, y text); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_results res (cost=0.00..0.66 rows=62 width=36) (1 row) -- not very accurate results for text encoding SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 4 (1 row) EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['stored_squares'], 'text') AS res (s intermediate_results.square_type); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Function Scan on read_intermediate_results res 
(cost=0.00..0.01 rows=1 width=32) (1 row) @@ -424,19 +424,19 @@ END; -- straightforward, single-result case BEGIN; SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1, 5) s'); - broadcast_intermediate_result + broadcast_intermediate_result --------------------------------------------------------------------- 5 (1 row) SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port); - fetch_intermediate_results + fetch_intermediate_results --------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -446,13 +446,13 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 (5 rows) SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results + fetch_intermediate_results --------------------------------------------------------------------- 111 (1 row) SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -466,16 +466,16 @@ END; BEGIN; SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s'); - store_intermediate_result_on_node + store_intermediate_result_on_node --------------------------------------------------------------------- - + (1 row) SELECT store_intermediate_result_on_node('localhost', :worker_1_port, 'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s'); - store_intermediate_result_on_node + store_intermediate_result_on_node --------------------------------------------------------------------- - + (1 row) SAVEPOINT s1; @@ -494,13 +494,13 @@ ERROR: result "squares_1" does not exist ROLLBACK TO SAVEPOINT s1; -- fetch from worker 1 should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results + fetch_intermediate_results --------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -510,13 +510,13 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], -- fetching again should succeed SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port); - fetch_intermediate_results + fetch_intermediate_results --------------------------------------------------------------------- 114 (1 row) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int); - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -527,7 +527,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], ROLLBACK TO SAVEPOINT s1; -- empty result id list should succeed SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port); - fetch_intermediate_results + fetch_intermediate_results --------------------------------------------------------------------- 0 (1 row) diff --git 
a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out index 56d464af6..4fa0f754a 100644 --- a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out +++ b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out @@ -3,27 +3,27 @@ Parsed test spec with 2 sessions starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-copy-to-reference-table: +1 +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-commit: +step s1-commit: COMMIT; step s2-copy-to-reference-table: <... completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -31,39 +31,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-copy-to-reference-table: +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -71,39 +71,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 10 -57638 t 10 +57637 t 10 +57638 t 10 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-insert-to-reference-table: +1 +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-to-reference-table: <... 
completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -111,39 +111,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 6 -57638 t 6 +57637 t 6 +57638 t 6 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-to-reference-table: +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -151,39 +151,39 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 6 -57638 t 6 +57637 t 6 +57638 t 6 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-ddl-on-reference-table: +1 +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-commit: +step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> -step s2-print-index-count: +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -191,39 +191,39 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-ddl-on-reference-table: +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? 
-1 -step s2-print-index-count: +1 +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -231,42 +231,42 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2 create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-reference-table-2: +1 +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-reference-table-2: <... completed> create_reference_table - -step s2-print-content-2: + +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -274,42 +274,42 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2 create_distributed_table - -step s2-load-metadata-cache: + +step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-reference-table-2: +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); create_reference_table - -step s1-add-second-worker: + +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content-2: +1 +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -317,36 +317,36 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-copy-to-reference-table: +1 +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-commit: +step s1-commit: COMMIT; step s2-copy-to-reference-table: <... 
completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -354,36 +354,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 master_remove_node - - + + starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-copy-to-reference-table: +step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -391,36 +391,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 5 -57638 t 5 +57637 t 5 +57638 t 5 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-insert-to-reference-table: +1 +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-commit: +step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> -step s2-print-content: +step s2-print-content: SELECT nodeport, success, result FROM @@ -428,36 +428,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-insert-to-reference-table: +step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content: +1 +step s2-print-content: SELECT nodeport, success, result FROM @@ -465,36 +465,36 @@ step s2-print-content: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-ddl-on-reference-table: +1 +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-commit: +step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... 
completed> -step s2-print-index-count: +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -502,36 +502,36 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-ddl-on-reference-table: +step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-index-count: +1 +step s2-print-index-count: SELECT nodeport, success, result FROM @@ -539,39 +539,39 @@ step s2-print-index-count: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2 create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-add-second-worker: +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s2-create-reference-table-2: +1 +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-reference-table-2: <... completed> create_reference_table - -step s2-print-content-2: + +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -579,39 +579,39 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2 create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-create-reference-table-2: +step s2-create-reference-table-2: SELECT create_reference_table('test_reference_table_2'); create_reference_table - -step s1-add-second-worker: + +step s1-add-second-worker: SELECT 1 FROM master_add_node('localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-add-second-worker: <... completed> -?column? +?column? -1 -step s2-print-content-2: +1 +step s2-print-content-2: SELECT nodeport, success, result FROM @@ -619,11 +619,11 @@ step s2-print-content-2: ORDER BY nodeport; -nodeport success result +nodeport success result -57637 t 1 -57638 t 1 +57637 t 1 +57638 t 1 master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_add_remove_node.out b/src/test/regress/expected/isolation_add_remove_node.out index 0cdf114bb..5203acbd8 100644 --- a/src/test/regress/expected/isolation_add_remove_node.out +++ b/src/test/regress/expected/isolation_add_remove_node.out @@ -1,634 +1,634 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-remove-node-1: +1 +step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-1: <... 
completed> master_remove_node - -step s1-show-nodes: + +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-2: +1 +step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t -localhost 57638 t +localhost 57637 t +localhost 57638 t master_remove_node - - + + starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-1: +1 +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-2: +1 +step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -step s1-abort: +step s1-abort: ABORT; step s2-add-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57638 t +localhost 57638 t master_remove_node - + starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-add-node-1: +1 +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-add-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-add-node-2: +1 +step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node - -step s2-remove-node-2: + +step s2-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-2: <... completed> master_remove_node - -step s1-show-nodes: + +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-remove-node-1: +step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node - -step s2-remove-node-1: + +step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-remove-node-1: <... completed> error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist -step s1-show-nodes: +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? 
+?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-activate-node-1: +1 +step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-activate-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 t +localhost 57637 t master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-add-inactive-1: +1 +step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-activate-node-1: +step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-disable-node-1: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes -?column? +?column? -1 -step s1-add-node-1: +1 +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-disable-node-1: +step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -?column? +?column? -1 -step s2-disable-node-1: +1 +step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); -step s1-abort: +step s1-abort: ABORT; step s2-disable-node-1: <... completed> -?column? +?column? 
-1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodename nodeport isactive +nodename nodeport isactive -localhost 57637 f +localhost 57637 f master_remove_node - + diff --git a/src/test/regress/expected/isolation_alter_role_propagation.out b/src/test/regress/expected/isolation_alter_role_propagation.out index 777a95273..4e30be626 100644 --- a/src/test/regress/expected/isolation_alter_role_propagation.out +++ b/src/test/regress/expected/isolation_alter_role_propagation.out @@ -5,28 +5,28 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-add-node: +step s2-add-node: SELECT 1 FROM master_add_node('localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-add-node: <... completed> -?column? +?column? -1 +1 run_command_on_workers (localhost,57637,t,"DROP ROLE") @@ -37,25 +37,25 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-add-node: +step s1-add-node: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-alter-role-1: +1 +step s2-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-role-1: <... completed> @@ -69,22 +69,22 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-alter-role-1: +step s2-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; step s2-alter-role-1: <... 
completed> @@ -99,22 +99,22 @@ run_command_on_workers (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") -step s1-enable-propagation: +step s1-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s2-enable-propagation: +step s2-enable-propagation: SET citus.enable_alter_role_propagation to ON; -step s1-begin: +step s1-begin: BEGIN; -step s1-alter-role-1: +step s1-alter-role-1: ALTER ROLE alter_role_1 NOSUPERUSER; -step s2-alter-role-2: +step s2-alter-role-2: ALTER ROLE alter_role_2 NOSUPERUSER; -step s1-commit: +step s1-commit: COMMIT; run_command_on_workers diff --git a/src/test/regress/expected/isolation_append_copy_vs_all.out b/src/test/regress/expected/isolation_append_copy_vs_all.out index 1348e991d..65f8de20f 100644 --- a/src/test/regress/expected/isolation_append_copy_vs_all.out +++ b/src/test/regress/expected/isolation_append_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM append_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 
3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM append_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE append_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX append_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -224,7 +224,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -244,7 +244,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -265,7 +265,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -285,24 +285,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; 
-count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -311,16 +311,16 @@ step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command -1 +1 step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -329,16 +329,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -2 +2 step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE append_copy; step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); step s1-begin: BEGIN; @@ -348,134 +348,134 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count -0 +step s1-select-count: SELECT COUNT(*) FROM append_copy; +count + +0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM append_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET 
citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO append_copy VALUES(0, 'k', 0); step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM append_copy WHERE id = 1; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_copy; @@ -483,14 +483,14 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE append_copy; @@ -504,7 +504,7 @@ ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); @@ -512,9 +512,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -524,7 +524,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; @@ -533,9 +533,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers @@ -545,7 +545,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; @@ -554,9 +554,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -566,7 +566,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -575,9 +575,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -587,7 +587,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; @@ -595,9 +595,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -607,71 +607,71 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size -32768 +32768 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); master_apply_delete_command -1 +1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); master_drop_all_shards -1 +1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
-count
+count

-5
+5

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE append_copy;
step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append');
create_distributed_table

-
+
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
-count
+count

-5
+5
diff --git a/src/test/regress/expected/isolation_cancellation.out b/src/test/regress/expected/isolation_cancellation.out
index fc929c568..1947bd394 100644
--- a/src/test/regress/expected/isolation_cancellation.out
+++ b/src/test/regress/expected/isolation_cancellation.out
@@ -1,127 +1,127 @@
Parsed test spec with 2 sessions

starting permutation: s1-timeout s1-sleep10000 s1-reset s1-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s1-drop:
+step s1-drop:
    DROP TABLE cancel_table;

starting permutation: s1-timeout s1-sleep10000 s1-reset s2-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s2-drop:
+step s2-drop:
    DROP TABLE cancel_table;

starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s1-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-rollback:
+step s1-rollback:
    ROLLBACK;

-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s1-drop:
+step s1-drop:
    DROP TABLE cancel_table;

starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s2-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-rollback:
+step s1-rollback:
    ROLLBACK;

-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s2-drop:
+step s2-drop:
    DROP TABLE cancel_table;

starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s1-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-update1:
+step s1-update1:
    UPDATE cancel_table SET data = '' WHERE test_id = 1;

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-rollback:
+step s1-rollback:
    ROLLBACK;

-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s1-drop:
+step s1-drop:
    DROP TABLE cancel_table;

starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s2-drop
-step s1-timeout:
+step s1-timeout:
    SET statement_timeout = '100ms';

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-update1:
+step s1-update1:
    UPDATE cancel_table SET data = '' WHERE test_id = 1;

-step s1-sleep10000:
+step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
-step s1-rollback:
+step s1-rollback:
    ROLLBACK;

-step s1-reset:
+step s1-reset:
    RESET ALL;

-step s2-drop:
+step s2-drop:
    DROP TABLE cancel_table;
diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out
index 9d0b9e331..355c59e89 100644
--- a/src/test/regress/expected/isolation_citus_dist_activity.out
+++ b/src/test/regress/expected/isolation_citus_dist_activity.out
@@ -3,223 +3,223 @@ Parsed test spec with 3 sessions
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-alter-table s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
create_distributed_table

-
-step s1-cache-connections:
+
+step s1-cache-connections:
    SET citus.max_cached_conns_per_worker TO 4;
    SET citus.force_max_query_parallelization TO on;
    UPDATE test_table SET column2 = 0;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s3-begin:
+step s3-begin:
    BEGIN;

-step s1-alter-table:
+step s1-alter-table:
    ALTER TABLE test_table ADD COLUMN x INT;

-step s2-sleep:
+step s2-sleep:
    SELECT pg_sleep(0.5);

-pg_sleep
+pg_sleep

-
-step s2-view-dist:
+
+step s2-view-dist:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_dist_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

ALTER TABLE test_table ADD COLUMN x INT;
-coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s3-view-worker:
+coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s3-view-worker:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_worker_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

SELECT worker_apply_shard_ddl_command (1300004, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
-')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (1300003, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
-')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (1300002, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
-')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (1300001, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
-')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s2-rollback:
+')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s2-rollback:
    ROLLBACK;

-step s1-commit:
+step s1-commit:
    COMMIT;

-step s3-rollback:
+step s3-rollback:
    ROLLBACK;

starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
create_distributed_table

-
-step s1-cache-connections:
+
+step s1-cache-connections:
    SET citus.max_cached_conns_per_worker TO 4;
    SET citus.force_max_query_parallelization TO on;
    UPDATE test_table SET column2 = 0;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s3-begin:
+step s3-begin:
    BEGIN;

-step s1-insert:
+step s1-insert:
    INSERT INTO test_table VALUES (100, 100);

-step s2-sleep:
+step s2-sleep:
    SELECT pg_sleep(0.5);

-pg_sleep
+pg_sleep

-
-step s2-view-dist:
+
+step s2-view-dist:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_dist_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

INSERT INTO test_table VALUES (100, 100);
-coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s3-view-worker:
+coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s3-view-worker:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_worker_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

-INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s2-rollback:
+INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s2-rollback:
    ROLLBACK;

-step s1-commit:
+step s1-commit:
    COMMIT;

-step s3-rollback:
+step s3-rollback:
    ROLLBACK;

starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
create_distributed_table

-
-step s1-cache-connections:
+
+step s1-cache-connections:
    SET citus.max_cached_conns_per_worker TO 4;
    SET citus.force_max_query_parallelization TO on;
    UPDATE test_table SET column2 = 0;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s3-begin:
+step s3-begin:
    BEGIN;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_table;

-count
+count

-0
-step s2-sleep:
+0
+step s2-sleep:
    SELECT pg_sleep(0.5);

-pg_sleep
+pg_sleep

-
-step s2-view-dist:
+
+step s2-view-dist:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_dist_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

SELECT count(*) FROM test_table;
-coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s3-view-worker:
+coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s3-view-worker:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_worker_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

-SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s2-rollback:
+SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s2-rollback:
    ROLLBACK;

-step s1-commit:
+step s1-commit:
    COMMIT;

-step s3-rollback:
+step s3-rollback:
    ROLLBACK;

starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
create_distributed_table

-
-step s1-cache-connections:
+
+step s1-cache-connections:
    SET citus.max_cached_conns_per_worker TO 4;
    SET citus.force_max_query_parallelization TO on;
    UPDATE test_table SET column2 = 0;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s3-begin:
+step s3-begin:
    BEGIN;

-step s1-select-router:
+step s1-select-router:
    SELECT count(*) FROM test_table WHERE column1 = 55;

-count
+count

-0
-step s2-sleep:
+0
+step s2-sleep:
    SELECT pg_sleep(0.5);

-pg_sleep
+pg_sleep

-
-step s2-view-dist:
+
+step s2-view-dist:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_dist_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

SELECT count(*) FROM test_table WHERE column1 = 55;
-coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s3-view-worker:
+coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s3-view-worker:
    SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname
    FROM citus_worker_stat_activity
    WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%'
    ORDER BY query DESC;

-query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
+query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname

-SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-step s2-rollback:
+SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+step s2-rollback:
    ROLLBACK;

-step s1-commit:
+step s1-commit:
    COMMIT;

-step s3-rollback:
+step s3-rollback:
    ROLLBACK;
diff --git a/src/test/regress/expected/isolation_cluster_management.out b/src/test/regress/expected/isolation_cluster_management.out
index 336550e78..505157100 100644
--- a/src/test/regress/expected/isolation_cluster_management.out
+++ b/src/test/regress/expected/isolation_cluster_management.out
@@ -1,13 +1,13 @@
Parsed test spec with 1 sessions

starting permutation: s1a
-step s1a:
+step s1a:
    SELECT 1 FROM master_add_node('localhost', 57637);
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-?column?
+1
+?column?

-1
+1
diff --git a/src/test/regress/expected/isolation_concurrent_dml.out b/src/test/regress/expected/isolation_concurrent_dml.out
index 69c06723c..02cc2403d 100644
--- a/src/test/regress/expected/isolation_concurrent_dml.out
+++ b/src/test/regress/expected/isolation_concurrent_dml.out
@@ -3,17 +3,17 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-update s1-commit
master_create_worker_shards

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;

-step s1-insert:
+step s1-insert:
    INSERT INTO test_concurrent_dml VALUES(1);

-step s2-update:
+step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-update: <... completed>
@@ -21,28 +21,28 @@ step s2-update: <... completed>

starting permutation: s1-insert s2-update
master_create_worker_shards

-
-step s1-insert:
+
+step s1-insert:
    INSERT INTO test_concurrent_dml VALUES(1);

-step s2-update:
+step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;

starting permutation: s1-begin s1-multi-insert s2-update s1-commit
master_create_worker_shards

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;

-step s1-multi-insert:
+step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

-step s2-update:
+step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-update: <... completed>
@@ -50,39 +50,39 @@ step s2-update: <... completed>

starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit
master_create_worker_shards

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;

-step s1-multi-insert:
+step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

-step s2-multi-insert-overlap:
+step s2-multi-insert-overlap:
    INSERT INTO test_concurrent_dml VALUES (1), (4);

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit
master_create_worker_shards

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-multi-insert:
+step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

-step s2-multi-insert:
+step s2-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (3), (4);

-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-commit:
+step s2-commit:
    COMMIT;
diff --git a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out
index ad6366975..95a9a59cf 100644
--- a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out
+++ b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out
@@ -1,50 +1,50 @@
Parsed test spec with 2 sessions

starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
-step s1-load-cache:
+step s1-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

-step s2-load-cache:
+step s2-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

-step s2-set-placement-inactive:
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-repair-placement:
+
+step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-repair-placement: <... completed>
error in steps s2-commit s1-repair-placement: ERROR: target placement must be in inactive state

starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
-step s2-set-placement-inactive:
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-repair-placement:
+
+step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-repair-placement: <... completed>
diff --git a/src/test/regress/expected/isolation_copy_placement_vs_modification.out b/src/test/regress/expected/isolation_copy_placement_vs_modification.out
index 4574cb055..24abf82b1 100644
--- a/src/test/regress/expected/isolation_copy_placement_vs_modification.out
+++ b/src/test/regress/expected/isolation_copy_placement_vs_modification.out
@@ -1,508 +1,508 @@
Parsed test spec with 2 sessions

starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
-step s1-load-cache:
+step s1-load-cache:
    TRUNCATE test_copy_placement_vs_modification;

-step s1-insert:
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-1
-step s2-set-placement-inactive:
+1
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-update:
+
+step s1-update:
    UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 5
-57638 t 5
+57637 t 5
+57638 t 5

starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
-step s1-load-cache:
+step s1-load-cache:
    TRUNCATE test_copy_placement_vs_modification;

-step s1-insert:
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-1
-step s2-set-placement-inactive:
+1
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-delete:
+
+step s1-delete:
    DELETE FROM test_copy_placement_vs_modification WHERE x = 5;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-delete: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t
-57638 t
+57637 t
+57638 t

starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
-step s1-load-cache:
+step s1-load-cache:
    TRUNCATE test_copy_placement_vs_modification;

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-insert:
+
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-insert: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 10
-57638 t 10
+57637 t 10
+57638 t 10

starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
-step s1-load-cache:
+step s1-load-cache:
    TRUNCATE test_copy_placement_vs_modification;

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-copy:
+
+step s1-copy:
    COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-copy: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 5
-57638 t 5
+57637 t 5
+57638 t 5

starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
-step s1-load-cache:
+step s1-load-cache:
    TRUNCATE test_copy_placement_vs_modification;

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-ddl:
+
+step s1-ddl:
    CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-ddl: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-index-count:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-index-count:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 1
-57637 t 1
-57638 t 1
-57638 t 1
+57637 t 1
+57637 t 1
+57638 t 1
+57638 t 1

starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
-step s1-insert:
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-1
-step s2-set-placement-inactive:
+1
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-update:
+
+step s1-update:
    UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 5
-57638 t 5
+57637 t 5
+57638 t 5

starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
-step s1-insert:
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-1
-step s2-set-placement-inactive:
+1
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-delete:
+
+step s1-delete:
    DELETE FROM test_copy_placement_vs_modification WHERE x = 5;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-delete: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t
-57638 t
+57637 t
+57638 t

starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-insert:
+
+step s1-insert:
    INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-insert: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 10
-57638 t 10
+57637 t 10
+57638 t 10

starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-copy:
+
+step s1-copy:
    COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-copy: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-content:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-content:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5')
    WHERE
    shardid IN (SELECT * FROM selected_shard)
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 5
-57638 t 5
+57637 t 5
+57638 t 5

starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
-step s1-begin:
+step s1-begin:
    BEGIN;
    SET LOCAL citus.select_opens_transaction_block TO off;

-step s1-select:
+step s1-select:
    SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;

-count
+count

-0
-step s2-set-placement-inactive:
+0
+step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-repair-placement:
+step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
-
-step s1-ddl:
+
+step s1-ddl:
    CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-ddl: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s2-print-index-count:
-    SELECT
-    nodeport, success, result
-    FROM
+step s2-print-index-count:
+    SELECT
+    nodeport, success, result
+    FROM
    run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
    nodeport;

-nodeport success result
+nodeport success result

-57637 t 1
-57637 t 1
-57638 t 1
-57638 t 1
+57637 t 1
+57637 t 1
+57638 t 1
+57638 t 1
diff --git a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out
index 3a59ce155..52f882f2d 100644
--- a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out
+++ b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out
@@ -1,192 +1,192 @@
Parsed test spec with 3 sessions

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node
-
-step s1-copy:
+
+step s1-copy:
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV');

run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node
-
-step s2-copy:
+
+step s2-copy:
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 8, 80 && echo 9, 90''WITH CSV');

run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM copy_table;

-count
+count

-11
+11
restore_isolation_tester_func
-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node
-
-step s1-copy:
+
+step s1-copy:
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV');

run_commands_on_session_level_connection_to_node
-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-coordinator-drop:
+step s2-coordinator-drop:
    DROP TABLE copy_table;

-step s1-commit-worker:
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node
-
+
step s2-coordinator-drop: <... completed>
-step s2-commit:
+step s2-commit:
    COMMIT;

-step s1-stop-connection:
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM copy_table;

ERROR: relation "copy_table" does not exist
restore_isolation_tester_func
-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node
-
-step s1-copy:
+
+step s1-copy:
    SELECT run_commands_on_session_level_connection_to_node('COPY copy_table FROM PROGRAM ''echo 5, 50 && echo 6, 60 && echo 7, 70''WITH CSV');

run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node
-
-step s2-select-for-update:
+
+step s2-select-for-update:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM copy_table WHERE id=5 FOR UPDATE');

run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM copy_table;

-count
+count

-8
+8
restore_isolation_tester_func
-
+
diff --git a/src/test/regress/expected/isolation_create_distributed_table.out b/src/test/regress/expected/isolation_create_distributed_table.out
index 51c144342..418485f80 100644
--- a/src/test/regress/expected/isolation_create_distributed_table.out
+++ b/src/test/regress/expected/isolation_create_distributed_table.out
@@ -1,102 +1,102 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

create_distributed_table
-
-step s2-create_distributed_table:
+
+step s2-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create_distributed_table: <... completed>
error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed
-step s2-commit:
+step s2-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s1-create_distributed_table s2-copy_to_local_table s1-commit s2-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

create_distributed_table
-
-step s2-copy_to_local_table:
+
+step s2-copy_to_local_table:
    COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8';

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-copy_to_local_table: <... completed>
-step s2-commit:
+step s2-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-copy_to_local_table s1-create_distributed_table s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-copy_to_local_table:
+step s2-copy_to_local_table:
    COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8';

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-create_distributed_table: <... completed>
create_distributed_table
-
-step s1-commit:
+
+step s1-commit:
    COMMIT;

starting permutation: s1-copy_to_local_table s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit
-step s1-copy_to_local_table:
+step s1-copy_to_local_table:
    COPY table_to_distribute FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3 && echo 4 && echo 5 && echo 6 && echo 7 && echo 8';

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

create_distributed_table
-
-step s2-create_distributed_table:
+
+step s2-create_distributed_table:
    SELECT create_distributed_table('table_to_distribute', 'id');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create_distributed_table: <... completed>
error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed
-step s2-commit:
+step s2-commit:
    COMMIT;
diff --git a/src/test/regress/expected/isolation_create_restore_point.out b/src/test/regress/expected/isolation_create_restore_point.out
index cf90b199a..c0bb77d6b 100644
--- a/src/test/regress/expected/isolation_create_restore_point.out
+++ b/src/test/regress/expected/isolation_create_restore_point.out
@@ -3,252 +3,252 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-create-distributed s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-create-distributed:
+step s1-create-distributed:
    CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text);
    SELECT create_distributed_table('test_create_distributed_table', 'test_id');

create_distributed_table
-
-step s2-create-restore:
+
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-insert s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-insert:
+step s1-insert:
    INSERT INTO restore_table VALUES (1,'hello');

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-modify-multiple:
+step s1-modify-multiple:
    UPDATE restore_table SET data = 'world';

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-ddl s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-ddl:
+step s1-ddl:
    ALTER TABLE restore_table ADD COLUMN x int;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-copy s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-copy:
+step s1-copy:
    COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-recover s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-recover:
+step s1-recover:
    SELECT recover_prepared_transactions();

recover_prepared_transactions

-0
-step s2-create-restore:
+0
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-drop s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-drop:
+step s1-drop:
    DROP TABLE restore_table;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-add-node s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-add-node:
+step s1-add-node:
    SELECT 1 FROM master_add_inactive_node('localhost', 9999);

-?column?
+?column?

-1
-step s2-create-restore:
+1
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-remove-node s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-remove-node:
+step s1-remove-node:
    SELECT master_remove_node('localhost', 9999);

master_remove_node
-
-step s2-create-restore:
+
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-create-restore s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-create-restore:
+step s1-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test-2');

-?column?
+?column?

-1
-step s2-create-restore:
+1
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s2-begin s2-create-restore s1-modify-multiple s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-modify-multiple:
+1
+step s1-modify-multiple:
    UPDATE restore_table SET data = 'world';

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-modify-multiple: <... completed>
@@ -256,20 +256,20 @@ step s1-modify-multiple: <... completed>

starting permutation: s2-begin s2-create-restore s1-ddl s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-ddl:
+1
+step s1-ddl:
    ALTER TABLE restore_table ADD COLUMN x int;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-ddl: <... completed>
@@ -277,24 +277,24 @@ step s1-ddl: <... completed>

starting permutation: s2-begin s2-create-restore s1-multi-statement s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-multi-statement:
+1
+step s1-multi-statement:
    SET citus.multi_shard_commit_protocol TO '2pc';
    BEGIN;
    INSERT INTO restore_table VALUES (1,'hello');
    INSERT INTO restore_table VALUES (2,'hello');
    COMMIT;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-multi-statement: <... completed>
@@ -302,153 +302,153 @@ step s1-multi-statement: <... completed>

starting permutation: s1-begin s1-create-reference s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-create-reference:
+step s1-create-reference:
    CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text);
    SELECT create_reference_table('test_create_reference_table');

create_reference_table
-
-step s2-create-restore:
+
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-insert-ref s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-insert-ref:
+step s1-insert-ref:
    INSERT INTO restore_ref_table VALUES (1,'hello');

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-modify-multiple-ref s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-modify-multiple-ref:
+step s1-modify-multiple-ref:
    UPDATE restore_ref_table SET data = 'world';

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-ddl-ref s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-ddl-ref:
+step s1-ddl-ref:
    ALTER TABLE restore_ref_table ADD COLUMN x int;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s1-begin s1-copy-ref s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-copy-ref:
+step s1-copy-ref:
    COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s1-drop-ref s2-create-restore s1-commit
create_reference_table

-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
    SET citus.multi_shard_commit_protocol TO '2pc';

-step s1-drop-ref:
+step s1-drop-ref:
    DROP TABLE restore_ref_table;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-restore: <... completed>
-?column?
+?column?

-1
+1

starting permutation: s2-begin s2-create-restore s1-modify-multiple-ref s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-modify-multiple-ref:
+1
+step s1-modify-multiple-ref:
    UPDATE restore_ref_table SET data = 'world';

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-modify-multiple-ref: <... completed>
@@ -456,20 +456,20 @@ step s1-modify-multiple-ref: <... completed>

starting permutation: s2-begin s2-create-restore s1-ddl-ref s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-ddl-ref:
+1
+step s1-ddl-ref:
    ALTER TABLE restore_ref_table ADD COLUMN x int;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-ddl-ref: <... completed>
@@ -477,24 +477,24 @@ step s1-ddl-ref: <... completed>

starting permutation: s2-begin s2-create-restore s1-multi-statement-ref s2-commit
create_reference_table

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;

-step s2-create-restore:
+step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

-?column?
+?column?

-1
-step s1-multi-statement-ref:
+1
+step s1-multi-statement-ref:
    SET citus.multi_shard_commit_protocol TO '2pc';
    BEGIN;
    INSERT INTO restore_ref_table VALUES (1,'hello');
    INSERT INTO restore_ref_table VALUES (2,'hello');
    COMMIT;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-multi-statement-ref: <... completed>
diff --git a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out
index 023e320c6..ed61cf589 100644
--- a/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out
+++ b/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out
@@ -1,32 +1,32 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-begin:
+localhost 57637
+step s1-begin:
    BEGIN;

-step s1-add-node-2:
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s2-create-table-1:
+1
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-table-1: <... completed>
create_distributed_table
-
-step s1-show-placements:
+
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -36,49 +36,49 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57638
-localhost 57638
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57638
+localhost 57638
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
-
+
+

starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-begin:
+localhost 57637
+step s1-begin:
    BEGIN;

-step s1-add-node-2:
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s2-create-table-1:
+1
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');

-step s1-abort:
+step s1-abort:
    ABORT;

step s2-create-table-1: <... completed>
create_distributed_table
-
-step s1-show-placements:
+
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -88,29 +88,29 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57637
-localhost 57637
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57637
+localhost 57637
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
+

starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s2-begin:
+localhost 57637
+step s2-begin:
    BEGIN;

-step s2-create-table-1:
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
@@ -118,18 +118,18 @@ step s2-create-table-1:

create_distributed_table
-
-step s1-add-node-2:
+
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-add-node-2: <... completed>
-?column?
+?column?

-1
-step s1-show-placements:
+1
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -139,55 +139,55 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57637
-localhost 57637
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57637
+localhost 57637
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
-
+
+

starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;

-step s1-remove-node-2:
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node
-
-step s2-create-table-1:
+
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-table-1: <... completed>
create_distributed_table
-
-step s1-show-placements:
+
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -197,54 +197,54 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57637
-localhost 57637
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57637
+localhost 57637
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
+

starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;

-step s1-remove-node-2:
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node
-
-step s2-create-table-1:
+
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');

-step s1-abort:
+step s1-abort:
    ABORT;

step s2-create-table-1: <... completed>
create_distributed_table
-
-step s1-show-placements:
+
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -254,36 +254,36 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57638
-localhost 57638
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57638
+localhost 57638
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
-
+
+

starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;

-step s2-create-table-1:
+step s2-create-table-1:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
@@ -291,16 +291,16 @@ step s2-create-table-1:

create_distributed_table
-
-step s1-remove-node-2:
+
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-remove-node-2: <... completed>
error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements
-step s1-show-placements:
+step s1-show-placements:
    SELECT
    nodename, nodeport
    FROM
@@ -310,74 +310,74 @@ step s1-show-placements:
    ORDER BY
    nodename, nodeport;

-nodename nodeport
+nodename nodeport

-localhost 57637
-localhost 57637
-localhost 57638
-localhost 57638
-step s2-select:
+localhost 57637
+localhost 57637
+localhost 57638
+localhost 57638
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
-
+
+

starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;

-step s1-remove-node-2:
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node
-
-step s2-create-table-2:
+
+step s2-create-table-2:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 2;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-table-2: <... completed>
error in steps s1-commit s2-create-table-2: ERROR: replication_factor (2) exceeds number of worker nodes (1)
-step s2-select:
+step s2-select:
    SELECT * FROM dist_table;

ERROR: relation "dist_table" does not exist
master_remove_node
-
+

starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;

-step s2-create-table-2:
+step s2-create-table-2:
    SET citus.shard_count TO 4;
    SET citus.shard_replication_factor TO 2;
    CREATE TABLE dist_table (x int, y int);
@@ -385,83 +385,83 @@ step s2-create-table-2:

create_distributed_table
-
-step s1-remove-node-2:
+
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-remove-node-2: <... completed>
error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements
-step s2-select:
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
-
+
+

starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;

-step s1-remove-node-2:
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node
-
-step s2-create-append-table:
+
+step s2-create-append-table:
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x', 'append');
    SELECT 1 FROM master_create_empty_shard('dist_table');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-create-append-table: <... completed>
create_distributed_table
-
-?column?
-1
-step s2-select:
+?column?
+
+1
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
+

starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select
-node_name node_port
+node_name node_port

-localhost 57637
-step s1-add-node-2:
+localhost 57637
+step s1-add-node-2:
    SELECT 1 FROM master_add_node('localhost', 57638);

-?column?
+?column?

-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;

-step s2-create-append-table:
+step s2-create-append-table:
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE dist_table (x int, y int);
    SELECT create_distributed_table('dist_table', 'x', 'append');
@@ -469,25 +469,25 @@ step s2-create-append-table:

create_distributed_table
-
-?column?
-1
-step s1-remove-node-2:
+?column?
+
+1
+step s1-remove-node-2:
    SELECT * FROM master_remove_node('localhost', 57638);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-remove-node-2: <... completed>
master_remove_node
-
-step s2-select:
+
+step s2-select:
    SELECT * FROM dist_table;

-x y
+x y

master_remove_node
-
+
diff --git a/src/test/regress/expected/isolation_data_migration.out b/src/test/regress/expected/isolation_data_migration.out
index b77878c0f..fc72167db 100644
--- a/src/test/regress/expected/isolation_data_migration.out
+++ b/src/test/regress/expected/isolation_data_migration.out
@@ -1,151 +1,151 @@
Parsed test spec with 2 sessions

starting permutation: s2-begin s2-copy s1-create_distributed_table s2-commit s2-select
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-copy:
+step s2-copy:
    COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('migration_table', 'test_id');

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-create_distributed_table: <... completed>
create_distributed_table
-
-step s2-select:
+
+step s2-select:
    SELECT * FROM migration_table ORDER BY test_id;

-test_id data
+test_id data

-1 hello
+1 hello

starting permutation: s1-begin s1-create_distributed_table s2-copy s1-commit s2-select
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('migration_table', 'test_id');

create_distributed_table
-
-step s2-copy:
+
+step s2-copy:
    COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-copy: <... completed>
-step s2-select:
+step s2-select:
    SELECT * FROM migration_table ORDER BY test_id;

-test_id data
+test_id data

-1 hello
+1 hello

starting permutation: s2-begin s2-insert s1-create_distributed_table s2-commit s2-select
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert:
+step s2-insert:
    INSERT INTO migration_table VALUES (1, 'hello');

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('migration_table', 'test_id');

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-create_distributed_table: <... completed>
create_distributed_table
-
-step s2-select:
+
+step s2-select:
    SELECT * FROM migration_table ORDER BY test_id;

-test_id data
+test_id data

-1 hello
+1 hello

starting permutation: s1-begin s1-create_distributed_table s2-insert s1-commit s2-select
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('migration_table', 'test_id');

create_distributed_table
-
-step s2-insert:
+
+step s2-insert:
    INSERT INTO migration_table VALUES (1, 'hello');

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-insert: <... completed>
-step s2-select:
+step s2-select:
    SELECT * FROM migration_table ORDER BY test_id;

-test_id data
+test_id data

-1 hello
+1 hello

starting permutation: s1-begin-serializable s2-copy s1-create_distributed_table s1-commit s2-select
-step s1-begin-serializable:
+step s1-begin-serializable:
    BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
    SELECT 1;

-?column?
+?column?

-1
-step s2-copy:
+1
+step s2-copy:
    COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;

-step s1-create_distributed_table:
+step s1-create_distributed_table:
    SELECT create_distributed_table('migration_table', 'test_id');

create_distributed_table
-
-step s1-commit:
+
+step s1-commit:
    COMMIT;

-step s2-select:
+step s2-select:
    SELECT * FROM migration_table ORDER BY test_id;

-test_id data
+test_id data

-1 hello
+1 hello

starting permutation: s1-begin-serializable s2-insert s1-create_distributed_table s1-commit s2-select
-step s1-begin-serializable:
+step s1-begin-serializable:
    BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
    SELECT 1;

-?column?
+?column?
-1 -step s2-insert: +1 +step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); -step s1-create_distributed_table: +step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table - -step s1-commit: + +step s1-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM migration_table ORDER BY test_id; -test_id data +test_id data -1 hello +1 hello diff --git a/src/test/regress/expected/isolation_ddl_vs_all.out b/src/test/regress/expected/isolation_ddl_vs_all.out index 31cfb46a5..fbf8677b0 100644 --- a/src/test/regress/expected/isolation_ddl_vs_all.out +++ b/src/test/regress/expected/isolation_ddl_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -20,12 +20,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); @@ -40,12 +40,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -66,12 +66,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -92,12 +92,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -118,12 +118,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; @@ -142,12 +142,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit 
s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -163,12 +163,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -184,12 +184,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -210,12 +210,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; @@ -234,12 +234,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -255,12 +255,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -277,12 +277,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -290,7 +290,7 @@ step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); @@ -300,12 +300,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 
'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -321,12 +321,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -338,7 +338,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers @@ -347,12 +347,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -362,7 +362,7 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -57344 +57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -371,12 +371,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -392,12 +392,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -409,7 +409,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -418,12 +418,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -433,7 +433,7 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -57344 +57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -442,12 +442,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -463,12 +463,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -480,7 +480,7 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -489,19 +489,19 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -512,12 +512,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -533,12 +533,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -547,7 +547,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> @@ -559,18 +559,18 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); @@ -580,12 +580,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM ddl_hash; @@ -599,12 +599,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -612,7 +612,7 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> @@ -623,19 +623,19 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -646,12 +646,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -667,12 +667,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -681,7 +681,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> @@ -693,19 +693,19 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size -57344 +57344 step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-commit: COMMIT; @@ -716,12 +716,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -737,12 +737,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table - + step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -751,7 +751,7 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table - + step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> @@ -763,4 +763,4 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_delete_vs_all.out b/src/test/regress/expected/isolation_delete_vs_all.out index c3b62be11..849c8ca7e 100644 --- a/src/test/regress/expected/isolation_delete_vs_all.out +++ b/src/test/regress/expected/isolation_delete_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -56,12 +56,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -71,9 +71,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -81,12 +81,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; @@ -97,9 +97,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -107,12 +107,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; @@ -120,9 +120,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY delete_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -130,12 +130,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -145,9 +145,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -155,12 +155,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -171,9 +171,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -181,12 +181,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -196,9 +196,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -206,12 +206,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -219,21 +219,21 @@ step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -245,20 +245,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -8 +8 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -268,17 +268,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -292,12 +292,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -307,9 +307,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -317,12 +317,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; @@ -333,9 +333,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers @@ -343,12 +343,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -358,9 +358,9 @@ step s1-commit: COMMIT; step s2-delete: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -368,12 +368,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -384,9 +384,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -394,12 +394,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -409,9 +409,9 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -419,34 +419,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size -57344 +57344 step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -455,15 +455,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('delete_hash', 'id'); create_distributed_table - + step s2-delete: DELETE FROM delete_hash 
WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; -count +count -8 +8 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out index 403e948f6..22ca208ae 100644 --- a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out +++ b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out @@ -1,544 +1,544 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (1, 1)'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-insert: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 +1 1 +1 1 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -id value +2 20 +id value -2 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT 
run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select: + +step s2-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-insert-select: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 -2 2 +1 1 +1 1 +2 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=2 WHERE id=1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step 
s2-update: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -3 10 -id value +2 20 +3 10 +id value -1 2 -2 2 +1 2 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 1, 1''WITH CSV'); -step s1-rollback-worker: +step s1-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - + step s2-copy: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -1 10 -2 20 -id value +1 10 +2 20 +id value -1 1 -1 1 -2 2 +1 1 +1 1 +2 2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE dist_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -3 10 -id value +2 20 +3 10 +id value restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-udpate: + +step s2-select-for-udpate: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-udpate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-display: + +step s3-display: SELECT * FROM ref_table ORDER BY id, value; SELECT * FROM dist_table ORDER BY id, value; -id value +id value -2 20 -id value +2 20 +id value -2 2 +2 2 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_distributed_deadlock_detection.out b/src/test/regress/expected/isolation_distributed_deadlock_detection.out index 182ad05c1..ad08e5ff7 100644 --- a/src/test/regress/expected/isolation_distributed_deadlock_detection.out +++ b/src/test/regress/expected/isolation_distributed_deadlock_detection.out @@ -1,958 +1,958 @@ Parsed test spec with 7 sessions starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1-rep-2 s2-update-2-rep-2 s2-update-1-rep-2 deadlock-checker-call s1-update-2-rep-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1-rep-2: +step s1-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; -step s2-update-2-rep-2: +step s2-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; -step s2-update-1-rep-2: +step s2-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2-rep-2: +f +step s1-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1-rep-2: <... completed> step s1-update-2-rep-2: <... 
completed> error in steps deadlock-checker-call s2-update-1-rep-2 s1-update-2-rep-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-set-2pc s2-set-2pc s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-set-2pc: +step s1-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; -step s2-set-2pc: +step s2-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-upsert-select-all deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s1-update-2: +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-upsert-select-all: +f +step s2-upsert-select-all: INSERT INTO deadlock_detection_test SELECT * FROM deadlock_detection_test ON CONFLICT(user_id) DO UPDATE SET some_val = deadlock_detection_test.some_val + 5 RETURNING *; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-2: <... completed> step s2-upsert-select-all: <... 
completed> error in steps deadlock-checker-call s1-update-2 s2-upsert-select-all: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-ddl deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s1-update-2: +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-ddl: +f +step s2-ddl: ALTER TABLE deadlock_detection_test ADD COLUMN test_col INT; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-2: <... completed> step s2-ddl: <... completed> error in steps deadlock-checker-call s1-update-2 s2-ddl: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-insert-dist-10 s2-insert-local-10 s2-insert-dist-10 s1-insert-local-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-insert-dist-10: +step s1-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); -step s2-insert-local-10: +step s2-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); -step s2-insert-dist-10: +step s2-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); -step s1-insert-local-10: +step s1-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-insert-dist-10: <... completed> step s1-insert-local-10: <... completed> error in steps deadlock-checker-call s2-insert-dist-10 s1-insert-local-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-insert-ref-11 s2-insert-ref-11 s1-insert-ref-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-ref-10: +step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step s1-insert-ref-11: +step s1-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); -step s2-insert-ref-11: +step s2-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); -step s1-insert-ref-10: +step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-insert-ref-11: <... completed> step s1-insert-ref-10: <... 
completed> error in steps deadlock-checker-call s2-insert-ref-11 s1-insert-ref-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-update-1 deadlock-checker-call s2-update-1 s1-insert-ref-10 deadlock-checker-call s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-ref-10: +step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-1: +f +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step s1-insert-ref-10: +step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-1: <... completed> step s1-insert-ref-10: <... completed> error in steps deadlock-checker-call s2-update-1 s1-insert-ref-10: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s1-update-1 s2-update-2 s3-update-3 deadlock-checker-call s1-update-2 s2-update-3 s3-update-1 deadlock-checker-call s3-commit s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s1-update-2: +f +step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-1: +step s3-update-1: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> step s3-update-1: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-1: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-update-2: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s2-update-1 s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s2-update-3 deadlock-checker-call s3-commit s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s2-update-1: +step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-3: +f +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; step s1-update-1: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s4-update-4 s2-update-3 deadlock-checker-call s3-commit s2-commit s1-commit s4-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s4-update-4: +f +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... 
completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s4-update-1 s1-update-1 deadlock-checker-call s2-update-2 s3-update-3 s2-update-3 s3-update-2 deadlock-checker-call s3-commit s2-commit s4-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s4-update-1: +step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-2: +f +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> step s3-update-2: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-2: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; step s1-update-1: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s4-update-4 s2-update-2 s3-update-3 s3-update-2 s4-update-1 s1-update-4 deadlock-checker-call s1-commit s4-commit s2-update-3 deadlock-checker-call s2-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s4-update-1: +step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s4-update-1: <... completed> step s1-update-4: <... 
completed> error in steps deadlock-checker-call s4-update-1 s1-update-4: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s2-commit: +step s2-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s1-update-1 s5-update-5 s3-update-2 s2-update-3 s4-update-4 s3-update-4 deadlock-checker-call s6-update-6 s4-update-6 s1-update-5 s5-update-1 deadlock-checker-call s1-commit s5-commit s6-commit s4-commit s3-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s3-update-4: +step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s6-update-6: +f +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s4-update-6: +step s4-update-6: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 6; -step s1-update-5: +step s1-update-5: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 5; -step s5-update-1: +step s5-update-1: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-update-5: <... completed> step s5-update-1: <... completed> error in steps deadlock-checker-call s1-update-5 s5-update-1: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-commit: +step s1-commit: COMMIT; -step s5-commit: +step s5-commit: COMMIT; -step s6-commit: +step s6-commit: COMMIT; step s4-update-6: <... completed> -step s4-commit: +step s4-commit: COMMIT; step s3-update-4: <... 
completed> -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s6-update-6 s5-update-5 s5-update-6 s4-update-4 s1-update-4 s4-update-5 deadlock-checker-call s2-update-3 s3-update-2 s2-update-2 s3-update-3 deadlock-checker-call s6-commit s5-commit s4-commit s1-commit s3-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s6-update-6: +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s5-update-6: +step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step s4-update-5: +step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s2-update-3: +f +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-2: <... completed> step s3-update-3: <... completed> error in steps deadlock-checker-call s2-update-2 s3-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s6-commit: +step s6-commit: COMMIT; step s5-update-6: <... completed> -step s5-commit: +step s5-commit: COMMIT; step s4-update-5: <... completed> -step s4-commit: +step s4-commit: COMMIT; step s1-update-4: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s5-update-5 s3-update-2 s2-update-2 s4-update-4 s3-update-4 s4-update-5 s1-update-4 deadlock-checker-call s6-update-6 s5-update-6 s6-update-5 deadlock-checker-call s5-commit s6-commit s4-commit s3-commit s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s6-begin: +step s6-begin: BEGIN; -step s5-update-5: +step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; -step s3-update-2: +step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; -step s2-update-2: +step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; -step s4-update-4: +step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; -step s3-update-4: +step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; -step s4-update-5: +step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; -step s1-update-4: +step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s6-update-6: +f +step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; -step s5-update-6: +step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; -step s6-update-5: +step s6-update-5: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 5; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s5-update-6: <... completed> step s6-update-5: <... completed> error in steps deadlock-checker-call s5-update-6 s6-update-5: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s5-commit: +step s5-commit: COMMIT; step s4-update-5: <... completed> -step s6-commit: +step s6-commit: COMMIT; -step s4-commit: +step s4-commit: COMMIT; step s3-update-4: <... completed> -step s3-commit: +step s3-commit: COMMIT; step s2-update-2: <... completed> step s1-update-4: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s1-update-1 s3-update-3 s2-update-4 s2-update-3 s4-update-2 s5-random-adv-lock s4-random-adv-lock s3-update-1 s1-update-2-4 deadlock-checker-call deadlock-checker-call s5-commit s4-commit s2-commit s1-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s3-begin: +step s3-begin: BEGIN; -step s4-begin: +step s4-begin: BEGIN; -step s5-begin: +step s5-begin: BEGIN; -step s1-update-1: +step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; -step s3-update-3: +step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; -step s2-update-4: +step s2-update-4: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 4; -step s2-update-3: +step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; -step s4-update-2: +step s4-update-2: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 2; -step s5-random-adv-lock: +step s5-random-adv-lock: SELECT pg_advisory_xact_lock(8765); pg_advisory_xact_lock - -step s4-random-adv-lock: + +step s4-random-adv-lock: SELECT pg_advisory_xact_lock(8765); -step s3-update-1: +step s3-update-1: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; -step s1-update-2-4: +step s1-update-2-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2 OR user_id = 4; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s2-update-3: <... completed> error in steps deadlock-checker-call s2-update-3: ERROR: canceling the transaction since it was involved in a distributed deadlock -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -f -step s5-commit: +f +step s5-commit: COMMIT; step s4-random-adv-lock: <... completed> pg_advisory_xact_lock - -step s4-commit: + +step s4-commit: COMMIT; step s1-update-2-4: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; step s3-update-1: <... 
completed> -step s3-commit: +step s3-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out b/src/test/regress/expected/isolation_distributed_transaction_id.out index cd6e9f130..8a9bfe565 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -1,63 +1,63 @@ Parsed test spec with 3 sessions starting permutation: s1-begin s1-assign-transaction-id s1-get-all-transactions s2-begin s2-assign-transaction-id s2-get-all-transactions s3-begin s3-assign-transaction-id s3-get-all-transactions s1-commit s2-commit s3-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-assign-transaction-id: +step s1-assign-transaction-id: SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id - -step s1-get-all-transactions: + +step s1-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 1 1 Wed Dec 31 16:00:00 2014 PST -step s2-begin: +step s2-begin: BEGIN; -step s2-assign-transaction-id: +step s2-assign-transaction-id: SELECT assign_distributed_transaction_id(2, 2, '2015-01-02 00:00:00+0'); assign_distributed_transaction_id - -step s2-get-all-transactions: + +step s2-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 2 2 Thu Jan 01 16:00:00 2015 PST -step s3-begin: +step s3-begin: BEGIN; -step s3-assign-transaction-id: +step s3-assign-transaction-id: SELECT assign_distributed_transaction_id(3, 3, '2015-01-03 00:00:00+0'); assign_distributed_transaction_id - -step s3-get-all-transactions: + +step s3-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 3 3 Fri Jan 02 16:00:00 2015 PST -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s3-commit: +step s3-commit: COMMIT; starting permutation: s1-create-table s1-begin s1-insert s1-verify-current-xact-is-on-worker s1-commit -step s1-create-table: +step s1-create-table: -- some tests also use distributed table CREATE TABLE distributed_transaction_id_table(some_value int, other_value int); SET citus.shard_count TO 4; @@ -65,14 +65,14 @@ step s1-create-table: create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO distributed_transaction_id_table VALUES (1, 1); -step s1-verify-current-xact-is-on-worker: +step s1-verify-current-xact-is-on-worker: SELECT remote.nodeport, remote.result = row(xact.initiator_node_identifier, xact.transaction_number)::text AS xact_exists @@ -84,39 +84,39 @@ step s1-verify-current-xact-is-on-worker: $$) as remote ORDER BY remote.nodeport ASC; -nodeport xact_exists +nodeport xact_exists -57637 t -57638 t -step s1-commit: +57637 t +57638 t +step s1-commit: COMMIT; starting permutation: s1-begin s1-assign-transaction-id s1-has-transaction-number s2-vacuum s1-has-transaction-number s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-assign-transaction-id: +step s1-assign-transaction-id: SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id - -step 
s1-has-transaction-number: + +step s1-has-transaction-number: SELECT transaction_number > 0 FROM get_current_transaction_id(); -?column? +?column? -t -step s2-vacuum: +t +step s2-vacuum: VACUUM FULL pg_dist_partition; -step s1-has-transaction-number: +step s1-has-transaction-number: SELECT transaction_number > 0 FROM get_current_transaction_id(); -?column? +?column? -t -step s1-commit: +t +step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_dml_vs_repair.out b/src/test/regress/expected/isolation_dml_vs_repair.out index 193a28897..499632c66 100644 --- a/src/test/regress/expected/isolation_dml_vs_repair.out +++ b/src/test/regress/expected/isolation_dml_vs_repair.out @@ -3,202 +3,202 @@ Parsed test spec with 2 sessions starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-begin: +step s1-begin: BEGIN; -step s1-insertone: +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement - + starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit master_create_worker_shards - -step s1-insertone: + +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-invalidate-57637: +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-begin: +step s1-begin: BEGIN; -step s1-insertall: +step s1-insertall: INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); -step s1-commit: +step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement - + starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-insertone: + +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s2-commit: +step s2-commit: COMMIT; step s1-insertone: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -step s2-invalidate-57637: +1 1 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 +1 1 starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-prepared-insertone: +step s1-prepared-insertone: EXECUTE insertone; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-prepared-insertone: + +step s1-prepared-insertone: EXECUTE insertone; -step s2-commit: +step s2-commit: COMMIT; step s1-prepared-insertone: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 1 -step s2-invalidate-57637: +1 1 +1 1 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 1 +1 1 +1 1 starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards - -step s2-invalidate-57637: + +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s1-insertone: +step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); -step s1-prepared-insertall: +step s1-prepared-insertall: EXECUTE insertall; -step s2-begin: +step s2-begin: BEGIN; -step s2-repair: +step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement - -step s1-prepared-insertall: + +step s1-prepared-insertall: EXECUTE insertall; -step s2-commit: +step s2-commit: COMMIT; step s1-prepared-insertall: <... 
completed> -step s2-invalidate-57638: +step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 2 -1 2 -1 3 -step s2-invalidate-57637: +1 1 +1 2 +1 2 +1 3 +step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; -step s2-revalidate-57638: +step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; -step s1-display: +step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; -test_id data +test_id data -1 1 -1 2 -1 2 -1 3 +1 1 +1 2 +1 2 +1 3 diff --git a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out index 499ca18f2..10c988f33 100644 --- a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out +++ b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out @@ -1,218 +1,218 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES(5, 55)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter: + +step s2-alter: ALTER TABLE dist_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -6 +6 restore_isolation_tester_func - + starting permutation: s1-begin s1-index s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit s2-commit-worker s2-stop-connection -step s1-begin: +step s1-begin: BEGIN; -step s1-index: +step s1-index: CREATE INDEX dist_table_index ON dist_table (id); -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit: + +step s1-commit: COMMIT; -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-update: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY dist_table_index_conc ON dist_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_drop_shards.out b/src/test/regress/expected/isolation_drop_shards.out index d007f502c..4f0a91d48 100644 --- a/src/test/regress/expected/isolation_drop_shards.out +++ b/src/test/regress/expected/isolation_drop_shards.out @@ -1,242 +1,242 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-drop-all-shards s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-truncate: +16 +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-drop-all-shards s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-apply-delete-command: +16 +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-drop-all-shards s2-drop-all-shards s1-commit -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-drop-all-shards: +16 +step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards -0 +0 starting permutation: s1-begin s1-drop-all-shards s2-select s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-drop-all-shards: +step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards -16 -step s2-select: +16 +step s2-select: SELECT * FROM append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-select: <... completed> -test_id data +test_id data starting permutation: s1-begin s1-apply-delete-command s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-truncate: +16 +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-apply-delete-command s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-apply-delete-command: +16 +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-apply-delete-command s2-drop-all-shards s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-apply-delete-command: +step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command -16 -step s2-drop-all-shards: +16 +step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards -0 +0 starting permutation: s1-begin s1-truncate s2-truncate s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-truncate: +step s2-truncate: TRUNCATE append_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-truncate s2-apply-delete-command s1-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s1-truncate: +step s1-truncate: TRUNCATE append_table; -step s2-apply-delete-command: +step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command -0 +0 starting permutation: s1-begin s1-truncate s2-drop-all-shards s1-commit -?column? +?column? 
-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;
-step s1-truncate:
+step s1-truncate:
    TRUNCATE append_table;
-step s2-drop-all-shards:
+step s2-drop-all-shards:
    SELECT master_drop_all_shards('append_table', 'public', 'append_table');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-drop-all-shards: <... completed>
master_drop_all_shards
-0
+0

starting permutation: s1-begin s1-truncate s2-select s1-commit
-?column?
+?column?
-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;
-step s1-truncate:
+step s1-truncate:
    TRUNCATE append_table;
-step s2-select:
+step s2-select:
    SELECT * FROM append_table;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-select: <... completed>
-test_id data
+test_id data

diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out
index 01a661878..06c50699c 100644
--- a/src/test/regress/expected/isolation_drop_vs_all.out
+++ b/src/test/regress/expected/isolation_drop_vs_all.out
@@ -3,7 +3,7 @@ Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -17,12 +17,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -41,12 +41,12 @@ run_command_on_workers
(localhost,57638,t,0)
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
step s1-begin: BEGIN;
@@ -66,12 +66,12 @@ run_command_on_workers
(localhost,57638,t,0)
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
@@ -88,12 +88,12 @@ run_command_on_workers
(localhost,57638,t,0)
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -112,12 +112,12 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -137,12 +137,12 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -161,12 +161,12 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -180,12 +180,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
-
+

starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
-
+
step s1-drop: DROP TABLE drop_hash;
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
@@ -201,12 +201,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -224,12 +224,12 @@ run_command_on_workers
(localhost,57638,t,0)
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
step s1-begin: BEGIN;
@@ -248,12 +248,12 @@ run_command_on_workers
(localhost,57638,t,0)
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -271,12 +271,12 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -295,12 +295,12 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -318,19 +318,19 @@ run_command_on_workers
(localhost,57638,t,"")
restore_isolation_tester_func
-
+

starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('drop_hash');
citus_total_relation_size
-57344
+57344
step s2-drop: DROP TABLE drop_hash;
step s1-commit: COMMIT;
step s2-commit: COMMIT;
@@ -338,12 +338,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
-
+

starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
-
+
step s1-drop: DROP TABLE drop_hash;
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
@@ -352,7 +352,7 @@ step s2-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id');
create_distributed_table
-
+
step s2-drop: DROP TABLE drop_hash;
step s1-commit: COMMIT;
step s2-drop: <... completed>
@@ -361,4 +361,4 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
-
+

diff --git a/src/test/regress/expected/isolation_dump_global_wait_edges.out b/src/test/regress/expected/isolation_dump_global_wait_edges.out
index 74e699f61..037be6803 100644
--- a/src/test/regress/expected/isolation_dump_global_wait_edges.out
+++ b/src/test/regress/expected/isolation_dump_global_wait_edges.out
@@ -1,19 +1,19 @@
Parsed test spec with 4 sessions

starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s1-update:
+step s1-update:
    UPDATE distributed_table SET y = 1 WHERE x = 1;
-step s2-update:
+step s2-update:
    UPDATE distributed_table SET y = 2 WHERE x = 1;
-step detector-dump-wait-edges:
+step detector-dump-wait-edges:
    SELECT
    waiting_transaction_num,
    blocking_transaction_num,
@@ -28,39 +28,39 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-357 356 f
+357 356 f
transactionnumberwaitingtransactionnumbers
-356
-357 356
-step s1-abort:
+356
+357 356
+step s1-abort:
    ABORT;
step s2-update: <... completed>
-step s2-abort:
+step s2-abort:
    ABORT;

starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s3-begin:
+step s3-begin:
    BEGIN;
-step s1-update:
+step s1-update:
    UPDATE distributed_table SET y = 1 WHERE x = 1;
-step s2-update:
+step s2-update:
    UPDATE distributed_table SET y = 2 WHERE x = 1;
-step s3-update:
+step s3-update:
    UPDATE distributed_table SET y = 3 WHERE x = 1;
-step detector-dump-wait-edges:
+step detector-dump-wait-edges:
    SELECT
    waiting_transaction_num,
    blocking_transaction_num,
@@ -75,22 +75,22 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-361 360 f
-362 360 f
-362 361 t
+361 360 f
+362 360 f
+362 361 t
transactionnumberwaitingtransactionnumbers
-360
-361 360
-362 360,361
-step s1-abort:
+360
+361 360
+362 360,361
+step s1-abort:
    ABORT;
step s2-update: <... completed>
-step s2-abort:
+step s2-abort:
    ABORT;
step s3-update: <... completed>
-step s3-abort:
+step s3-abort:
    ABORT;

diff --git a/src/test/regress/expected/isolation_dump_local_wait_edges.out b/src/test/regress/expected/isolation_dump_local_wait_edges.out
index 1ecaccf0f..73f770dba 100644
--- a/src/test/regress/expected/isolation_dump_local_wait_edges.out
+++ b/src/test/regress/expected/isolation_dump_local_wait_edges.out
@@ -1,27 +1,27 @@
Parsed test spec with 4 sessions

starting permutation: dist11-begin dist13-begin dist11-update dist13-update detector-dump-wait-edges dist11-abort dist13-abort
-step dist11-begin:
+step dist11-begin:
    BEGIN;
    SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0');
assign_distributed_transaction_id
-
-step dist13-begin:
+
+step dist13-begin:
    BEGIN;
    SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');
assign_distributed_transaction_id
-
-step dist11-update:
+
+step dist11-update:
    UPDATE local_table SET y = 1 WHERE x = 1;
-step dist13-update:
+step dist13-update:
    UPDATE local_table SET y = 3 WHERE x = 1;
-step detector-dump-wait-edges:
+step detector-dump-wait-edges:
    SELECT
    waiting_node_id,
    waiting_transaction_num,
@@ -37,33 +37,33 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting
-13 1 11 1 f
-step dist11-abort:
+13 1 11 1 f
+step dist11-abort:
    ABORT;
step dist13-update: <... completed>
-step dist13-abort:
+step dist13-abort:
    ABORT;

starting permutation: local-begin dist13-begin local-update dist13-update detector-dump-wait-edges local-abort dist13-abort
-step local-begin:
+step local-begin:
    BEGIN;
-step dist13-begin:
+step dist13-begin:
    BEGIN;
    SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');
assign_distributed_transaction_id
-
-step local-update:
+
+step local-update:
    UPDATE local_table SET y = 2 WHERE x = 1;
-step dist13-update:
+step dist13-update:
    UPDATE local_table SET y = 3 WHERE x = 1;
-step detector-dump-wait-edges:
+step detector-dump-wait-edges:
    SELECT
    waiting_node_id,
    waiting_transaction_num,
@@ -79,43 +79,43 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting
-13 1 0 f
-step local-abort:
+13 1 0 f
+step local-abort:
    ABORT;
step dist13-update: <... completed>
-step dist13-abort:
+step dist13-abort:
    ABORT;

starting permutation: dist11-begin local-begin dist13-begin dist11-update local-update dist13-update detector-dump-wait-edges dist11-abort local-abort dist13-abort
-step dist11-begin:
+step dist11-begin:
    BEGIN;
    SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0');
assign_distributed_transaction_id
-
-step local-begin:
+
+step local-begin:
    BEGIN;
-step dist13-begin:
+step dist13-begin:
    BEGIN;
    SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');
assign_distributed_transaction_id
-
-step dist11-update:
+
+step dist11-update:
    UPDATE local_table SET y = 1 WHERE x = 1;
-step local-update:
+step local-update:
    UPDATE local_table SET y = 2 WHERE x = 1;
-step dist13-update:
+step dist13-update:
    UPDATE local_table SET y = 3 WHERE x = 1;
-step detector-dump-wait-edges:
+step detector-dump-wait-edges:
    SELECT
    waiting_node_id,
    waiting_transaction_num,
@@ -131,16 +131,16 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting
-0 11 1 f
-13 1 0 t
-step dist11-abort:
+0 11 1 f
+13 1 0 t
+step dist11-abort:
    ABORT;
step local-update: <... completed>
-step local-abort:
+step local-abort:
    ABORT;
step dist13-update: <... completed>
-step dist13-abort:
+step dist13-abort:
    ABORT;

diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
index d93c3d60c..cf12b2d2e 100644
--- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
+++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
@@ -1,10 +1,10 @@
Parsed test spec with 3 sessions

starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -19,62 +19,62 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-public-schema:
+1
+step s2-public-schema:
    SET search_path TO public;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
-step s2-print-distributed-objects:
+
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -89,37 +89,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -134,68 +134,68 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-public-schema:
+1
+step s2-public-schema:
    SET search_path TO public;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -210,37 +210,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -255,45 +255,45 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s2-public-schema:
+step s2-public-schema:
    SET search_path TO public;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
@@ -302,21 +302,21 @@ step s2-create-table:
create_distributed_table
-
-step s1-add-worker:
+
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-step s2-commit:
+step s2-commit:
    COMMIT;
step s1-add-worker: <... completed>
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -331,37 +331,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -376,63 +376,63 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-create-schema:
+1
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
-step s2-print-distributed-objects:
+
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -448,37 +448,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -493,69 +493,69 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-create-schema:
+1
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -571,37 +571,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -616,46 +616,46 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s2-create-schema:
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
@@ -664,21 +664,21 @@ step s2-create-table:
create_distributed_table
-
-step s1-add-worker:
+
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-step s2-commit:
+step s2-commit:
    COMMIT;
step s1-add-worker: <... completed>
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -694,37 +694,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s3-begin s1-add-worker s2-create-table s3-use-schema s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -739,88 +739,88 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s2-create-schema:
+
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s3-begin:
+step s3-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-create-table:
+1
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s3-use-schema:
+step s3-use-schema:
    SET search_path TO myschema;
-step s3-create-table:
+step s3-create-table:
    CREATE TABLE t2 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t2', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
step s3-create-table: <... completed>
create_distributed_table
-
-step s3-commit:
+
+step s3-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -836,37 +836,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -881,55 +881,55 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-add-worker:
+
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-create-schema:
+1
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s3-begin:
+step s3-begin:
    BEGIN;
-step s3-use-schema:
+step s3-use-schema:
    SET search_path TO myschema;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
@@ -938,24 +938,24 @@ step s2-create-table:
create_distributed_table
-
-step s3-create-table:
+
+step s3-create-table:
    CREATE TABLE t2 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t2', 'a');
-step s2-commit:
+step s2-commit:
    COMMIT;
step s3-create-table: <... completed>
create_distributed_table
-
-step s3-commit:
+
+step s3-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -971,37 +971,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s2-commit s3-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1016,89 +1016,89 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s3-begin:
+step s3-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-create-schema:
+1
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s3-create-schema2:
+step s3-create-schema2:
    CREATE SCHEMA myschema2;
    SET search_path TO myschema2;
-step s2-create-table:
+step s2-create-table:
    CREATE TABLE t1 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t1', 'a');
-step s3-create-table:
+step s3-create-table:
    CREATE TABLE t2 (a int, b int);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('t2', 'a');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-table: <... completed>
create_distributed_table
-
+
step s3-create-table: <... completed>
create_distributed_table
-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
-step s3-commit:
+step s3-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1115,37 +1115,37 @@ pg_identify_object_as_address
(schema,{myschema},{})
(schema,{myschema2},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1160,55 +1160,55 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-public-schema:
+1
+step s2-public-schema:
    SET search_path TO public;
-step s2-create-type:
+step s2-create-type:
    CREATE TYPE tt1 AS (a int, b int);
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-type: <... completed>
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1224,37 +1224,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(type,{public.tt1},{})
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1269,54 +1269,54 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-public-schema:
+step s2-public-schema:
    SET search_path TO public;
-step s2-create-type:
+step s2-create-type:
    CREATE TYPE tt1 AS (a int, b int);
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1332,37 +1332,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(type,{public.tt1},{})
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1377,49 +1377,49 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-begin:
+step s2-begin:
    BEGIN;
-step s2-create-schema:
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-create-type:
+step s2-create-type:
    CREATE TYPE tt1 AS (a int, b int);
-step s2-create-table-with-type:
+step s2-create-table-with-type:
    CREATE TABLE t1 (a int, b tt1);
    -- session needs to have replication factor set to 1, can't do in setup
    SET citus.replication_model TO 'streaming';
@@ -1428,21 +1428,21 @@ step s2-create-table-with-type:
create_distributed_table
-
-step s1-add-worker:
+
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-step s2-commit:
+step s2-commit:
    COMMIT;
step s1-add-worker: <... completed>
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s2-print-distributed-objects:
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1459,37 +1459,37 @@ pg_identify_object_as_address
(schema,{myschema},{})
(type,{myschema.tt1},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1504,71 +1504,71 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s2-public-schema:
+1
+step s2-public-schema:
    SET search_path TO public;
-step s2-distribute-function:
+step s2-distribute-function:
    CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL;
    SELECT create_distributed_function('add(INT,INT)', '$1');
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-distribute-function: <... completed>
create_distributed_function
-
-step s2-begin:
+
+step s2-begin:
    BEGIN;
-step s2-commit:
+step s2-commit:
    COMMIT;
-step s3-wait-for-metadata-sync:
+step s3-wait-for-metadata-sync:
    SELECT public.wait_until_metadata_sync(5000);
wait_until_metadata_sync
-
-step s2-print-distributed-objects:
+
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1584,37 +1584,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(function,"{public,add}","{integer,integer}")
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1629,76 +1629,76 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s2-public-schema:
+step s2-public-schema:
    SET search_path TO public;
-step s2-distribute-function:
+step s2-distribute-function:
    CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL;
    SELECT create_distributed_function('add(INT,INT)', '$1');
create_distributed_function
-
-step s2-begin:
+
+step s2-begin:
    BEGIN;
-step s2-commit:
+step s2-commit:
    COMMIT;
-step s3-wait-for-metadata-sync:
+step s3-wait-for-metadata-sync:
    SELECT public.wait_until_metadata_sync(5000);
wait_until_metadata_sync
-
-step s1-add-worker:
+
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s3-wait-for-metadata-sync:
+step s3-wait-for-metadata-sync:
    SELECT public.wait_until_metadata_sync(5000);
wait_until_metadata_sync
-
-step s2-print-distributed-objects:
+
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1714,37 +1714,37 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(function,"{public,add}","{integer,integer}")
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
master_remove_node
-
-
+
+

starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects
-?column?
+?column?
-1
-step s1-print-distributed-objects:
+1
+step s1-print-distributed-objects:
    SELECT 1 FROM master_add_node('localhost', 57638);
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
@@ -1759,77 +1759,77 @@ step s1-print-distributed-objects:
    SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$);
    SELECT master_remove_node('localhost', 57638);
-?column?
+?column?
-1
+1
pg_identify_object_as_address
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
master_remove_node
-
-step s2-begin:
+
+step s2-begin:
    BEGIN;
-step s2-create-schema:
+step s2-create-schema:
    CREATE SCHEMA myschema;
    SET search_path TO myschema;
-step s2-distribute-function:
+step s2-distribute-function:
    CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL;
    SELECT create_distributed_function('add(INT,INT)', '$1');
create_distributed_function
-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
-step s3-wait-for-metadata-sync:
+step s3-wait-for-metadata-sync:
    SELECT public.wait_until_metadata_sync(5000);
wait_until_metadata_sync
-
-step s1-begin:
+
+step s1-begin:
    BEGIN;
-step s1-add-worker:
+step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
-?column?
+?column?
-1
-step s1-commit:
+1
+step s1-commit:
    COMMIT;
-step s3-wait-for-metadata-sync:
+step s3-wait-for-metadata-sync:
    SELECT public.wait_until_metadata_sync(5000);
wait_until_metadata_sync
-
-step s2-print-distributed-objects:
+
+step s2-print-distributed-objects:
    -- print an overview of all distributed objects
    SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1;
    -- print if the schema has been created
@@ -1846,28 +1846,28 @@ pg_identify_object_as_address
(function,"{myschema,add}","{integer,integer}")
(schema,{myschema},{})
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
-count
+count
-0
+0
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
-count
+count
-1
+1
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
master_remove_node
-
-
+
+

diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out
index 751eee47f..7e2176b71 100644
--- a/src/test/regress/expected/isolation_extension_commands.out
+++ b/src/test/regress/expected/isolation_extension_commands.out
@@ -1,35 +1,35 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-add-node-1 s2-create-extension-version-11 s1-commit s1-print
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-create-extension-version-11:
+1
+step s2-create-extension-version-11:
    CREATE extension seg VERSION "1.1";
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-extension-version-11: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-1
-extname extversion nspname
+1
+extname extversion nspname
-seg 1.1 public
+seg 1.1 public
run_command_on_workers
(localhost,57637,t,seg)
@@ -44,39 +44,39 @@ run_command_on_workers
(localhost,57638,t,public)
master_remove_node
-
-
+
+

starting permutation: s1-begin s1-add-node-1 s2-alter-extension-update-to-version-12 s1-commit s1-print
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-alter-extension-update-to-version-12:
+1
+step s2-alter-extension-update-to-version-12:
    ALTER extension seg update to "1.2";
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-alter-extension-update-to-version-12: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-1
-extname extversion nspname
+1
+extname extversion nspname
-seg 1.2 public
+seg 1.2 public
run_command_on_workers
(localhost,57637,t,seg)
@@ -91,43 +91,43 @@ run_command_on_workers
(localhost,57638,t,public)
master_remove_node
-
-
+
+

starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-drop-extension s1-commit s1-print
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s1-begin:
+1
+step s1-begin:
    BEGIN;
-step s1-remove-node-1:
+step s1-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-drop-extension:
+1
+step s2-drop-extension:
    drop extension seg;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-drop-extension: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-0
-extname extversion nspname
+0
+extname extversion nspname
run_command_on_workers
@@ -140,38 +140,38 @@ run_command_on_workers
(localhost,57638,t,"")
master_remove_node
-
+

starting permutation: s1-begin s1-add-node-1 s2-create-extension-with-schema1 s1-commit s1-print
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-create-extension-with-schema1:
+1
+step s2-create-extension-with-schema1:
    CREATE extension seg with schema schema1;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-extension-with-schema1: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-2
-extname extversion nspname
+2
+extname extversion nspname
-seg 1.3 schema1
+seg 1.3 schema1
run_command_on_workers
(localhost,57637,t,seg)
@@ -186,37 +186,37 @@ run_command_on_workers
(localhost,57638,t,schema1)
master_remove_node
-
-
+
+

starting permutation: s1-begin s1-add-node-1 s2-drop-extension s1-commit s1-print
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-drop-extension:
+1
+step s2-drop-extension:
    drop extension seg;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-drop-extension: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-1
-extname extversion nspname
+1
+extname extversion nspname
run_command_on_workers
@@ -232,48 +232,48 @@ run_command_on_workers
(localhost,57638,t,"")
master_remove_node
-
-
+
+

starting permutation: s1-add-node-1 s1-create-extension-with-schema2 s1-begin s1-remove-node-1 s2-alter-extension-set-schema3 s1-commit s1-print
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s1-create-extension-with-schema2:
+1
+step s1-create-extension-with-schema2:
    CREATE extension seg with schema schema2;
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-remove-node-1:
+step s1-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-alter-extension-set-schema3:
+1
+step s2-alter-extension-set-schema3:
    alter extension seg set schema schema3;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-alter-extension-set-schema3: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-4
-extname extversion nspname
+4
+extname extversion nspname
-seg 1.3 schema3
+seg 1.3 schema3
run_command_on_workers
(localhost,57638,t,seg)
@@ -285,47 +285,47 @@ run_command_on_workers
(localhost,57638,t,schema3)
master_remove_node
-
+

starting permutation: s1-add-node-1 s2-drop-extension s1-begin s1-remove-node-1 s2-create-extension-with-schema1 s1-commit s1-print
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-drop-extension:
+1
+step s2-drop-extension:
    drop extension seg;
-step s1-begin:
+step s1-begin:
    BEGIN;
-step s1-remove-node-1:
+step s1-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-create-extension-with-schema1:
+1
+step s2-create-extension-with-schema1:
    CREATE extension seg with schema schema1;
-step s1-commit:
+step s1-commit:
    COMMIT;
step s2-create-extension-with-schema1: <... completed>
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-4
-extname extversion nspname
+4
+extname extversion nspname
-seg 1.3 schema1
+seg 1.3 schema1
run_command_on_workers
(localhost,57638,t,seg)
@@ -337,52 +337,52 @@ run_command_on_workers
(localhost,57638,t,schema1)
master_remove_node
-
+

starting permutation: s2-add-node-1 s2-drop-extension s2-remove-node-1 s2-begin s2-create-extension-version-11 s1-add-node-1 s2-commit s1-print
-step s2-add-node-1:
+step s2-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-drop-extension:
+1
+step s2-drop-extension:
    drop extension seg;
-step s2-remove-node-1:
+step s2-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;
-step s2-create-extension-version-11:
+step s2-create-extension-version-11:
    CREATE extension seg VERSION "1.1";
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-commit:
+1
+step s2-commit:
    COMMIT;
-step s1-print:
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-3
-extname extversion nspname
+3
+extname extversion nspname
-seg 1.1 public
+seg 1.1 public
run_command_on_workers
(localhost,57637,t,"")
@@ -397,57 +397,57 @@ run_command_on_workers
(localhost,57638,t,"")
master_remove_node
-
-
+
+

starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-alter-extension-update-to-version-12 s1-add-node-1 s2-commit s1-print
-step s2-drop-extension:
+step s2-drop-extension:
    drop extension seg;
-step s2-add-node-1:
+step s2-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-create-extension-version-11:
+1
+step s2-create-extension-version-11:
    CREATE extension seg VERSION "1.1";
-step s2-remove-node-1:
+step s2-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;
-step s2-alter-extension-update-to-version-12:
+step s2-alter-extension-update-to-version-12:
    ALTER extension seg update to "1.2";
-step s1-add-node-1:
+step s1-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-step s2-commit:
+step s2-commit:
    COMMIT;
step s1-add-node-1: <... completed>
-?column?
+?column?
-1
-step s1-print:
+1
+step s1-print:
    select count(*) from citus.pg_dist_object ;
    select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
    SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
    SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
-count
+count
-4
-extname extversion nspname
+4
+extname extversion nspname
-seg 1.2 public
+seg 1.2 public
run_command_on_workers
(localhost,57637,t,seg)
@@ -462,43 +462,43 @@ run_command_on_workers
(localhost,57638,t,public)
master_remove_node
-
-
+
+

starting permutation: s2-add-node-1 s2-begin s2-drop-extension s1-remove-node-1 s2-commit s1-print
-step s2-add-node-1:
+step s2-add-node-1:
    SELECT 1 FROM master_add_node('localhost', 57637);
-?column?
+?column?
-1
-step s2-begin:
+1
+step s2-begin:
    BEGIN;
-step s2-drop-extension:
+step s2-drop-extension:
    drop extension seg;
-step s1-remove-node-1:
+step s1-remove-node-1:
    SELECT 1 FROM master_remove_node('localhost', 57637);
-step s2-commit:
+step s2-commit:
    COMMIT;
step s1-remove-node-1: <... completed>
-?column?
+?column?
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname run_command_on_workers @@ -511,37 +511,37 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - + starting permutation: s2-begin s2-create-extension-with-schema1 s1-add-node-1 s2-commit s1-print -step s2-begin: +step s2-begin: BEGIN; -step s2-create-extension-with-schema1: +step s2-create-extension-with-schema1: CREATE extension seg with schema schema1; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-commit: +1 +step s2-commit: COMMIT; -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname -seg 1.3 schema1 +seg 1.3 schema1 run_command_on_workers (localhost,57637,t,"") @@ -556,51 +556,51 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-with-schema2 s2-begin s2-alter-extension-version-13 s1-remove-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-with-schema2: +1 +step s2-create-extension-with-schema2: CREATE extension seg with schema schema2; -step s2-begin: +step s2-begin: BEGIN; -step s2-alter-extension-version-13: +step s2-alter-extension-version-13: ALTER extension seg update to "1.3"; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-remove-node-1: <... completed> -?column? +?column? 
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -4 -extname extversion nspname +4 +extname extversion nspname -seg 1.3 schema2 +seg 1.3 schema2 run_command_on_workers (localhost,57638,t,seg) @@ -612,46 +612,46 @@ run_command_on_workers (localhost,57638,t,schema2) master_remove_node - + starting permutation: s2-drop-extension s2-add-node-1 s2-begin s2-create-extension-version-11 s1-remove-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-create-extension-version-11: +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s1-remove-node-1: +step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-commit: +1 +step s2-commit: COMMIT; -step s1-print: +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname -seg 1.1 public +seg 1.1 public run_command_on_workers (localhost,57638,t,"") @@ -663,54 +663,54 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - + starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-drop-extension s1-add-node-1 s2-commit s1-print -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s2-add-node-1: +step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -?column? +?column? -1 -step s2-create-extension-version-11: +1 +step s2-create-extension-version-11: CREATE extension seg VERSION "1.1"; -step s2-remove-node-1: +step s2-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-drop-extension: +step s2-drop-extension: drop extension seg; -step s1-add-node-1: +step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); -step s2-commit: +step s2-commit: COMMIT; step s1-add-node-1: <... completed> -?column? +?column? 
-1 -step s1-print: +1 +step s1-print: select count(*) from citus.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$); SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$); -count +count -3 -extname extversion nspname +3 +extname extversion nspname run_command_on_workers @@ -726,5 +726,5 @@ run_command_on_workers (localhost,57638,t,"") master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index 3d05d6696..3bc0437b0 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -5,79 +5,79 @@ run_command_on_workers (localhost,57637,t,"GRANT ROLE") (localhost,57638,t,"GRANT ROLE") -step s1-grant: +step s1-grant: GRANT ALL ON test_table TO test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); GRANT ALL ON test_table TO test_user_2; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); -bool_and +bool_and -t -bool_and +t +bool_and -t -step s1-begin-insert: +t +step s1-begin-insert: BEGIN; SET ROLE test_user_1; INSERT INTO test_table VALUES (100, 100); -step s2-begin-insert: +step s2-begin-insert: BEGIN; SET ROLE test_user_2; INSERT INTO test_table VALUES (200, 200); -step s3-as-admin: +step s3-as-admin: -- Admin should be able to see all transactions SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -4 -step s3-as-user-1: +4 +step s3-as-user-1: -- User should only be able to see its own transactions SET ROLE test_user_1; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -1 -count +1 +count -2 -step s3-as-readonly: +2 +step s3-as-readonly: -- Other user should not see transactions SET ROLE test_readonly; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -0 -count +0 +count -0 -step s3-as-monitor: +0 +step s3-as-monitor: -- Monitor should see all transactions SET ROLE test_monitor; SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -4 -step s1-commit: +4 +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; run_command_on_workers diff --git a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out index 1547304b0..56db49f47 100644 --- a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out +++ b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out @@ -1,764 +1,764 @@ Parsed test spec with 4 sessions starting permutation: s1-begin s1-update-ref-table-from-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit s2-commit-worker s2-stop-connection -step 
s1-begin: +step s1-begin: BEGIN; -step s1-update-ref-table-from-coordinator: +step s1-update-ref-table-from-coordinator: UPDATE ref_table SET value_1 = 15; -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 UPDATE ref_table SET value_1 = 15; -localhost coordinator_host57638 57636 -step s1-commit: +localhost coordinator_host57638 57636 +step s1-commit: COMMIT; step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-ref-table: + +step s1-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET 
value_1 = 12 WHERE user_id = 1localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1UPDATE ref_table SET value_1 = 12 WHERE user_id = 1localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s2-start-session-level-connection s2-begin-on-worker s2-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-dist-table: + +step s1-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-dist-table: + +step s2-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-dist-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete-from-ref-table: + +step s1-delete-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1DELETE FROM ref_table WHERE user_id = 1localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91)localhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1INSERT INTO ref_table VALUES(8,81),(9,91)localhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step 
s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSVlocalhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + 
+step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy-to-ref-table: + +step s2-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s3-select-distributed-waiting-queries: + +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, 
waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost localhost 57638 57637 -step s1-commit-worker: +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1SELECT * FROM ref_table FOR UPDATElocalhost localhost 57638 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s3-select-distributed-waiting-queries s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost 57636 57638 -step s2-commit-worker: +INSERT INTO ref_table VALUES(8,81),(9,91)coordinator_hostlocalhost 57636 57638 +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-begin s1-update-on-the-coordinator s2-update-on-the-coordinator s3-select-distributed-waiting-queries s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update-on-the-coordinator: +step s1-update-on-the-coordinator: UPDATE tt1 SET value_1 = 4; -step s2-update-on-the-coordinator: +step s2-update-on-the-coordinator: UPDATE tt1 SET value_1 = 4; -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port @@ -767,83 +767,83 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_ UPDATE tt1 SET value_1 = 4; UPDATE tt1 SET value_1 = 4; -coordinator_hostcoordinator_host57636 57636 -step s1-commit: +coordinator_hostcoordinator_host57636 57636 +step s1-commit: COMMIT; step s2-update-on-the-coordinator: <... completed> restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s4-start-session-level-connection s4-begin-on-worker s4-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s4-commit-worker s1-stop-connection s4-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-dist-table: + +step s1-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node - -step s4-start-session-level-connection: + +step s4-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s4-begin-on-worker: + +step s4-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s4-update-dist-table: + +step s4-update-dist-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); -step s3-select-distributed-waiting-queries: +step s3-select-distributed-waiting-queries: SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_node_namewaiting_node_portblocking_node_port -UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57637 57637 -step s1-commit-worker: +UPDATE tt1 SET value_1 = 5UPDATE tt1 SET value_1 = 4localhost localhost 57637 57637 +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s4-update-dist-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s4-commit-worker: + +step s4-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s4-stop-connection: + +step s4-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_hash_copy_vs_all.out b/src/test/regress/expected/isolation_hash_copy_vs_all.out index 4566e34b4..b3a90ec0b 100644 --- a/src/test/regress/expected/isolation_hash_copy_vs_all.out +++ b/src/test/regress/expected/isolation_hash_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM hash_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 
b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM hash_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE hash_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX hash_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY hash_copy_index step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -224,7 +224,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -244,7 +244,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE hash_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -265,7 +265,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -285,38 +285,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting 
permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: DELETE FROM hash_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -325,16 +325,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -345,17 +345,17 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM hash_copy; +count + +15 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -363,7 +363,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -371,15 +371,15 @@ step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s2-update: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -387,7 +387,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -395,15 +395,15 @@ step s2-delete: DELETE FROM hash_copy WHERE id = 1; step s1-commit: COMMIT; step s2-delete: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -411,7 +411,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -419,15 +419,15 @@ step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -20 +20 starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - -step s1-recreate-with-replication-2: + +step s1-recreate-with-replication-2: DROP TABLE hash_copy; SET citus.shard_replication_factor TO 2; CREATE TABLE hash_copy(id integer, data text, int_data int); @@ -435,7 +435,7 @@ step s1-recreate-with-replication-2: create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -443,132 +443,132 @@ step s2-master-modify-multiple-shards: DELETE FROM hash_copy; step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM hash_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit 
s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM hash_copy WHERE id = 1; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE hash_copy; @@ -576,14 +576,14 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE hash_copy; @@ -597,7 +597,7 @@ ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); @@ -605,9 +605,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -617,7 +617,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; @@ -626,9 +626,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers @@ -638,7 +638,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; @@ -647,9 +647,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -659,7 +659,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -668,9 +668,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -680,7 +680,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; @@ -688,9 +688,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -700,57 +700,57 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size -57344 +57344 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); master_drop_all_shards -4 +4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -758,11 +758,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table - + step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_insert_select_conflict.out b/src/test/regress/expected/isolation_insert_select_conflict.out index 55fd21f6c..0c8a69760 100644 --- a/src/test/regress/expected/isolation_insert_select_conflict.out +++ b/src/test/regress/expected/isolation_insert_select_conflict.out @@ -3,299 +3,299 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-update: +step s2-update: UPDATE target_table SET col_2 = 5; -step s1-commit: +step s1-commit: COMMIT; step s2-update: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-delete s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-do-nothing: +step s1-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s2-begin: +step s2-begin: BEGIN; -step s2-delete: +step s2-delete: DELETE FROM target_table; -step s1-commit: +step s1-commit: COMMIT; step s2-delete: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-do-nothing: +step s1-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-update: +step s2-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update: <... 
completed> -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-update: +step s2-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update: <... completed> -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-do-nothing s1-commit s2-commit create_distributed_table - -step s1-begin: + +step s1-begin: SET citus.shard_replication_factor to 1; BEGIN; -step s1-insert-into-select-conflict-update: +step s1-insert-into-select-conflict-update: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 +col_1 col_2 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin: BEGIN; -step s2-insert-into-select-conflict-do-nothing: +step s2-insert-into-select-conflict-do-nothing: INSERT INTO target_table - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT DO NOTHING; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-do-nothing: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin-replication-factor-2 s1-insert-into-select-conflict-update-replication-factor-2 s2-begin-replication-factor-2 s2-insert-into-select-conflict-update-replication-factor-2 s1-commit s2-commit create_distributed_table - -step s1-begin-replication-factor-2: + +step s1-begin-replication-factor-2: SET citus.shard_replication_factor to 2; BEGIN; -step s1-insert-into-select-conflict-update-replication-factor-2: +step s1-insert-into-select-conflict-update-replication-factor-2: INSERT INTO target_table_2 - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -col_1 col_2 col_3 +col_1 col_2 col_3 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-begin-replication-factor-2: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-begin-replication-factor-2: SET citus.shard_replication_factor to 2; BEGIN; -step s2-insert-into-select-conflict-update-replication-factor-2: +step s2-insert-into-select-conflict-update-replication-factor-2: INSERT INTO target_table_2 - SELECT + SELECT col_1, col_2 FROM ( - SELECT - col_1, col_2, col_3 + SELECT + col_1, col_2, col_3 FROM source_table LIMIT 5 ) as foo ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select-conflict-update-replication-factor-2: <... completed> -col_1 col_2 col_3 +col_1 col_2 col_3 -1 1 -2 2 -3 3 -4 4 -5 5 -step s2-commit: +1 1 +2 2 +3 3 +4 4 +5 5 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_insert_select_vs_all.out b/src/test/regress/expected/isolation_insert_select_vs_all.out index 9800bd726..7987f87c5 100644 --- a/src/test/regress/expected/isolation_insert_select_vs_all.out +++ b/src/test/regress/expected/isolation_insert_select_vs_all.out @@ -3,8 +3,8 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -15,15 +15,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -34,15 +34,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-update-on-inserted: UPDATE 
insert_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -53,15 +53,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -73,15 +73,15 @@ step s2-truncate-on-inserted: TRUNCATE insert_of_insert_select_hash; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -93,15 +93,15 @@ step s2-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-commit: COMMIT; step s2-drop-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -113,9 +113,9 @@ step s2-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_ step s1-commit: COMMIT; step s2-ddl-create-index-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -125,8 +125,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-insert-select s2-ddl-drop-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -139,9 +139,9 @@ step s2-ddl-drop-index-on-inserted: DROP INDEX insert_of_insert_select_hash_inde step s1-commit: COMMIT; step s2-ddl-drop-index-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -151,8 +151,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -164,9 +164,9 @@ step s2-ddl-create-index-concurrently-on-inserted: CREATE INDEX CONCURRENTLY ins step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -176,8 +176,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -189,9 +189,9 @@ step s2-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD step s1-commit: COMMIT; step s2-ddl-add-column-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -201,8 +201,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-insert-select s2-ddl-drop-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -215,9 +215,9 @@ step s2-ddl-drop-column-on-inserted: ALTER TABLE insert_of_insert_select_hash DR step s1-commit: COMMIT; step s2-ddl-drop-column-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -227,8 +227,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -240,9 +240,9 @@ step s2-ddl-rename-column-on-inserted: ALTER TABLE insert_of_insert_select_hash step s1-commit: COMMIT; step s2-ddl-rename-column-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -257,8 +257,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -269,18 +269,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -291,15 +291,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-master-modify-multiple-shards-on-inserted: DELETE FROM insert_of_insert_select_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-inserted s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -312,19 +312,19 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards-on-inserted: <... 
completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-insert-select s2-distribute-table-on-inserted s1-commit s1-select-count create_distributed_table - + step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -336,18 +336,18 @@ ERROR: cannot INSERT rows from a distributed query into a local table step s2-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -358,15 +358,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-update-on-selected: UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -377,15 +377,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -397,15 
+397,15 @@ step s2-truncate-on-selected: TRUNCATE select_of_insert_select_hash; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -422,8 +422,8 @@ ERROR: relation "select_of_insert_select_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -434,9 +434,9 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -446,8 +446,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-insert-select s2-ddl-drop-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -460,9 +460,9 @@ step s2-ddl-drop-index-on-selected: DROP INDEX select_of_insert_select_hash_inde step s1-commit: COMMIT; step s2-ddl-drop-index-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -472,8 +472,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -485,9 +485,9 @@ step s2-ddl-create-index-concurrently-on-selected: CREATE INDEX CONCURRENTLY sel step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-selected: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers @@ -497,8 +497,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -510,9 +510,9 @@ step s2-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD step s1-commit: COMMIT; step s2-ddl-add-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -522,8 +522,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-insert-select s2-ddl-drop-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -536,9 +536,9 @@ ERROR: INSERT has more expressions than target columns step s2-ddl-drop-column-on-selected: ALTER TABLE select_of_insert_select_hash DROP new_column; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -548,8 +548,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -561,9 +561,9 @@ step s2-ddl-rename-column-on-selected: ALTER TABLE select_of_insert_select_hash step s1-commit: COMMIT; step s2-ddl-rename-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -573,8 +573,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -585,18 +585,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -607,15 +607,15 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-master-modify-multiple-shards-on-selected: DELETE FROM select_of_insert_select_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-selected s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -628,19 +628,19 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards-on-selected: <... 
completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -0 +0 starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-insert-select s2-distribute-table-on-selected s1-commit s1-select-count create_distributed_table - + step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text); -step s1-initialize: +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -651,18 +651,18 @@ step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s2-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id'); create_distributed_table - + step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-update-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -673,15 +673,15 @@ step s1-update-on-inserted: UPDATE insert_of_insert_select_hash SET data = 'l' W step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -692,15 +692,15 @@ step s1-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-truncate-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM 
PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -712,15 +712,15 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-drop-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -733,15 +733,15 @@ step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: relation "insert_of_insert_select_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -753,9 +753,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -765,8 +765,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-ddl-drop-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -779,9 +779,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers @@ -791,8 +791,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -804,9 +804,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -816,8 +816,8 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-ddl-drop-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -830,9 +830,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -842,8 +842,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -855,9 +855,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -867,8 +867,8 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -878,19 +878,19 @@ step s1-begin: BEGIN; step s1-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size -65536 +65536 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -901,15 +901,15 @@ step s1-master-modify-multiple-shards-on-inserted: DELETE FROM insert_of_insert_ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table - -step s1-initialize: + +step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV; @@ -919,23 +919,23 @@ step s1-begin: BEGIN; step s1-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); master_drop_all_shards -4 +4 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
error in steps s1-commit s2-insert-select: ERROR: could not find any shards into which to copy
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-distribute-table-on-inserted s2-insert-select s1-commit s1-select-count
create_distributed_table
-
+
step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash;
step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text);
-step s1-initialize:
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -945,20 +945,20 @@ step s1-begin: BEGIN;
step s1-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id');
create_distributed_table
-
+
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
starting permutation: s1-initialize s1-begin s1-update-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -969,15 +969,15 @@ step s1-update-on-selected: UPDATE select_of_insert_select_hash SET data = 'l' W
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
starting permutation: s1-initialize s1-begin s1-delete-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -988,15 +988,15 @@ step s1-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id =
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-9
+9
starting permutation: s1-initialize s1-begin s1-truncate-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1008,15 +1008,15 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-0
+0
starting permutation: s1-initialize s1-begin s1-drop-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1034,8 +1034,8 @@ ERROR: relation "select_of_insert_select_hash" does not exist
starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1046,9 +1046,9 @@ step s1-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
@@ -1058,8 +1058,8 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-ddl-drop-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1072,9 +1072,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
@@ -1084,8 +1084,8 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1098,9 +1098,9 @@ step s1-commit: COMMIT;
step s2-insert-select: <... completed>
error in steps s1-commit s2-insert-select: ERROR: INSERT has more expressions than target columns
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -1110,8 +1110,8 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-ddl-drop-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1124,9 +1124,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -1136,8 +1136,8 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1149,9 +1149,9 @@ step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM se
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -1161,8 +1161,8 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-table-size-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1172,19 +1172,19 @@ step s1-begin: BEGIN;
step s1-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash');
citus_total_relation_size
-65536
+65536
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1195,15 +1195,15 @@ step s1-master-modify-multiple-shards-on-selected: DELETE FROM select_of_insert_
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-0
+0
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
-step s1-initialize:
+
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1213,22 +1213,22 @@ step s1-begin: BEGIN;
step s1-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash');
master_drop_all_shards
-4
+4
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s2-insert-select: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-0
+0
starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-distribute-table-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table
-
+
step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash;
step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text);
-step s1-initialize:
+step s1-initialize:
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
@@ -1238,10 +1238,10 @@ step s1-begin: BEGIN;
step s1-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id');
create_distributed_table
-
+
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
-count
+count
-10
+10
diff --git a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out
index 97e9b1a5f..178c5a4dd 100644
--- a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out
+++ b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out
@@ -1,963 +1,963 @@
Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-colocated-insert-select:
+
+step s2-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-15
+15
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-colocated-insert-select:
+
+step s2-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-15
+15
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-insert-select-via-coordinator:
+
+step s2-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-15
+15
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-insert-select-via-coordinator:
+
+step s2-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-15
+15
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-insert:
+
+step s2-insert:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (5, 50), (6, 60)')
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-12
+12
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-select:
+
+step s2-select:
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table')
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-10
+10
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-update:
+
+step s2-update:
SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=55 WHERE id=5');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-10
+10
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-update:
+
+step s2-update:
SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=55 WHERE id=5');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-10
+10
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-copy:
+
+step s2-copy:
SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-13
+13
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-copy:
+
+step s2-copy:
SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-13
+13
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-begin:
+
+step s2-begin:
BEGIN;
-step s2-coordinator-drop:
+step s2-coordinator-drop:
DROP TABLE dist_table;
-step s1-commit-worker:
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
+
step s2-coordinator-drop: <... completed>
-step s2-commit:
+step s2-commit:
COMMIT;
-step s1-stop-connection:
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
ERROR: relation "dist_table" does not exist
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-begin:
+
+step s2-begin:
BEGIN;
-step s2-coordinator-drop:
+step s2-coordinator-drop:
DROP TABLE dist_table;
-step s1-commit-worker:
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
+
step s2-coordinator-drop: <... completed>
-step s2-commit:
+step s2-commit:
COMMIT;
-step s1-stop-connection:
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
ERROR: relation "dist_table" does not exist
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-colocated-insert-select:
+
+step s1-colocated-insert-select:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-select-for-update:
+
+step s2-select-for-update:
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-10
+10
restore_isolation_tester_func
-
+
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s1-insert-select-via-coordinator:
+
+step s1-insert-select-via-coordinator:
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT value, id FROM dist_table');
run_commands_on_session_level_connection_to_node
-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
-
-step s2-select-for-update:
+
+step s2-select-for-update:
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
run_commands_on_session_level_connection_to_node
-
-step s1-commit-worker:
+
+step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s2-commit-worker:
+
+step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
-
-step s1-stop-connection:
+
+step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s2-stop-connection:
+
+step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
-
-step s3-select-count:
+
+step s3-select-count:
SELECT COUNT(*) FROM dist_table;
-count
+count
-10
+10
restore_isolation_tester_func
-
+
diff --git a/src/test/regress/expected/isolation_insert_vs_all.out b/src/test/regress/expected/isolation_insert_vs_all.out
index b230818a3..197130b47 100644
--- a/src/test/regress/expected/isolation_insert_vs_all.out
+++ b/src/test/regress/expected/isolation_insert_vs_all.out
@@ -3,105 +3,105 @@ Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-insert s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-7
+7
starting permutation: s1-initialize s1-begin s1-insert s2-insert-multi-row s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-9
+9
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm');
step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-9
+9
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert-multi-row s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm');
step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-11
+11
starting permutation: s1-initialize s1-begin s1-insert s2-insert-select s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-11
+11
starting permutation: s1-initialize s1-begin s1-insert s2-update s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
starting permutation: s1-initialize s1-begin s1-insert s2-delete s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-delete: DELETE FROM insert_hash WHERE id = 4;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-5
+5
starting permutation: s1-initialize s1-begin s1-insert s2-truncate s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -109,14 +109,14 @@ step s2-truncate: TRUNCATE insert_hash;
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-0
+0
starting permutation: s1-initialize s1-begin s1-insert s2-drop s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -129,7 +129,7 @@ ERROR: relation "insert_hash" does not exist
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -137,9 +137,9 @@ step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id);
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
@@ -149,7 +149,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id);
step s1-begin: BEGIN;
@@ -158,9 +158,9 @@ step s2-ddl-drop-index: DROP INDEX insert_hash_index;
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
@@ -170,7 +170,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -178,9 +178,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_ind
step s1-commit: COMMIT;
step s2-ddl-create-index-concurrently: <... completed>
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
@@ -190,7 +190,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -198,9 +198,9 @@ step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -210,7 +210,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -219,9 +219,9 @@ step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column;
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -231,7 +231,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
@@ -239,9 +239,9 @@ step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -251,38 +251,38 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-insert s2-table-size s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-table-size: SELECT citus_total_relation_size('insert_hash');
citus_total_relation_size
-57344
+57344
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
starting permutation: s1-initialize s1-begin s1-insert s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s2-master-modify-multiple-shards: DELETE FROM insert_hash;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-1
+1
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert s2-distribute-table s1-commit s1-select-count
create_distributed_table
-
+
step s1-drop: DROP TABLE insert_hash;
step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text);
COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
@@ -293,58 +293,58 @@ step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table
-
-step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
-11
+step s1-select-count: SELECT COUNT(*) FROM insert_hash;
+count
+
+11
starting permutation: s1-initialize s1-begin s1-insert-select s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash;
step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-11
+11
starting permutation: s1-initialize s1-begin s1-update s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4;
step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-6
+6
starting permutation: s1-initialize s1-begin s1-delete s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM insert_hash WHERE id = 4;
step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM insert_hash;
-count
+count
-5
+5
starting permutation: s1-initialize s1-begin s1-truncate s2-insert s1-commit s1-select-count
create_distributed_table
-
+
step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE insert_hash;
@@ -352,14 +352,14 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k');
step s1-commit: COMMIT;
step s2-insert: <... completed>
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -1 +1 starting permutation: s1-initialize s1-begin s1-drop s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; @@ -373,7 +373,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); @@ -381,9 +381,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -393,7 +393,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -402,9 +402,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -414,7 +414,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; @@ -422,9 +422,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -434,7 +434,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -443,9 +443,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -455,7 +455,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; @@ -463,9 +463,9 @@ step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -475,38 +475,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -1 +1 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -514,61 +514,61 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table - + step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-delete: DELETE FROM insert_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -7 +7 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -576,14 +576,14 @@ step s2-truncate: TRUNCATE insert_hash; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -596,7 +596,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -604,9 +604,9 @@ step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -616,7 +616,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert-multi-row s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -625,9 +625,9 @@ step s2-ddl-drop-index: DROP INDEX insert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -637,7 +637,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -645,9 +645,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -657,7 +657,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -665,9 +665,9 @@ step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -677,7 +677,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert-multi-row s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -686,9 +686,9 @@ step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -698,7 +698,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); @@ -706,9 +706,9 @@ step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -718,38 +718,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step 
s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-master-modify-multiple-shards: DELETE FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert-multi-row s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -760,58 +760,58 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count -13 +step s1-select-count: SELECT COUNT(*) FROM insert_hash; +count + +13 starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 starting permutation: s1-initialize s1-begin s1-update s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-delete s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM insert_hash WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -7 +7 starting permutation: s1-initialize s1-begin s1-truncate s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE insert_hash; @@ -819,14 +819,14 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-initialize s1-begin s1-drop s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; @@ -840,7 +840,7 @@ ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); @@ -848,9 +848,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -860,7 +860,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; @@ -869,9 +869,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers @@ -881,7 +881,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; @@ -889,9 +889,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -901,7 +901,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -910,9 +910,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -922,7 +922,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; @@ -930,9 +930,9 @@ step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -942,38 +942,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size -57344 +57344 step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -8 +8 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -3 +3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert-multi-row s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -981,11 +981,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table - + step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; -count +count -13 +13 diff --git a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out index 5e041ad30..5a70f0bf3 100644 --- a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out @@ -1,783 +1,783 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -7 -step s1-stop-connection: +7 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - 
-step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -9 -step s1-stop-connection: +9 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-multi-row: + +step s2-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -9 -step s1-stop-connection: +9 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step 
s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-multi-row: + +step s2-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -11 -step s1-stop-connection: +11 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - 
-step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select: + +step s2-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table SELECT * FROM insert_table'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -11 -step s1-stop-connection: +11 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step 
s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update: + +step s2-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 65 WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-multi-row: + +step s1-insert-multi-row: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60), (7, 70), (8, 80)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-multi-row: + +step s2-update-multi-row: SELECT run_commands_on_session_level_connection_to_node('UPDATE insert_table SET value = 67 WHERE id IN (6, 7)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -8 -step s1-stop-connection: +8 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step 
s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY insert_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -8 -step s1-stop-connection: +8 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE insert_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -0 -step s1-stop-connection: +0 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert: + +step s1-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO insert_table VALUES(6, 60)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM insert_table WHERE id = 6 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM insert_table; -count +count -6 -step s1-stop-connection: +6 +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_insert_vs_vacuum.out b/src/test/regress/expected/isolation_insert_vs_vacuum.out index eb6c481b1..d9fda9453 100644 --- a/src/test/regress/expected/isolation_insert_vs_vacuum.out +++ b/src/test/regress/expected/isolation_insert_vs_vacuum.out @@ -3,34 +3,34 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-vacuum-analyze s1-commit create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); -step s2-vacuum-analyze: +step s2-vacuum-analyze: VACUUM ANALYZE test_insert_vacuum; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-insert s2-vacuum-full s1-commit 
create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-insert: +step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); -step s2-vacuum-full: +step s2-vacuum-full: VACUUM FULL test_insert_vacuum; -step s1-commit: +step s1-commit: COMMIT; step s2-vacuum-full: <... completed> diff --git a/src/test/regress/expected/isolation_master_append_table.out b/src/test/regress/expected/isolation_master_append_table.out index 6c88f7a56..539384e2d 100644 --- a/src/test/regress/expected/isolation_master_append_table.out +++ b/src/test/regress/expected/isolation_master_append_table.out @@ -1,13 +1,13 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-master_append_table_to_shard s2-master_append_table_to_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_append_table_to_shard: +step s1-master_append_table_to_shard: SELECT master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) FROM @@ -17,8 +17,8 @@ step s1-master_append_table_to_shard: master_append_table_to_shard -0.0426667 -step s2-master_append_table_to_shard: +0.0426667 +step s2-master_append_table_to_shard: SELECT master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) FROM @@ -26,13 +26,13 @@ step s2-master_append_table_to_shard: WHERE 'table_to_append'::regclass::oid = logicalrelid; -step s1-commit: +step s1-commit: COMMIT; step s2-master_append_table_to_shard: <... completed> master_append_table_to_shard -0.064 -step s2-commit: +0.064 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_master_apply_delete.out b/src/test/regress/expected/isolation_master_apply_delete.out index fe596eef3..b4ea51901 100644 --- a/src/test/regress/expected/isolation_master_apply_delete.out +++ b/src/test/regress/expected/isolation_master_apply_delete.out @@ -1,109 +1,109 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-master_apply_delete_command_all_shard s2-master_apply_delete_command_all_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_all_shard: +step s1-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); master_apply_delete_command -1 -step s2-master_apply_delete_command_all_shard: +1 +step s2-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_all_shard: <... completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_all_shard s2-master_apply_delete_command_row s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_all_shard: +step s1-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); master_apply_delete_command -1 -step s2-master_apply_delete_command_row: +1 +step s2-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_row: <... 
completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_row s2-master_apply_delete_command_all_shard s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_row: +step s1-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); master_apply_delete_command -0 -step s2-master_apply_delete_command_all_shard: +0 +step s2-master_apply_delete_command_all_shard: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_all_shard: <... completed> master_apply_delete_command -1 -step s2-commit: +1 +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-master_apply_delete_command_row s2-master_apply_delete_command_row s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-master_apply_delete_command_row: +step s1-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); master_apply_delete_command -0 -step s2-master_apply_delete_command_row: +0 +step s2-master_apply_delete_command_row: SELECT master_apply_delete_command($$DELETE FROM table_to_delete_from WHERE id >= 0 and id < 3$$); -step s1-commit: +step s1-commit: COMMIT; step s2-master_apply_delete_command_row: <... completed> master_apply_delete_command -0 -step s2-commit: +0 +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_master_update_node.out b/src/test/regress/expected/isolation_master_update_node.out index 09d9bf781..cc746278e 100644 --- a/src/test/regress/expected/isolation_master_update_node.out +++ b/src/test/regress/expected/isolation_master_update_node.out @@ -3,11 +3,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: -- update a specific node by address SELECT master_update_node(nodeid, 'localhost', nodeport + 10) FROM pg_dist_node @@ -18,21 +18,21 @@ step s1-abort: ABORT; step s2-update-node-1: <... completed> master_update_node - + step s2-abort: ABORT; master_remove_node - - + + starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1-force: +step s2-update-node-1-force: -- update a specific node by address (force) SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) FROM pg_dist_node @@ -42,7 +42,7 @@ step s2-update-node-1-force: step s2-update-node-1-force: <... 
completed> master_update_node - + step s2-abort: ABORT; step s1-abort: ABORT; WARNING: this step had a leftover error message @@ -53,5 +53,5 @@ server closed the connection unexpectedly master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_master_update_node_0.out b/src/test/regress/expected/isolation_master_update_node_0.out index eb450d715..8dbc71029 100644 --- a/src/test/regress/expected/isolation_master_update_node_0.out +++ b/src/test/regress/expected/isolation_master_update_node_0.out @@ -3,11 +3,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: -- update a specific node by address SELECT master_update_node(nodeid, 'localhost', nodeport + 10) FROM pg_dist_node @@ -18,21 +18,21 @@ step s1-abort: ABORT; step s2-update-node-1: <... completed> master_update_node - + step s2-abort: ABORT; master_remove_node - - + + starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort create_distributed_table - + step s1-begin: BEGIN; step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); step s2-begin: BEGIN; -step s2-update-node-1-force: +step s2-update-node-1-force: -- update a specific node by address (force) SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) FROM pg_dist_node @@ -42,7 +42,7 @@ step s2-update-node-1-force: step s2-update-node-1-force: <... completed> master_update_node - + step s2-abort: ABORT; step s1-abort: ABORT; WARNING: this step had a leftover error message @@ -51,5 +51,5 @@ SSL connection has been closed unexpectedly master_remove_node - - + + diff --git a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out index 67a1e5dcb..d61825ebc 100644 --- a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out +++ b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out @@ -1,127 +1,127 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-insert_to_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-insert_to_events_test_table: +step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); -step s2-commit: +step s2-commit: COMMIT; step s1-insert_to_events_test_table: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-update_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-update_events_test_table: +step s1-update_events_test_table: UPDATE users_test_table SET value_1 = 3; -step s2-commit: +step s2-commit: COMMIT; step s1-update_events_test_table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-delete_events_test_table s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-delete_events_test_table: +step s1-delete_events_test_table: DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; -step s2-commit: +step s2-commit: COMMIT; step s1-delete_events_test_table: <... completed> -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s1-insert_to_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-insert_to_events_test_table: +step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-update_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update_events_test_table: +step s1-update_events_test_table: UPDATE users_test_table SET value_1 = 3; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-delete_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-delete_events_test_table: +step s1-delete_events_test_table: DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; -step s2-modify_with_subquery_v1: +step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; -step s1-commit: +step s1-commit: COMMIT; step s2-modify_with_subquery_v1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out b/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out index 47411070d..8fcaf2ae5 100644 --- a/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out +++ b/src/test/regress/expected/isolation_multi_shard_modify_vs_all.out @@ -1,458 +1,458 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-select s1-commit s2-select s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 23 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 -step s1-commit: +1 5 6 7 +2 12 7 18 +3 23 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 +step s1-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 3 6 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 3 10 17 -6 3 11 25 -7 3 12 18 -step s2-commit: +1 3 6 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 3 10 17 +6 3 11 25 +7 3 12 18 +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-update_all_value_1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_all_value_1: +step s2-update_all_value_1: UPDATE users_test_table SET value_1 = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_all_value_1: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_even_concurrently s2-begin s2-update_odd_concurrently s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_even_concurrently: +step s1-update_even_concurrently: SET citus.enable_deadlock_prevention TO off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 0; SET citus.enable_deadlock_prevention TO on; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_odd_concurrently: +step s2-update_odd_concurrently: SET citus.enable_deadlock_prevention = off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 1; SET citus.enable_deadlock_prevention TO on; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_even_concurrently s2-begin s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update_even_concurrently: +step s1-update_even_concurrently: SET citus.enable_deadlock_prevention TO off; UPDATE users_test_table SET value_1 = 3 WHERE user_id % 2 = 0; SET citus.enable_deadlock_prevention TO on; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_4_or_6_to_4: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s1-update_value_1_of_1_or_3_to_5 s2-begin s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 5 8 25 -4 4 9 23 -5 35 10 17 -6 4 11 25 -7 27 12 18 +1 5 6 7 +2 12 7 18 +3 5 8 25 +4 4 9 23 +5 35 10 17 +6 4 11 25 +7 27 12 18 starting permutation: s1-begin s1-update_value_1_of_1_or_3_to_5 s2-begin s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 8 6 7 -2 12 7 18 -3 8 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 +1 8 6 7 +2 12 7 18 +3 8 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-insert-to-table s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-to-table: +step s2-insert-to-table: INSERT INTO users_test_table VALUES (1,2,3,4); -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 2 3 4 -1 3 6 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 3 10 17 -6 3 11 25 -7 3 12 18 +1 2 3 4 +1 3 6 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 3 10 17 +6 3 11 25 +7 3 12 18 starting permutation: s1-begin s1-update_all_value_1 s2-begin s2-insert-into-select s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-insert-into-select: +step s2-insert-into-select: INSERT INTO users_test_table SELECT * FROM events_test_table; -step s1-commit: +step s1-commit: COMMIT; step s2-insert-into-select: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 3 6 7 -1 5 7 7 -2 3 7 18 -3 3 8 25 -4 3 9 23 -5 22 9 25 -5 3 10 17 -7 41 10 23 -6 3 11 25 -7 3 12 18 -1 20 12 25 -3 26 13 18 -5 17 14 4 -3 11 78 18 +1 3 6 7 +1 5 7 7 +2 3 7 18 +3 3 8 25 +4 3 9 23 +5 22 9 25 +5 3 10 17 +7 41 10 23 +6 3 11 25 +7 3 12 18 +1 20 12 25 +3 26 13 18 +5 17 14 4 +3 11 78 18 starting permutation: s1-begin s2-begin s1-update_value_1_of_1_or_3_to_5 s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-update_value_1_of_1_or_3_to_8 s1-update_value_1_of_2_or_4_to_5 s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-update_value_1_of_2_or_4_to_5: +step s1-update_value_1_of_2_or_4_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 2 or user_id = 4; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_all_value_1 s2-begin s2-change_connection_mode_to_sequential s2-update_all_value_1 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_all_value_1: +step s1-update_all_value_1: UPDATE users_test_table SET value_1 = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_all_value_1: +step s2-update_all_value_1: UPDATE users_test_table SET value_1 = 6; -step s1-commit: +step s1-commit: COMMIT; step s2-update_all_value_1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 6 6 7 -2 6 7 18 -3 6 8 25 -4 6 9 23 -5 6 10 17 -6 6 11 25 -7 6 12 18 +1 6 6 7 +2 6 7 18 +3 6 8 25 +4 6 9 23 +5 6 10 17 +6 6 11 25 +7 6 12 18 starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-begin s2-change_connection_mode_to_sequential s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 8 6 7 -2 12 7 18 -3 8 8 25 -4 42 9 23 -5 35 10 17 -6 21 11 25 -7 27 12 18 +1 8 6 7 +2 12 7 18 +3 8 8 25 +4 42 9 23 +5 35 10 17 +6 21 11 25 +7 27 12 18 starting permutation: s1-begin s1-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-begin s2-change_connection_mode_to_sequential s2-update_value_1_of_4_or_6_to_4 s1-commit s2-commit s2-select -step s1-begin: +step s1-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-begin: +step s2-begin: BEGIN; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_4_or_6_to_4: +step s2-update_value_1_of_4_or_6_to_4: UPDATE users_test_table SET value_1 = 4 WHERE user_id = 4 or user_id = 6; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s2-select: +step s2-select: SELECT * FROM users_test_table ORDER BY value_2, value_3; -user_id value_1 value_2 value_3 +user_id value_1 value_2 value_3 -1 5 6 7 -2 12 7 18 -3 5 8 25 -4 4 9 23 -5 35 10 17 -6 4 11 25 -7 27 12 18 +1 5 6 7 +2 12 7 18 +3 5 8 25 +4 4 9 23 +5 35 10 17 +6 4 11 25 +7 27 12 18 starting permutation: s1-begin s2-begin s1-change_connection_mode_to_sequential s2-change_connection_mode_to_sequential s1-update_value_1_of_1_or_3_to_5 s2-update_value_1_of_1_or_3_to_8 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 
'sequential'; -step s1-update_value_1_of_1_or_3_to_5: +step s1-update_value_1_of_1_or_3_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 1 or user_id = 3; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-commit: +step s1-commit: COMMIT; step s2-update_value_1_of_1_or_3_to_8: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s1-change_connection_mode_to_sequential s2-change_connection_mode_to_sequential s2-update_value_1_of_1_or_3_to_8 s1-update_value_1_of_2_or_4_to_5 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-change_connection_mode_to_sequential: +step s1-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-change_connection_mode_to_sequential: +step s2-change_connection_mode_to_sequential: set citus.multi_shard_modify_mode to 'sequential'; -step s2-update_value_1_of_1_or_3_to_8: +step s2-update_value_1_of_1_or_3_to_8: UPDATE users_test_table SET value_1 = 8 WHERE user_id = 1 or user_id = 3; -step s1-update_value_1_of_2_or_4_to_5: +step s1-update_value_1_of_2_or_4_to_5: UPDATE users_test_table SET value_1 = 5 WHERE user_id = 2 or user_id = 4; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_multiuser_locking.out b/src/test/regress/expected/isolation_multiuser_locking.out index 6dea9cc91..1c8860726 100644 --- a/src/test/regress/expected/isolation_multiuser_locking.out +++ b/src/test/regress/expected/isolation_multiuser_locking.out @@ -1,284 +1,284 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-reindex: +step s2-reindex: REINDEX TABLE test_table; ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-reindex: +step s2-reindex: REINDEX TABLE test_table; ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; 
-step s1-reindex: +step s1-reindex: REINDEX TABLE test_table; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-index: +step s2-index: CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; -step s2-drop-index: +step s2-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-index: +step s1-index: CREATE INDEX test_index ON test_table(column1); -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s2-commit: +step s2-commit: COMMIT; step s1-index: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s1-drop-index: +step s1-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-index: +step s1-index: CREATE INDEX test_index ON test_table(column1); -step s2-index: +step s2-index: CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; -step s1-drop-index: +step s1-drop-index: DROP INDEX IF EXISTS test_index; -step s2-drop-index: +step s2-drop-index: DROP INDEX IF EXISTS test_index; starting permutation: s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit -step s1-begin: +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s2-truncate: +step s2-truncate: TRUNCATE test_table; ERROR: permission denied for table test_table -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s2-commit: +step s2-commit: COMMIT; -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step 
s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-truncate: +step s1-truncate: TRUNCATE test_table; -step s2-insert: +step s2-insert: UPDATE test_table SET column2 = 2; -step s1-insert: +step s1-insert: UPDATE test_table SET column2 = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> -step s2-commit: +step s2-commit: COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit -step s1-grant: +step s1-grant: SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; -bool_and +bool_and -t -step s1-begin: +t +step s1-begin: BEGIN; SET ROLE test_user_1; -step s2-begin: +step s2-begin: BEGIN; SET ROLE test_user_2; -step s1-truncate: +step s1-truncate: TRUNCATE test_table; -step s2-truncate: +step s2-truncate: TRUNCATE test_table; -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> -step s2-commit: +step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out index 7b0a028a9..3b578a581 100644 --- a/src/test/regress/expected/isolation_partitioned_copy_vs_all.out +++ b/src/test/regress/expected/isolation_partitioned_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM partitioned_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy 
s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-delete: DELETE FROM partitioned_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 
4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE partitioned_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -163,7 +163,7 @@ ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,9 +171,9 @@ step s2-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -183,7 +183,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -192,9 +192,9 @@ step s2-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -204,7 +204,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -212,9 +212,9 @@ step s2-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_colum step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -224,38 +224,38 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size -0 +0 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: DELETE FROM partitioned_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -264,16 +264,16 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -4 +4 step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -284,134 +284,134 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - -step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; +count + +15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM partitioned_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 
6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM partitioned_copy WHERE id = 1; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE partitioned_copy; @@ -419,14 +419,14 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE partitioned_copy; @@ -440,7 +440,7 @@ ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; @@ -449,9 +449,9 @@ step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -461,7 +461,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -470,9 +470,9 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -482,7 +482,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column; @@ -490,9 +490,9 @@ step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 & step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -502,57 +502,57 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size -0 +0 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); master_drop_all_shards -4 +4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -560,11 +560,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id'); create_distributed_table - + step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_progress_monitoring.out b/src/test/regress/expected/isolation_progress_monitoring.out index e2b699394..679e42c6b 100644 --- a/src/test/regress/expected/isolation_progress_monitoring.out +++ b/src/test/regress/expected/isolation_progress_monitoring.out @@ -1,7 +1,7 @@ Parsed test spec with 5 sessions starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3 -step take-locks: +step take-locks: -- Locks for steps of sample operation in s1 SELECT pg_advisory_lock(10); SELECT pg_advisory_lock(11); @@ -17,55 +17,55 @@ step take-locks: pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - + pg_advisory_lock - -step s1-start-operation: + +step s1-start-operation: SELECT sample_operation(1337, 10, -1); -step s2-start-operation: +step s2-start-operation: SELECT sample_operation(1337, 20, 2); -step s3-start-operation: +step s3-start-operation: SELECT sample_operation(3778, 30, 9); -step show-progress: +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,0) -(1,0) -(0,0) -(1,0) -show_progress +(0,0) +(1,0) +(0,0) +(1,0) +show_progress -(0,0) -(1,0) -step release-locks-1: +(0,0) +(1,0) +step release-locks-1: -- Release the locks of first steps of sample operations SELECT pg_advisory_unlock(10); SELECT pg_advisory_unlock(20); @@ -73,28 +73,28 @@ step release-locks-1: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t -step show-progress: +t +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,-1) -(1,0) -(0,2) -(1,0) -show_progress +(0,-1) +(1,0) +(0,2) +(1,0) +show_progress -(0,9) -(1,0) -step release-locks-2: +(0,9) +(1,0) +step release-locks-2: -- Release the locks of second steps of sample operations SELECT pg_advisory_unlock(11); SELECT pg_advisory_unlock(21); @@ -102,28 +102,28 @@ step release-locks-2: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t -step show-progress: +t +step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); -show_progress +show_progress -(0,-1) -(1,-1) -(0,2) -(1,2) -show_progress +(0,-1) +(1,-1) +(0,2) +(1,2) +show_progress -(0,9) -(1,9) -step release-locks-3: +(0,9) +(1,9) +step release-locks-3: -- Release the locks of final steps of sample operations SELECT pg_advisory_unlock(12); SELECT pg_advisory_unlock(22); @@ -131,22 +131,22 @@ step release-locks-3: pg_advisory_unlock -t +t pg_advisory_unlock -t +t pg_advisory_unlock -t +t step s1-start-operation: <... completed> sample_operation - + step s2-start-operation: <... completed> sample_operation - + step s3-start-operation: <... 
completed> sample_operation - + diff --git a/src/test/regress/expected/isolation_range_copy_vs_all.out b/src/test/regress/expected/isolation_range_copy_vs_all.out index 1dacae535..5e0e7e474 100644 --- a/src/test/regress/expected/isolation_range_copy_vs_all.out +++ b/src/test/regress/expected/isolation_range_copy_vs_all.out @@ -3,139 +3,139 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM range_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step 
s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert: INSERT INTO range_copy VALUES(0, 'k', 0);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-11
+11

starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert-select: INSERT INTO range_copy SELECT * FROM range_copy;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-update: UPDATE range_copy SET data = 'l' WHERE id = 0;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-delete: DELETE FROM range_copy WHERE id = 1;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-9
+9

starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE range_copy;
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -163,7 +163,7 @@ ERROR: relation "range_copy" does not exist

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
@@ -183,7 +183,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX range_copy_index;
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
@@ -204,7 +204,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY range_copy_inde
step s1-commit: COMMIT;
step s2-ddl-create-index-concurrently: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
@@ -224,7 +224,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -244,7 +244,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE range_copy DROP new_column;
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -265,7 +265,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -285,38 +285,38 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-table-size: SELECT citus_total_relation_size('range_copy');
citus_total_relation_size

-32768
+32768
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
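Every permutation in this file follows one template: session 1 opens a transaction and COPYies five extra rows into the range-distributed table range_copy while session 2 runs a concurrent statement, and a final COUNT(*) pins down which writes survived; the hunks differ only in the trailing padding that the new normalize.sed rule strips. A rough standalone sketch of that template (running it by hand in two psql sessions, outside the isolation tester, is my assumption — the statements themselves are taken from the steps above):

-- session 1
BEGIN;
COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6' WITH CSV;

-- session 2, concurrently (blocks or errors depending on the permutation)
INSERT INTO range_copy VALUES (0, 'k', 0);

-- session 1
COMMIT;
SELECT COUNT(*) FROM range_copy;  -- the -N/+N pairs in the hunks are this value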

starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-master-modify-multiple-shards: DELETE FROM range_copy;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -325,16 +325,16 @@ step s1-commit: COMMIT;
step s2-master-apply-delete-command: <... completed>
master_apply_delete_command

-1
+1
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -343,16 +343,16 @@ step s1-commit: COMMIT;
step s2-master-drop-all-shards: <... completed>
master_drop_all_shards

-2
+2
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
@@ -362,134 +362,134 @@ step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table

-
-step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
-0
+step s1-select-count: SELECT COUNT(*) FROM range_copy;
+count
+
+0

starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM range_copy WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0);
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-11
+11

starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy;
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0;
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM range_copy WHERE id = 1;
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-9
+9

starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE range_copy;
@@ -497,14 +497,14 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE range_copy;
@@ -518,7 +518,7 @@ ERROR: relation "range_copy" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
@@ -526,9 +526,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
@@ -538,7 +538,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@@ -547,9 +547,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
@@ -559,7 +559,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
@@ -568,9 +568,9 @@ step s1-commit: COMMIT;
step s2-copy: <... completed>
error in steps s1-commit s2-copy: ERROR: missing data for column "new_column"
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -580,7 +580,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -589,9 +589,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -601,7 +601,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column;
@@ -609,9 +609,9 @@ step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
@@ -621,86 +621,86 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('range_copy');
citus_total_relation_size

-32768
+32768
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-10
+10

starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM range_copy;
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;');
master_apply_delete_command

-1
+1
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
master_drop_all_shards

-1
+1
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-5
+5

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range');
create_distributed_table

-
+
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s2-copy: <... completed>
error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy
step s1-select-count: SELECT COUNT(*) FROM range_copy;
-count
+count

-0
+0
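The next file exercises advisory locking between reference tables connected by foreign keys; judging by the expected FK errors, ref_table_2.value references ref_table_1 and ref_table_3.value references ref_table_2. Each s1-view-locks step boils down to the following query, taken verbatim from the spec, and the ExclusiveLock count in the expected output grows with how deep in the foreign-key chain the written table sits:

SELECT mode, count(*)
FROM pg_locks
WHERE locktype = 'advisory'
GROUP BY mode;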

diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out
index 7d210d33e..507db3ccd 100644
--- a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out
+++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out
@@ -1,952 +1,952 @@
Parsed test spec with 2 sessions

starting permutation: s2-begin s2-update-table-1 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-1:
+step s2-update-table-1:
    UPDATE ref_table_1 SET id = 2 WHERE id = 1;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 1
-ShareLock 1
-step s1-rollback:
+ExclusiveLock 1
+ShareLock 1
+step s1-rollback:
    ROLLBACK;

-step s2-rollback:
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-delete-table-1 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 1
-ShareLock 1
-step s2-rollback:
+ExclusiveLock 1
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-update-table-2 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-2:
+step s2-update-table-2:
    UPDATE ref_table_2 SET id = 2 WHERE id = 1;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 2
-ShareLock 1
-step s2-rollback:
+ExclusiveLock 2
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-delete-table-2 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-2:
+step s2-delete-table-2:
    DELETE FROM ref_table_2 WHERE id = 1;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 2
-ShareLock 1
-step s2-rollback:
+ExclusiveLock 2
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-update-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-3:
+step s2-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE id = 1;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 3
-ShareLock 1
-step s1-rollback:
+ExclusiveLock 3
+ShareLock 1
+step s1-rollback:
    ROLLBACK;

-step s2-rollback:
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-delete-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-3:
+step s2-delete-table-3:
    DELETE FROM ref_table_3 WHERE id = 1;

-step s1-begin:
+step s1-begin:
    BEGIN;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-ExclusiveLock 3
-ShareLock 1
-step s1-rollback:
+ExclusiveLock 3
+ShareLock 1
+step s1-rollback:
    ROLLBACK;

-step s2-rollback:
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-insert-table-1 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-RowExclusiveLock1
-ShareLock 1
-step s2-rollback:
+RowExclusiveLock1
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-insert-table-2 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-2:
+step s2-insert-table-2:
    INSERT INTO ref_table_2 VALUES (7, 5);

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-RowExclusiveLock2
-ShareLock 1
-step s2-rollback:
+RowExclusiveLock2
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s2-begin s2-insert-table-3 s1-view-locks s2-rollback s1-view-locks
-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-3:
+step s2-insert-table-3:
    INSERT INTO ref_table_3 VALUES (7, 5);

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

-RowExclusiveLock3
-ShareLock 1
-step s2-rollback:
+RowExclusiveLock3
+ShareLock 1
+step s2-rollback:
    ROLLBACK;

-step s1-view-locks:
+step s1-view-locks:
    SELECT mode, count(*) FROM pg_locks WHERE locktype='advisory' GROUP BY mode;

-mode count
+mode count

starting permutation: s1-begin s2-begin s2-update-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-1:
+step s2-update-table-1:
    UPDATE ref_table_1 SET id = 2 WHERE id = 1;

-step s1-delete-table-2:
+step s1-delete-table-2:
    DELETE FROM ref_table_2 WHERE value = 2;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-delete-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-update-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-1:
+step s2-update-table-1:
    UPDATE ref_table_1 SET id = 2 WHERE id = 1;

-step s1-insert-table-2:
+step s1-insert-table-2:
    INSERT INTO ref_table_2 VALUES (7, 2);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-insert-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-1 2
-3 3
-5 5
-7 2
+1 2
+3 3
+5 5
+7 2

starting permutation: s1-begin s2-begin s2-update-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-1:
+step s2-update-table-1:
    UPDATE ref_table_1 SET id = 2 WHERE id = 1;

-step s1-update-table-2:
+step s1-update-table-2:
    UPDATE ref_table_2 SET id = 0 WHERE value = 2;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-0 2
-3 3
-5 5
+0 2
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-delete-table-2:
+step s1-delete-table-2:
    DELETE FROM ref_table_2 WHERE value = 2;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-delete-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-insert-table-2:
+step s1-insert-table-2:
    INSERT INTO ref_table_2 VALUES (7, 2);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-insert-table-2: <... completed>
error in steps s2-commit s1-insert-table-2: ERROR: insert or update on table "ref_table_2_xxxxxxx" violates foreign key constraint "ref_table_2_value_fkey_xxxxxxx"
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-update-table-2:
+step s1-update-table-2:
    UPDATE ref_table_2 SET id = 0 WHERE value = 2;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-3 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-delete-table-3:
+step s1-delete-table-3:
    DELETE FROM ref_table_3 WHERE value = 1 RETURNING id;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-delete-table-3: <... completed>
-id
-step s1-commit:
+id
+
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-3 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-insert-table-3:
+step s1-insert-table-3:
    INSERT INTO ref_table_3 VALUES (7, 1);

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-insert-table-3: <... completed>
error in steps s2-commit s1-insert-table-3: ERROR: insert or update on table "ref_table_3_xxxxxxx" violates foreign key constraint "ref_table_3_value_fkey_xxxxxxx"
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-1:
+step s2-delete-table-1:
    DELETE FROM ref_table_1 WHERE id = 1;

-step s1-update-table-3:
+step s1-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-3: <... completed>
-id
-step s1-commit:
+id
+
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-3 3
-5 5
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-update-table-3:
+step s1-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-3: <... completed>
-id
+id

-2
-step s1-commit:
+2
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-2 1
-3 3
-5 5
+2 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-1 s1-commit s2-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-update-table-3:
+step s1-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id;

-id
+id

-2
-step s2-insert-table-1:
+2
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-insert-table-1: <... completed>
-step s2-commit:
+step s2-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-2 1
-3 3
-5 5
+2 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-update-table-2:
+step s1-update-table-2:
    UPDATE ref_table_2 SET id = 0 WHERE value = 2;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-2: <... completed>
-step s1-commit:
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
+1 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s1-update-table-2 s2-insert-table-1 s1-commit s2-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-update-table-2:
+step s1-update-table-2:
    UPDATE ref_table_2 SET id = 0 WHERE value = 2;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-insert-table-1: <... completed>
-step s2-commit:
+step s2-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
+1 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-insert-table-2 s1-update-table-3 s2-commit s1-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-2:
+step s2-insert-table-2:
    INSERT INTO ref_table_2 VALUES (7, 5);

-step s1-update-table-3:
+step s1-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id;

-step s2-commit:
+step s2-commit:
    COMMIT;

step s1-update-table-3: <... completed>
-id
+id

-2
-step s1-commit:
+2
+step s1-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-2 1
-3 3
-5 5
+2 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-2 s1-commit s2-commit s1-select-table-3
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s1-update-table-3:
+step s1-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE value = 1 RETURNING id;

-id
+id

-2
-step s2-insert-table-2:
+2
+step s2-insert-table-2:
    INSERT INTO ref_table_2 VALUES (7, 5);

-step s1-commit:
+step s1-commit:
    COMMIT;

step s2-insert-table-2: <... completed>
-step s2-commit:
+step s2-commit:
    COMMIT;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-2 1
-3 3
-5 5
+2 1
+3 3
+5 5

starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-1 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-select-table-1:
+step s1-select-table-1:
    SELECT * FROM ref_table_1 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-2 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-3 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-insert-table-1:
+step s2-insert-table-1:
    INSERT INTO ref_table_1 VALUES (7, 7);

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-1 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-2:
+step s2-delete-table-2:
    DELETE FROM ref_table_2 WHERE id = 1;

-step s1-select-table-1:
+step s1-select-table-1:
    SELECT * FROM ref_table_1 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-2 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-2:
+step s2-delete-table-2:
    DELETE FROM ref_table_2 WHERE id = 1;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-3 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-delete-table-2:
+step s2-delete-table-2:
    DELETE FROM ref_table_2 WHERE id = 1;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-1 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-3:
+step s2-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE id = 1;

-step s1-select-table-1:
+step s1-select-table-1:
    SELECT * FROM ref_table_1 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-2 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-3:
+step s2-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE id = 1;

-step s1-select-table-2:
+step s1-select-table-2:
    SELECT * FROM ref_table_2 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;

starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-3 s2-commit s1-commit
-step s1-begin:
+step s1-begin:
    BEGIN;

-step s2-begin:
+step s2-begin:
    BEGIN;

-step s2-update-table-3:
+step s2-update-table-3:
    UPDATE ref_table_3 SET id = 2 WHERE id = 1;

-step s1-select-table-3:
+step s1-select-table-3:
    SELECT * FROM ref_table_3 ORDER BY id, value;

-id value
+id value

-1 1
-3 3
-5 5
-step s2-commit:
+1 1
+3 3
+5 5
+step s2-commit:
    COMMIT;

-step s1-commit:
+step s1-commit:
    COMMIT;
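The next file, isolation_ref2ref_foreign_keys_on_mx.out, replays the same scenarios in Citus MX mode: the statements are driven from a worker over a persistent session-level connection, using the test-harness UDFs that appear throughout its hunks. The skeleton every s2 session follows is (worker host and port as used in the expected output):

SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
SELECT run_commands_on_session_level_connection_to_node(
    'UPDATE ref_table_1 SET id = 2 WHERE id = 1');
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
SELECT stop_session_level_connection_to_node();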

diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
index b36eaa365..1dab78f92 100644
--- a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
+++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out
@@ -1,658 +1,658 @@
Parsed test spec with 2 sessions

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-update-table-1:
+
+step s2-update-table-1:
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_1 SET id = 2 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-delete-table-1:
+
+step s2-delete-table-1:
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_1 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-update-table-2:
+
+step s2-update-table-2:
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_2 SET id = 2 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-delete-table-2:
+
+step s2-delete-table-2:
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_2 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-update-table-3:
+
+step s2-update-table-3:
    SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table_3 SET id = 2 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-delete-table-3:
+
+step s2-delete-table-3:
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table_3 WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-insert-table-1:
+
+step s2-insert-table-1:
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_1 VALUES (7, 7)');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(RowExclusiveLock,1)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-insert-table-2:
+
+step s2-insert-table-2:
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_2 VALUES (7, 5)');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(RowExclusiveLock,2)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+

starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
-step s2-start-session-level-connection:
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
+
+step s2-begin-on-worker:
+    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-insert-table-3:
+
+step s2-insert-table-3:
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_3 VALUES (7, 5)');

run_commands_on_session_level_connection_to_node

-
-step s1-start-session-level-connection:
+
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

localhost 57637 t {"(RowExclusiveLock,3)","(ShareLock,1)"}
-step s2-rollback-worker:
+step s2-rollback-worker:
    SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');

run_commands_on_session_level_connection_to_node

-
-step s1-view-locks:
+
+step s1-view-locks:
    SELECT * FROM master_run_on_worker(
        ARRAY['localhost']::text[],
        ARRAY[57637]::int[],
        ARRAY[$$
          SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
-            (SELECT mode, count(*) count FROM pg_locks
+            (SELECT mode, count(*) count FROM pg_locks
             WHERE locktype='advisory' GROUP BY mode) t$$]::text[],
        false);

-node_name node_port success result
+node_name node_port success result

-localhost 57637 t
-step s1-stop-connection:
+localhost 57637 t
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+

restore_isolation_tester_func

-
+
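The last file in this section, isolation_ref_select_for_update_vs_all_on_mx.out, pits row locks on a reference table against concurrent access from a second worker (ports 57637 and 57638): s2's statements stay blocked until s1's worker-side transaction commits. The core locking statement, shipped to the worker through the same session-level connection helpers, is:

SELECT * FROM ref_table WHERE id = 1 OR id = 2 FOR UPDATE;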
s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-table-3: + +step s2-insert-table-3: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table_3 VALUES (7, 5)'); run_commands_on_session_level_connection_to_node - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result localhost 57637 t {"(RowExclusiveLock,3)","(ShareLock,1)"} -step s2-rollback-worker: +step s2-rollback-worker: SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); run_commands_on_session_level_connection_to_node - -step s1-view-locks: + +step s1-view-locks: SELECT * FROM master_run_on_worker( ARRAY['localhost']::text[], ARRAY[57637]::int[], ARRAY[$$ SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM - (SELECT mode, count(*) count FROM pg_locks + (SELECT mode, count(*) count FROM pg_locks WHERE locktype='advisory' GROUP BY mode) t$$]::text[], false); -node_name node_port success result +node_name node_port success result -localhost 57637 t -step s1-stop-connection: +localhost 57637 t +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out index 5ea02eb65..7855dadea 100644 --- a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out @@ -3,531 +3,531 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT 
run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-select-for-update: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert: + +step s2-insert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 10), (2, 20)'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); 
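[Editor's note, not part of the diff] The permutations in this expected-output file pit a worker-local SELECT ... FOR UPDATE against concurrent writers; the isolation tester reprints the blocked step as <... completed> once the locking transaction commits. Below is a minimal single-node sketch of the same row-lock interaction, assuming a plain two-column table rather than a Citus reference table:

    -- setup
    CREATE TABLE ref_table (id int, value int);
    INSERT INTO ref_table VALUES (1, 10), (2, 20);

    -- session 1: take row locks and hold them
    BEGIN;
    SELECT * FROM ref_table WHERE id = 1 OR id = 2 FOR UPDATE;

    -- session 2: blocks on session 1's row locks
    UPDATE ref_table SET value = 12 WHERE id = 1;

    -- session 1: releasing the locks lets session 2's UPDATE finish
    COMMIT;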
run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-ref-table: + +step s2-insert-select-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table SELECT * FROM ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert-select-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 1, 10 && echo 2, 20''WITH CSV'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-copy: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter: + +step s2-alter: ALTER TABLE ref_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY ref_table_index ON ref_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out index 9f4ee7360..0b82dab0e 100644 --- a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out @@ -3,299 +3,299 @@ Parsed test spec with 3 sessions starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-add-primary-key: + +step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-upsert: + +step s1-upsert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 3), (2, 3) ON CONFLICT (id) DO UPDATE SET value=3'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT 
run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select: + +step s2-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -2 +2 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-select-ref-table: + +step s2-insert-select-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table SELECT * FROM ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-insert-select-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-add-primary-key: + +step s1-add-primary-key: ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-upsert: + +step s1-upsert: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES (1, 3), (2, 3) ON CONFLICT (id) DO UPDATE SET value=3'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-drop: + +step s2-drop: DROP TABLE ref_table; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-drop: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; ERROR: relation "ref_table" does not exist restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1 OR id=2'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE ref_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM ref_table; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index c6ed82f07..1cba83a40 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -3,97 +3,97 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM reference_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-task-tracker-select: +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; 
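[Editor's note, not part of the diff; the SELECT belonging to the s2-task-tracker-select step continues directly below] These tests seed data with COPY ... FROM PROGRAM, which runs a shell command on the database server and reads its stdout as the COPY input. A standalone sketch of the pattern, using the same inline echo trick as the tests (COPY ... FROM PROGRAM requires superuser, or on PostgreSQL 11+ membership in the pg_execute_server_program role):

    CREATE TABLE reference_copy (id integer, data text, int_data int);
    -- the shell command's stdout becomes the CSV input
    COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1' WITH CSV;
    SELECT count(*) FROM reference_copy;  -- returns 2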
SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -101,14 +101,14 @@ step s2-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -20 +20 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -116,14 +116,14 @@ step s2-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s2-update: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -131,14 +131,14 @@ step s2-delete: DELETE FROM reference_copy WHERE id = 1; step s1-commit: COMMIT; step s2-delete: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -146,14 +146,14 @@ step s2-truncate: TRUNCATE reference_copy; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -166,7 +166,7 @@ ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -174,9 +174,9 @@ step s2-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -186,7 +186,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; @@ -195,9 +195,9 @@ step s2-ddl-drop-index: DROP INDEX reference_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -207,7 +207,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -215,9 +215,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY reference_copy_ step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -227,7 +227,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -235,9 +235,9 @@ step s2-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -247,7 +247,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -256,9 +256,9 @@ step s2-ddl-drop-column: ALTER TABLE reference_copy DROP new_column; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -268,7 +268,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -276,9 +276,9 @@ step s2-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -288,24 +288,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -313,14 +313,14 @@ step s2-master-modify-multiple-shards: DELETE FROM reference_copy; step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -0 +0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_reference_table - + step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -331,92 +331,92 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_reference_table - -step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count -15 +step s1-select-count: SELECT COUNT(*) FROM reference_copy; +count + +15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM reference_copy WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -11 +11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; @@ -424,14 +424,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; 
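[Editor's note, not part of the diff] In the permutation above, s2's COPY waits while s1 holds an open transaction that has already written to the reference table, and is only reported as <... completed> after s1-commit; the final count of 15 (5 initial rows, 5 more from the INSERT ... SELECT, 5 copied) confirms both writes landed. A sketch of driving that scenario by hand, assuming a running Citus cluster and a fresh table:

    CREATE TABLE reference_copy (id integer, data text, int_data int);
    SELECT create_reference_table('reference_copy');
    COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;

    -- session 1: write inside an open transaction
    BEGIN;
    INSERT INTO reference_copy SELECT * FROM reference_copy;  -- 5 -> 10 rows

    -- session 2: queues behind session 1's in-flight reference-table write
    COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;

    -- session 1: the commit unblocks session 2's COPY
    COMMIT;
    SELECT COUNT(*) FROM reference_copy;  -- returns 15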
step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; @@ -439,14 +439,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM reference_copy WHERE id = 1; @@ -454,14 +454,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -9 +9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE reference_copy; @@ -469,14 +469,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE reference_copy; @@ -490,7 +490,7 @@ ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); @@ -498,9 +498,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -510,7 +510,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; @@ -519,9 +519,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers @@ -531,7 +531,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; @@ -540,9 +540,9 @@ step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -552,7 +552,7 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -561,9 +561,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -573,7 +573,7 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; @@ -581,9 +581,9 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -593,24 +593,24 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size -32768 +32768 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -10 +10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_reference_table - + step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM reference_copy; @@ -618,14 +618,14 @@ step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -5 +5 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_reference_table - + step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; @@ -633,11 +633,11 @@ step s1-begin: BEGIN; step s1-distribute-table: SELECT create_reference_table('reference_copy'); create_reference_table - + step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; -count +count -15 +15 diff --git a/src/test/regress/expected/isolation_reference_on_mx.out b/src/test/regress/expected/isolation_reference_on_mx.out index d45f54659..1fed9e597 100644 --- a/src/test/regress/expected/isolation_reference_on_mx.out +++ b/src/test/regress/expected/isolation_reference_on_mx.out @@ -1,616 +1,616 @@ Parsed test spec with 2 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update-ref-table: + +step s1-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete-from-ref-table: + +step s1-delete-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-into-ref-table: + +step s1-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY 
ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step 
s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy-to-ref-table: + +step s1-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy-to-ref-table: + +step s2-copy-to-ref-table: SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step 
s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-update-ref-table: + +step s2-update-ref-table: SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-update-ref-table: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-insert-into-ref-table: + +step s2-insert-into-ref-table: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... 
completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-from-ref-table: + +step s2-select-from-ref-table: SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM ref_table'); run_commands_on_session_level_connection_to_node - -step s1-begin: + +step s1-begin: BEGIN; -step s1-alter-table: +step s1-alter-table: ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s1-alter-table: <... completed> -step s1-commit: +step s1-commit: COMMIT; -step s2-stop-connection: +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_replace_wait_function.out b/src/test/regress/expected/isolation_replace_wait_function.out index 15c4649b7..c93af44b2 100644 --- a/src/test/regress/expected/isolation_replace_wait_function.out +++ b/src/test/regress/expected/isolation_replace_wait_function.out @@ -3,23 +3,23 @@ Parsed test spec with 2 sessions starting permutation: s1-insert-1 s2-insert s1-commit s2-commit create_distributed_table - -step s1-insert-1: + +step s1-insert-1: BEGIN; INSERT INTO test_locking (a) VALUES (1); -step s2-insert: +step s2-insert: BEGIN; INSERT INTO test_locking (a) VALUES (1); -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... 
completed> error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_1400001" -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index eae88c220..110178365 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -3,17 +3,17 @@ Parsed test spec with 3 sessions starting permutation: s1-begin s2-begin s1-update-dist-table s2-lock-ref-table-placement-on-coordinator s1-lock-ref-table-placement-on-coordinator s2-update-dist-table deadlock-checker-call s1-end s2-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-dist-table: +step s1-update-dist-table: update dist_table set b = 2 where a = 1; -step s2-lock-ref-table-placement-on-coordinator: +step s2-lock-ref-table-placement-on-coordinator: DO $$ DECLARE refshardid int; BEGIN @@ -22,7 +22,7 @@ step s2-lock-ref-table-placement-on-coordinator: END $$; -step s1-lock-ref-table-placement-on-coordinator: +step s1-lock-ref-table-placement-on-coordinator: DO $$ DECLARE refshardid int; BEGIN @@ -31,113 +31,113 @@ step s1-lock-ref-table-placement-on-coordinator: END $$; -step s2-update-dist-table: +step s2-update-dist-table: update dist_table set b = 2 where a = 1; -step deadlock-checker-call: +step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks -t +t step s1-lock-ref-table-placement-on-coordinator: <... completed> step s2-update-dist-table: <... 
completed> error in steps deadlock-checker-call s1-lock-ref-table-placement-on-coordinator s2-update-dist-table: ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-end: +step s1-end: END; -step s2-end: +step s2-end: END; master_remove_node - + starting permutation: s1-begin s2-begin s1-update-ref-table s2-sleep s2-view-dist s2-view-worker s2-end s1-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-ref-table: +step s1-update-ref-table: update ref_table set a = a + 1; -step s2-sleep: +step s2-sleep: SELECT pg_sleep(0.5); -pg_sleep +pg_sleep - -step s2-view-dist: + +step s2-view-dist: SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname update ref_table set a = a + 1; -coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression update ref_table set a = a + 1; -localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, - master_query_host_port, state, wait_event_type, wait_event, usename, datname +localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-view-worker: + SELECT query, query_hostname, query_hostport, master_query_host_name, + master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity - WHERE query NOT ILIKE '%pg_prepared_xacts%' AND + WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%dump_local_wait_edges%' ORDER BY query, query_hostport DESC; -query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname +query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression -step s2-end: +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle 
in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression +step s2-end: END; -step s1-end: +step s1-end: END; master_remove_node - + starting permutation: s1-begin s2-begin s1-update-ref-table s2-active-transactions s1-end s2-end create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-update-ref-table: +step s1-update-ref-table: update ref_table set a = a + 1; -step s2-active-transactions: +step s2-active-transactions: -- Admin should be able to see all transactions SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_global_active_transactions(); -count +count -2 -count +2 +count -6 -step s1-end: +6 +step s1-end: END; -step s2-end: +step s2-end: END; master_remove_node - + diff --git a/src/test/regress/expected/isolation_select_for_update.out b/src/test/regress/expected/isolation_select_for_update.out index c1d655d60..7a5be5ff1 100644 --- a/src/test/regress/expected/isolation_select_for_update.out +++ b/src/test/regress/expected/isolation_select_for_update.out @@ -1,103 +1,103 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-select-from-t1-t2-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-update: +step s1-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-delete-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-share: +step s1-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-delete-t1: +step s2-delete-t1: DELETE FROM test_table_1_rf1 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-delete-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-for-update: +step s1-select-from-t1-rt-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-with-lc-for-update: +step s1-select-from-t1-rt-with-lc-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 @@ -105,31 +105,31 @@ step s1-select-from-t1-rt-with-lc-for-update: FOR UPDATE OF rt1; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-with-lc-for-update: +step s1-select-from-t1-rt-with-lc-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 @@ -137,265 +137,265 @@ step s1-select-from-t1-rt-with-lc-for-update: FOR UPDATE OF rt1; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-select-from-t1-t2-for-share s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-t2-for-share: +step s1-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_2_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-share: +step s2-select-from-t1-t2-for-share: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s1-commit: +1 2 1 2 +step s1-commit: COMMIT; -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-rt-for-update: +step s1-select-from-t1-rt-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN ref_table as rt1 on tt1.id = rt1.id WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-begin: +1 2 1 2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-update: +step s2-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -step s1-commit: +step s1-commit: COMMIT; step s2-select-from-t1-t2-for-update: <... 
completed> -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-commit: +1 2 1 2 +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-within-cte: +step s1-select-from-t1-within-cte: WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; -val_1 +val_1 -2 -step s2-begin: +2 +step s2-begin: BEGIN; -step s2-select-from-t1-t2-for-update: +step s2-select-from-t1-t2-for-update: SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; -step s1-commit: +step s1-commit: COMMIT; step s2-select-from-t1-t2-for-update: <... completed> -id val_1 id val_1 +id val_1 id val_1 -1 2 1 2 -step s2-commit: +1 2 1 2 +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-within-cte: +step s1-select-from-t1-within-cte: WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; -val_1 +val_1 -2 -step s2-begin: +2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-with-subquery: +step s1-select-from-t1-with-subquery: SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-rt-with-subquery s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-rt-with-subquery: +step s1-select-from-rt-with-subquery: SELECT * FROM (SELECT * FROM ref_table FOR UPDATE) foo WHERE id = 1; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-select-from-t1-with-view s2-begin s2-update-t1 s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-select-from-t1-with-view: +step s1-select-from-t1-with-view: SELECT * FROM test_1 WHERE id = 1 FOR UPDATE; -id val_1 +id val_1 -1 2 -step s2-begin: +1 2 +step s2-begin: BEGIN; -step s2-update-t1: +step s2-update-t1: UPDATE test_table_1_rf1 SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-t1: <... 
completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + starting permutation: s1-begin s1-update-rt-with-cte-select-from-rt s2-begin s2-update-rt s1-commit s2-commit -step s1-begin: +step s1-begin: BEGIN; -step s1-update-rt-with-cte-select-from-rt: +step s1-update-rt-with-cte-select-from-rt: WITH foo AS (SELECT * FROM ref_table FOR UPDATE) UPDATE ref_table SET val_1 = 4 FROM foo WHERE ref_table.id = foo.id; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-rt: +step s2-update-rt: UPDATE ref_table SET val_1 = 5 WHERE id = 1; -step s1-commit: +step s1-commit: COMMIT; step s2-update-rt: <... completed> -step s2-commit: +step s2-commit: COMMIT; restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index f4d1a3331..b1136066b 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -3,345 +3,345 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-router-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 -step s2-task-tracker-select: +1 b 1 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && 
echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 -step s2-task-tracker-select: +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-router-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-real-time-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM 
select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; -id data int_data +id data int_data -0 a 0 -1 b 1 -2 c 2 -3 d 3 -4 e 4 +0 a 0 +1 b 1 +2 c 2 +3 d 3 +4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; -step s1-task-tracker-select: +step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 -step s2-task-tracker-select: +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 +step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -id data int_data id data int_data +id data int_data id data int_data -0 a 0 0 a 0 -1 b 1 1 b 1 -2 c 2 2 c 2 -3 d 3 3 d 3 -4 e 4 4 e 4 +0 a 0 0 a 0 +1 b 1 1 b 1 +2 c 2 2 c 2 +3 d 3 3 d 3 +4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-insert s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -6 +6 starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: 
s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -4 +4 starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... completed> @@ -351,19 +351,19 @@ ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -373,21 +373,21 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -397,19 +397,19 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers @@ -419,20 +419,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -442,21 +442,21 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data new_column +id data int_data new_column -1 b 1 0 +1 b 1 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -466,20 +466,20 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -489,181 +489,181 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size -32768 +32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -5 +5 starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; -id data int_data +id data int_data -1 b 1 +1 b 1 step s2-master-modify-multiple-shards: DELETE FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); master_apply_delete_command -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; -count +count -0 +0 starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table - + step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); master_drop_all_shards -1 +1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM 
select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
+
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0);
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-6
+6

starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM select_append WHERE id = 1;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-4
+4

starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE select_append;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s2-router-select: <... completed>
-id data int_data
+id data int_data

step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE select_append;
@@ -677,19 +677,19 @@ ERROR: relation "select_append" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -699,7 +699,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-begin: BEGIN;
@@ -707,13 +707,13 @@ step s1-ddl-drop-index: DROP INDEX select_append_index;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s2-router-select: <... completed>
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -723,20 +723,20 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s2-router-select: <... completed>
-id data int_data new_column
+id data int_data new_column

-1 b 1 0
+1 b 1 0
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -746,7 +746,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -754,13 +754,13 @@ step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s2-router-select: <... completed>
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -770,20 +770,20 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s2-router-select: <... completed>
-id new_column int_data
+id new_column int_data

-1 b 1
+1 b 1
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -793,212 +793,212 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('select_append');
citus_total_relation_size

-32768
+32768
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM select_append;
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

-1 b 1
+1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;');
master_apply_delete_command

-1
+1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy');
master_drop_all_shards

-1
+1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
+
step s2-router-select: SELECT * FROM select_append WHERE id = 1;
-id data int_data
+id data int_data

step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-6
+6

starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-delete: DELETE FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-4
+4

starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-truncate: TRUNCATE select_append;
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-drop: DROP TABLE select_append;
step s1-commit: COMMIT;
step s2-drop: <... completed>
@@ -1008,23 +1008,23 @@ ERROR: relation "select_append" does not exist

starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1034,25 +1034,25 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-ddl-drop-index: DROP INDEX select_append_index;
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1062,23 +1062,23 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1088,24 +1088,24 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-commit: COMMIT;
step s2-ddl-add-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1115,25 +1115,25 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data new_column
+id data int_data new_column

-0 a 0 0
-1 b 1 0
-2 c 2 0
-3 d 3 0
-4 e 4 0
+0 a 0 0
+1 b 1 0
+2 c 2 0
+3 d 3 0
+4 e 4 0
step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column;
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1143,24 +1143,24 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column;
step s1-commit: COMMIT;
step s2-ddl-rename-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1170,173 +1170,173 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-table-size: SELECT citus_total_relation_size('select_append');
citus_total_relation_size

-32768
+32768
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s2-master-modify-multiple-shards: DELETE FROM select_append;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
+
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0);
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-6
+6

starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM select_append WHERE id = 1;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-4
+4

starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE select_append;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
step s1-commit: COMMIT;
step s2-real-time-select: <... completed>
-id data int_data
+id data int_data

step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE select_append;
@@ -1350,23 +1350,23 @@ ERROR: relation "select_append" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1376,7 +1376,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-begin: BEGIN;
@@ -1384,17 +1384,17 @@ step s1-ddl-drop-index: DROP INDEX select_append_index;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
step s1-commit: COMMIT;
step s2-real-time-select: <... completed>
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1404,24 +1404,24 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
step s1-commit: COMMIT;
step s2-real-time-select: <... completed>
-id data int_data new_column
+id data int_data new_column

-0 a 0 0
-1 b 1 0
-2 c 2 0
-3 d 3 0
-4 e 4 0
+0 a 0 0
+1 b 1 0
+2 c 2 0
+3 d 3 0
+4 e 4 0
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1431,7 +1431,7 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -1439,17 +1439,17 @@ step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
step s1-commit: COMMIT;
step s2-real-time-select: <... completed>
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1459,24 +1459,24 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
step s1-commit: COMMIT;
step s2-real-time-select: <... completed>
-id new_column int_data
+id new_column int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1486,206 +1486,206 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('select_append');
citus_total_relation_size

-32768
+32768
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM select_append;
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

-0 a 0
-1 b 1
-2 c 2
-3 d 3
-4 e 4
+0 a 0
+1 b 1
+2 c 2
+3 d 3
+4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
+
step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2;
-id data int_data
+id data int_data

step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-6
+6

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-update s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-delete s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-delete: DELETE FROM select_append WHERE id = 1;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-4
+4

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-truncate s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-truncate: TRUNCATE select_append;
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-drop s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-drop: DROP TABLE select_append;
step s1-commit: COMMIT;
step s2-drop: <... completed>
@@ -1695,26 +1695,26 @@ ERROR: relation "select_append" does not exist

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1724,28 +1724,28 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-ddl-drop-index: DROP INDEX select_append_index;
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1755,26 +1755,26 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -1784,27 +1784,27 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-commit: COMMIT;
step s2-ddl-add-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1814,28 +1814,28 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data new_column id data int_data new_column
+id data int_data new_column id data int_data new_column

-0 a 0 0 0 a 0 0
-1 b 1 0 1 b 1 0
-2 c 2 0 2 c 2 0
-3 d 3 0 3 d 3 0
-4 e 4 0 4 e 4 0
+0 a 0 0 0 a 0 0
+1 b 1 0 1 b 1 0
+2 c 2 0 2 c 2 0
+3 d 3 0 3 d 3 0
+4 e 4 0 4 e 4 0
step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column;
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1845,27 +1845,27 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column;
step s1-commit: COMMIT;
step s2-ddl-rename-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -1875,201 +1875,201 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-table-size: SELECT citus_total_relation_size('select_append');
citus_total_relation_size

-32768
+32768
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s2-master-modify-multiple-shards: DELETE FROM select_append;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-task-tracker-select s2-distribute-table s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
-step s1-task-tracker-select:
+step s1-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
+
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-insert s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0);
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-6
+6

starting permutation: s1-initialize s1-begin s1-insert-select s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-update s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-delete s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM select_append WHERE id = 1;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-4
+4

starting permutation: s1-initialize s1-begin s1-truncate s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE select_append;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
step s1-commit: COMMIT;
step s2-task-tracker-select: <... completed>
-id data int_data id data int_data
+id data int_data id data int_data

step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-initialize s1-begin s1-drop s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE select_append;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
@@ -2082,26 +2082,26 @@ ERROR: relation "select_append" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -2111,28 +2111,28 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id);
step s1-begin: BEGIN;
step s1-ddl-drop-index: DROP INDEX select_append_index;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
step s1-commit: COMMIT;
step s2-task-tracker-select: <... completed>
-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers

@@ -2142,27 +2142,27 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
step s1-commit: COMMIT;
step s2-task-tracker-select: <... completed>
-id data int_data new_column id data int_data new_column
+id data int_data new_column id data int_data new_column

-0 a 0 0 0 a 0 0
-1 b 1 0 1 b 1 0
-2 c 2 0 2 c 2 0
-3 d 3 0 3 d 3 0
-4 e 4 0 4 e 4 0
+0 a 0 0 0 a 0 0
+1 b 1 0 1 b 1 0
+2 c 2 0 2 c 2 0
+3 d 3 0 3 d 3 0
+4 e 4 0 4 e 4 0
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -2172,28 +2172,28 @@ run_command_on_workers

starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
step s1-commit: COMMIT;
step s2-task-tracker-select: <... completed>
-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -2203,27 +2203,27 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
step s1-commit: COMMIT;
step s2-task-tracker-select: <... completed>
-id new_column int_data id new_column int_data
+id new_column int_data id new_column int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers

@@ -2233,73 +2233,73 @@ run_command_on_workers

starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('select_append');
citus_total_relation_size

-32768
-step s2-task-tracker-select:
+32768
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-5
+5

starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM select_append;
-step s2-task-tracker-select:
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

-0 a 0 0 a 0
-1 b 1 1 b 1
-2 c 2 2 c 2
-3 d 3 3 d 3
-4 e 4 4 e 4
+0 a 0 0 a 0
+1 b 1 1 b 1
+2 c 2 2 c 2
+3 d 3 3 d 3
+4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0

starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

-
+
step s1-drop: DROP TABLE select_append;
step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append');
create_distributed_table

-
-step s2-task-tracker-select:
+
+step s2-task-tracker-select:
    SET citus.task_executor_type TO "task-tracker";
    SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

-id data int_data id data int_data
+id data int_data id data int_data

step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM select_append;
-count
+count

-0
+0
diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out
index 933e884f5..5c5ce570f 100644
--- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out
+++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out
@@ -1,426 +1,426 @@
Parsed test spec with 3 sessions

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s1-select:
+
+step s1-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-select:
+
+step s2-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
+
restore_isolation_tester_func

-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s1-select:
+
+step s1-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-insert-select:
+
+step s2-insert-select:
    SELECT run_commands_on_session_level_connection_to_node('INSERT INTO select_table SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM select_table;

-count
+count

-10
+10

restore_isolation_tester_func

-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s1-select:
+
+step s1-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-delete:
+
+step s2-delete:
    SELECT run_commands_on_session_level_connection_to_node('DELETE FROM select_table WHERE id = 1');

run_commands_on_session_level_connection_to_node

-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM select_table;

-count
+count

-4
+4

restore_isolation_tester_func

-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s1-select:
+
+step s1-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s2-start-session-level-connection:
+
+step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node

-
-step s2-begin-on-worker:
+
+step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s2-copy:
+
+step s2-copy:
    SELECT run_commands_on_session_level_connection_to_node('COPY select_table FROM PROGRAM ''echo 9, 90 && echo 10, 100''WITH CSV');

run_commands_on_session_level_connection_to_node

-
-step s1-commit-worker:
+
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s2-commit-worker:
+
+step s2-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s1-stop-connection:
+
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s2-stop-connection:
+
+step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node

-
-step s3-select-count:
+
+step s3-select-count:
    SELECT COUNT(*) FROM select_table;

-count
+count

-7
+7

restore_isolation_tester_func

-
+

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-begin s2-index s1-commit-worker s2-commit s1-stop-connection
-step s1-start-session-level-connection:
+step s1-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57637);

start_session_level_connection_to_node

-
-step s1-begin-on-worker:
+
+step s1-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node

-
-step s1-select:
+
+step s1-select:
    SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table');

run_commands_on_session_level_connection_to_node

-
-step s2-begin:
+
+step s2-begin:
    BEGIN;
-step s2-index:
+step s2-index:
    CREATE INDEX select_index ON select_table(id);
-step s1-commit-worker:
+step s1-commit-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node

-
-step s2-commit:
+
+step s2-commit:
    COMMIT;
-step s1-stop-connection:
+step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table WHERE id = 6 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); run_commands_on_session_level_connection_to_node - -step s2-coordinator-create-index-concurrently: + +step s2-coordinator-create-index-concurrently: CREATE INDEX CONCURRENTLY select_table_index ON select_table(id); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_shouldhaveshards.out b/src/test/regress/expected/isolation_shouldhaveshards.out index 5168a5949..8de483c5e 100644 --- a/src/test/regress/expected/isolation_shouldhaveshards.out +++ 
b/src/test/regress/expected/isolation_shouldhaveshards.out @@ -1,22 +1,22 @@ Parsed test spec with 2 sessions starting permutation: s1-add-second-node s1-begin s2-begin s2-create-distributed-table s1-noshards s2-commit s1-commit s2-shardcounts -?column? +?column? -1 -step s1-add-second-node: +1 +step s1-add-second-node: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-create-distributed-table: +step s2-create-distributed-table: CREATE TABLE t1 (a int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; @@ -24,145 +24,145 @@ step s2-create-distributed-table: create_distributed_table - -step s1-noshards: + +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); -step s2-commit: +step s2-commit: COMMIT; step s1-noshards: <... completed> master_set_node_property - -step s1-commit: + +step s1-commit: COMMIT; -step s2-shardcounts: +step s2-shardcounts: SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 't1'::regclass GROUP BY nodeport ORDER BY nodeport; -nodeport count +nodeport count -57637 2 -57638 2 +57637 2 +57638 2 master_remove_node - - + + starting permutation: s1-add-second-node s1-begin s2-begin s1-noshards s2-create-distributed-table s1-commit s2-commit s2-shardcounts -?column? +?column? -1 -step s1-add-second-node: +1 +step s1-add-second-node: SELECT 1 FROM master_add_node('localhost', 57638); -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-noshards: +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); master_set_node_property - -step s2-create-distributed-table: + +step s2-create-distributed-table: CREATE TABLE t1 (a int); -- session needs to have replication factor set to 1, can't do in setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('t1', 'a'); -step s1-commit: +step s1-commit: COMMIT; step s2-create-distributed-table: <... completed> create_distributed_table - -step s2-commit: + +step s2-commit: COMMIT; -step s2-shardcounts: +step s2-shardcounts: SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 't1'::regclass GROUP BY nodeport ORDER BY nodeport; -nodeport count +nodeport count -57638 4 +57638 4 master_remove_node - - + + starting permutation: s1-begin s2-begin s1-noshards s2-update-node s1-commit s2-commit -?column? +?column? -1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s1-noshards: +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); master_set_node_property - -step s2-update-node: + +step s2-update-node: select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) -step s1-commit: +step s1-commit: COMMIT; step s2-update-node: <... completed> master_update_node - -step s2-commit: + +step s2-commit: COMMIT; master_remove_node - + starting permutation: s1-begin s2-begin s2-update-node s1-noshards s2-commit s1-commit -?column? +?column? 
-1 -step s1-begin: +1 +step s1-begin: BEGIN; -step s2-begin: +step s2-begin: BEGIN; -step s2-update-node: +step s2-update-node: select * from master_update_node((select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638) master_update_node - -step s1-noshards: + +step s1-noshards: SELECT * from master_set_node_property('localhost', 57637, 'shouldhaveshards', false); -step s2-commit: +step s2-commit: COMMIT; step s1-noshards: <... completed> error in steps s2-commit s1-noshards: ERROR: node at "localhost:xxxxx" does not exist -step s1-commit: +step s1-commit: COMMIT; master_remove_node - + diff --git a/src/test/regress/expected/isolation_transaction_recovery.out b/src/test/regress/expected/isolation_transaction_recovery.out index 411a75feb..461bfabd3 100644 --- a/src/test/regress/expected/isolation_transaction_recovery.out +++ b/src/test/regress/expected/isolation_transaction_recovery.out @@ -3,43 +3,43 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-recover s2-insert s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-recover: +step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions -0 -step s2-insert: +0 +step s2-insert: INSERT INTO test_transaction_recovery VALUES (1,2); -step s1-commit: +step s1-commit: COMMIT; starting permutation: s1-begin s1-recover s2-recover s1-commit create_reference_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-recover: +step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions -0 -step s2-recover: +0 +step s2-recover: SELECT recover_prepared_transactions(); -step s1-commit: +step s1-commit: COMMIT; step s2-recover: <... completed> recover_prepared_transactions -0 +0 diff --git a/src/test/regress/expected/isolation_truncate_vs_all.out b/src/test/regress/expected/isolation_truncate_vs_all.out index a555891d8..930c5ef25 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all.out +++ b/src/test/regress/expected/isolation_truncate_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -56,12 +56,12 @@ step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -71,9 +71,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -81,12 +81,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; @@ -97,9 +97,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -107,12 +107,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -120,9 +120,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -130,12 +130,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -145,9 +145,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -155,12 +155,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -171,9 +171,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -181,12 +181,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -196,9 +196,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -206,12 +206,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -221,20 +221,20 @@ step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -244,17 +244,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-apply-delete-command s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -264,20 +264,20 @@ step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -287,20 +287,20 @@ step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards -0 +0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; @@ -311,20 +311,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -334,17 +334,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -358,12 +358,12 @@ step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -373,9 +373,9 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -383,12 +383,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; @@ -399,9 +399,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers @@ -409,12 +409,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -424,9 +424,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -434,12 +434,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -450,9 +450,9 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -460,12 +460,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -475,9 +475,9 @@ step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -485,34 +485,34 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('truncate_append'); citus_total_relation_size -32768 +32768 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -522,63 +522,63 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-apply-delete-command s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); master_apply_delete_command -1 +1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); master_drop_all_shards -1 +1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; @@ -586,15 +586,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('truncate_append', 'id', 'append'); create_distributed_table - + step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out index 0f59eaae6..9432fcb58 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out @@ -1,484 +1,484 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-truncate: + +step s1-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select: + +step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM truncate_table WHERE id = 6'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-insert-select: + +step s1-insert-select: SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM truncate_table'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM truncate_table WHERE id IN (5, 6, 7)'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-copy: + +step s1-copy: SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-begin s1-alter s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit s2-commit-worker s2-stop-connection s3-select-count -step s1-begin: +step s1-begin: BEGIN; -step s1-alter: +step s1-alter: ALTER TABLE truncate_table DROP value; -step s2-start-session-level-connection: +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit: +step s1-commit: COMMIT; step s2-truncate: <... completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count -step s1-start-session-level-connection: +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-select-for-update: + +step s1-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM truncate_table WHERE id=5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-truncate: + +step s2-truncate: SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-truncate: <... 
completed> run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM truncate_table; -count +count -0 +0 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out index 8e57fe7a6..e52a21b8d 100644 --- a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out @@ -3,287 +3,287 @@ Parsed test spec with 3 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-delete: + +step s2-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM dist_table WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT 
start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-delete: + +step s1-delete: SELECT run_commands_on_session_level_connection_to_node('DELETE FROM dist_table WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-copy: + +step s2-copy: SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -3 +3 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-alter-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-alter-table: + +step s2-alter-table: ALTER TABLE dist_table DROP value; -step s1-commit-worker: +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - + step s2-alter-table: <... 
completed> -step s2-commit-worker: +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s3-select-count: + +step s3-select-count: SELECT COUNT(*) FROM dist_table; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_distributed_table - -step s1-start-session-level-connection: + +step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node - -step s1-begin-on-worker: + +step s1-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s1-update: + +step s1-update: SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=15 WHERE id=5'); run_commands_on_session_level_connection_to_node - -step s2-start-session-level-connection: + +step s2-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node - -step s2-begin-on-worker: + +step s2-begin-on-worker: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node - -step s2-select-for-update: + +step s2-select-for-update: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=5 FOR UPDATE'); run_commands_on_session_level_connection_to_node - -step s1-commit-worker: + +step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s2-commit-worker: + +step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); run_commands_on_session_level_connection_to_node - -step s1-stop-connection: + +step s1-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - -step s2-stop-connection: + +step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node - + restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 4cb171384..7b22761dc 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -1,135 +1,135 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes -nodeid nodename nodeport +nodeid nodename nodeport -22 localhost 57637 -23 localhost 57638 -step s1-begin: +22 localhost 57637 +23 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? 
-1 -step s2-update-node-2: +1 +step s2-update-node-2: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57638), 'localhost', 58638); -step s1-commit: +step s1-commit: COMMIT; step s2-update-node-2: <... completed> -?column? +?column? -1 -step s1-show-nodes: +1 +step s1-show-nodes: SELECT nodeid, nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodeid nodename nodeport isactive +nodeid nodename nodeport isactive -22 localhost 58637 t -23 localhost 58638 t -nodeid nodename nodeport +22 localhost 58637 t +23 localhost 58638 t +nodeid nodename nodeport starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes -nodeid nodename nodeport +nodeid nodename nodeport -24 localhost 57637 -25 localhost 57638 -step s1-begin: +24 localhost 57637 +25 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-update-node-1: +step s2-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -step s1-commit: +step s1-commit: COMMIT; step s2-update-node-1: <... completed> -?column? +?column? -1 -step s2-abort: +1 +step s2-abort: ABORT; -step s1-show-nodes: +step s1-show-nodes: SELECT nodeid, nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; -nodeid nodename nodeport isactive +nodeid nodename nodeport isactive -25 localhost 57638 t -24 localhost 58637 t -nodeid nodename nodeport +25 localhost 57638 t +24 localhost 58637 t +nodeid nodename nodeport starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata -nodeid nodename nodeport +nodeid nodename nodeport -26 localhost 57637 -27 localhost 57638 -step s1-begin: +26 localhost 57637 +27 localhost 57638 +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 58637); -?column? +?column? -1 -step s2-start-metadata-sync-node-2: +1 +step s2-start-metadata-sync-node-2: SELECT start_metadata_sync_to_node('localhost', 57638); -step s1-commit: +step s1-commit: COMMIT; step s2-start-metadata-sync-node-2: <... 
completed> start_metadata_sync_to_node - -step s2-verify-metadata: + +step s2-verify-metadata: SELECT nodeid, groupid, nodename, nodeport FROM pg_dist_node ORDER BY nodeid; SELECT master_run_on_worker( ARRAY['localhost'], ARRAY[57638], ARRAY['SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport) ORDER BY nodeid) FROM pg_dist_node'], false); -nodeid groupid nodename nodeport +nodeid groupid nodename nodeport -26 26 localhost 58637 -27 27 localhost 57638 +26 26 localhost 58637 +27 27 localhost 57638 master_run_on_worker (localhost,57638,t,"[{""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 27, ""f2"": 27, ""f3"": ""localhost"", ""f4"": 57638}]") -nodeid nodename nodeport +nodeid nodename nodeport diff --git a/src/test/regress/expected/isolation_update_node_lock_writes.out b/src/test/regress/expected/isolation_update_node_lock_writes.out index b9286cfad..dcaa5b991 100644 --- a/src/test/regress/expected/isolation_update_node_lock_writes.out +++ b/src/test/regress/expected/isolation_update_node_lock_writes.out @@ -3,62 +3,62 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-update-node-1 s2-begin s2-insert s1-commit s2-abort create_distributed_table - -step s1-begin: + +step s1-begin: BEGIN; -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638); -?column? +?column? -1 -step s2-begin: +1 +step s2-begin: BEGIN; -step s2-insert: +step s2-insert: INSERT INTO update_node(id, f1) SELECT id, md5(id::text) FROM generate_series(1, 10) as t(id); -step s1-commit: +step s1-commit: COMMIT; step s2-insert: <... completed> error in steps s1-commit s2-insert: ERROR: relation "public.update_node_102008" does not exist -step s2-abort: +step s2-abort: ABORT; -nodeid nodename nodeport +nodeid nodename nodeport starting permutation: s2-begin s2-insert s1-update-node-1 s2-commit create_distributed_table - -step s2-begin: + +step s2-begin: BEGIN; -step s2-insert: +step s2-insert: INSERT INTO update_node(id, f1) SELECT id, md5(id::text) FROM generate_series(1, 10) as t(id); -step s1-update-node-1: +step s1-update-node-1: SELECT 1 FROM master_update_node( (select nodeid from pg_dist_node where nodeport = 57637), 'localhost', 57638); -step s2-commit: +step s2-commit: COMMIT; step s1-update-node-1: <... completed> -?column? +?column? -1 -nodeid nodename nodeport +1 +nodeid nodename nodeport diff --git a/src/test/regress/expected/isolation_update_vs_all.out b/src/test/regress/expected/isolation_update_vs_all.out index 18490362a..42090fcf2 100644 --- a/src/test/regress/expected/isolation_update_vs_all.out +++ b/src/test/regress/expected/isolation_update_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-update s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -53,17 +53,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -76,12 +76,12 @@ step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -91,9 +91,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -101,12 +101,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; @@ -117,9 +117,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -127,12 +127,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-update s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; @@ -140,9 +140,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY update_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -150,12 +150,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -165,9 +165,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -175,12 +175,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -191,9 +191,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -201,12 +201,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -216,9 +216,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -226,12 +226,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -239,21 +239,21 @@ step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size -57344 +57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -263,17 +263,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-update s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -285,20 +285,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -10 +10 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -308,17 +308,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -328,17 +328,17 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -352,12 +352,12 @@ step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -367,9 +367,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -377,12 +377,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; @@ -393,9 +393,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers @@ -403,12 +403,12 @@ run_command_on_workers (localhost,57638,t,0) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -418,9 +418,9 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -428,12 +428,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -444,9 +444,9 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -454,12 +454,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -470,9 +470,9 @@ step s2-update: <... completed> error in steps s1-commit s2-update: ERROR: column "data" of relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -480,34 +480,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size -57344 +57344 step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -517,17 +517,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -536,15 +536,15 @@ step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('update_hash', 'id'); create_distributed_table - + step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; -count +count -10 +10 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_upsert_vs_all.out b/src/test/regress/expected/isolation_upsert_vs_all.out index 54089a571..9438d39da 100644 --- a/src/test/regress/expected/isolation_upsert_vs_all.out +++ b/src/test/regress/expected/isolation_upsert_vs_all.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -13,17 +13,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-update s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -33,17 +33,17 @@ step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-delete s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -53,17 +53,17 @@ step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -4 +4 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-truncate s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -73,17 +73,17 @@ step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-drop s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -96,12 +96,12 @@ step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -111,9 +111,9 @@ step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -121,12 +121,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-upsert s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; @@ -137,9 +137,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -147,12 +147,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s1-upsert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; @@ -160,9 +160,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY upsert_hash_ind step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -170,12 +170,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -185,9 +185,9 @@ step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -195,12 +195,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-upsert s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -211,9 +211,9 @@ step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -221,12 +221,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -236,9 +236,9 @@ step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -246,12 +246,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-table-size s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -259,21 +259,21 @@ step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE step s2-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size -114688 +114688 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -283,17 +283,17 @@ step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -0 +0 restore_isolation_tester_func - + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-upsert s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table - + step s1-drop: DROP TABLE upsert_hash; step s1-create-non-distributed-table: CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; @@ -305,20 +305,20 @@ step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table - + step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-update s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -328,17 +328,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -348,17 +348,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -368,17 +368,17 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -1 +1 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -392,12 +392,12 @@ step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -407,9 +407,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -417,12 +417,12 @@ run_command_on_workers (localhost,57638,t,4) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; @@ -433,9 +433,9 @@ step s1-commit: COMMIT; step s2-upsert: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers @@ -443,12 +443,12 @@ run_command_on_workers (localhost,57638,t,2) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -458,9 +458,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -468,12 +468,12 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -484,9 +484,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -494,12 +494,12 @@ run_command_on_workers (localhost,57638,t,"") restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -510,9 +510,9 @@ step s2-upsert: <... 
completed> error in steps s1-commit s2-upsert: ERROR: column "data" of relation "upsert_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers @@ -520,34 +520,34 @@ run_command_on_workers (localhost,57638,t,new_column) restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size -114688 +114688 step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -5 +5 restore_isolation_tester_func - + starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-upsert s1-commit s2-commit s1-select-count create_distributed_table - + step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -557,9 +557,9 @@ step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; -count +count -1 +1 restore_isolation_tester_func - + diff --git a/src/test/regress/expected/isolation_validate_vs_insert.out b/src/test/regress/expected/isolation_validate_vs_insert.out index 212edbedf..7af0e764f 100644 --- a/src/test/regress/expected/isolation_validate_vs_insert.out +++ b/src/test/regress/expected/isolation_validate_vs_insert.out @@ -3,7 +3,7 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s1-validate s2-insert s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; @@ -16,23 +16,23 @@ step s2-commit: COMMIT; starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s1-validate s2-select s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-validate: ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; step s2-select: SELECT sum(int_data) FROM constrained_table; -sum +sum -7 +7 step s1-commit: COMMIT; step s2-commit: COMMIT; starting permutation: s1-initialize s1-add-constraint s1-begin s2-begin s2-insert s1-validate s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; @@ -45,15 +45,15 @@ step s2-commit: COMMIT; starting 
permutation: s1-initialize s1-add-constraint s1-begin s2-begin s2-select s1-validate s1-commit s2-commit create_distributed_table - + step s1-initialize: INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); step s1-add-constraint: ALTER TABLE constrained_table ADD CONSTRAINT check_constraint CHECK(int_data<30) NOT VALID; step s1-begin: BEGIN; step s2-begin: BEGIN; step s2-select: SELECT sum(int_data) FROM constrained_table; -sum +sum -7 +7 step s1-validate: ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; step s1-commit: COMMIT; step s2-commit: COMMIT; diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out index beaebf5cb..de7c758bf 100644 --- a/src/test/regress/expected/limit_intermediate_size.out +++ b/src/test/regress/expected/limit_intermediate_size.out @@ -2,39 +2,39 @@ SET citus.enable_repartition_joins to ON; SET citus.task_executor_type to 'task-tracker'; SET citus.max_intermediate_result_size TO 2; -- should fail because the copy size is ~4kB for each cte -WITH cte AS +WITH cte AS ( SELECT * FROM users_table ), cte2 AS ( SELECT * FROM events_table -) +) SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB) DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. SET citus.max_intermediate_result_size TO 9; -- regular task-tracker CTE should fail -WITH cte AS +WITH cte AS ( - SELECT + SELECT users_table.user_id, users_table.value_1, users_table.value_2 - FROM + FROM users_table - join + join events_table - on + on (users_table.value_3=events_table.value_3) ), cte2 AS ( SELECT * FROM events_table -) -SELECT - cte.user_id, cte2.value_2 -FROM +) +SELECT + cte.user_id, cte2.value_2 +FROM cte JOIN cte2 ON (cte.value_1 = cte2.event_type) -ORDER BY - 1,2 +ORDER BY + 1,2 LIMIT 10; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 9 kB) DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. 
@@ -58,7 +58,7 @@ UNION UNION (select count(*) as c from cte5) ) as foo; - sum + sum --------------------------------------------------------------------- 91 (1 row) @@ -117,7 +117,7 @@ WITH cte AS ( cte3 AS ( SELECT * FROM events_table WHERE event_type = 1 ) - SELECT * FROM cte2, cte3 WHERE cte2.value_1 IN (SELECT value_2 FROM cte3) + SELECT * FROM cte2, cte3 WHERE cte2.value_1 IN (SELECT value_2 FROM cte3) ) SELECT * FROM cte; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 3 kB) @@ -178,38 +178,38 @@ cte4 AS ( ) SELECT * FROM cte UNION ALL SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | - 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | - 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | - 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | - 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | + 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | + 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | + 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | + 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | + 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | (5 rows) -- regular task-tracker CTE, should work since -1 disables the limit -WITH cte AS +WITH cte AS ( - SELECT + SELECT users_table.user_id, users_table.value_1, users_table.value_2 - FROM + FROM users_table - join + join events_table - on + on (users_table.value_2=events_table.value_2) ), cte2 AS ( SELECT * FROM events_table -) -SELECT - cte.user_id, cte2.value_2 -FROM +) +SELECT + cte.user_id, cte2.value_2 +FROM cte JOIN cte2 ON (cte.value_1 = cte2.event_type) -ORDER BY - 1,2 +ORDER BY + 1,2 LIMIT 10; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 1 | 0 1 | 0 @@ -224,15 +224,15 @@ LIMIT 10; (10 rows) -- regular real-time CTE fetches around ~4kb data in each subplan -WITH cte AS +WITH cte AS ( SELECT * FROM users_table ), cte2 AS ( SELECT * FROM events_table -) +) SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 1 | 0 1 | 0 @@ -247,23 +247,23 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; (10 rows) -- regular real-time query fetches ~4kB -WITH cte AS +WITH cte AS ( SELECT * FROM users_table WHERE user_id IN (1,2,3,4,5) ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | - 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 
0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | (10 rows) -- nested CTEs @@ -274,15 +274,15 @@ WITH cte AS ( cte3 AS ( SELECT * FROM events_table ) - SELECT + SELECT cte2.user_id, cte2.time, cte3.event_type, cte3.value_2, cte3.value_3 - FROM - cte2, cte3 - WHERE + FROM + cte2, cte3 + WHERE cte2.user_id = cte3.user_id AND cte2.user_id = 1 ) SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10; - user_id | time | event_type | value_2 | value_3 + user_id | time | event_type | value_2 | value_3 --------------------------------------------------------------------- 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 2 | 0 1 | Wed Nov 22 22:51:43.132261 2017 | 0 | 5 | 1 diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 73638b8d0..cb48cf994 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -6,23 +6,23 @@ SET citus.replication_model TO 'streaming'; SET citus.next_shard_id TO 1470000; CREATE TABLE reference_table (key int PRIMARY KEY); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE distributed_table (key int PRIMARY KEY , value text, age bigint CHECK (age > 10), FOREIGN KEY (key) REFERENCES reference_table(key) ON DELETE CASCADE); SELECT create_distributed_table('distributed_table','key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE second_distributed_table (key int PRIMARY KEY , value text, FOREIGN KEY (key) REFERENCES distributed_table(key) ON DELETE CASCADE); SELECT create_distributed_table('second_distributed_table','key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- ingest some data to enable some tests with data @@ -39,9 +39,9 @@ CREATE TABLE collections_list ( PRIMARY KEY(key, collection_id) ) PARTITION BY LIST (collection_id ); SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE collections_list_0 @@ -79,38 +79,38 @@ $$ LANGUAGE plpgsql; -- distribution key values of 1,6, 500 and 701 are LOCAL to shards, -- we'll use these values in the tests SELECT shard_of_distribution_column_is_local(1); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local --------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(6); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local --------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(500); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local --------------------------------------------------------------------- t (1 row) SELECT shard_of_distribution_column_is_local(701); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local 
--------------------------------------------------------------------- t (1 row) -- distribution key values of 11 and 12 are REMOTE to shards SELECT shard_of_distribution_column_is_local(11); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local --------------------------------------------------------------------- f (1 row) SELECT shard_of_distribution_column_is_local(12); - shard_of_distribution_column_is_local + shard_of_distribution_column_is_local --------------------------------------------------------------------- f (1 row) @@ -122,7 +122,7 @@ SET citus.log_local_commands TO ON; -- with simple queries that are not in transcation blocks SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) @@ -131,20 +131,20 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e -- because local execution means executing the tasks locally, so the executor -- favors parallel execution even if everyting is local to node SELECT count(*) FROM distributed_table WHERE key IN (1,6); - count + count --------------------------------------------------------------------- 1 (1 row) -- queries that hit any remote shards should NOT use local execution SELECT count(*) FROM distributed_table WHERE key IN (1,11); - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -179,7 +179,7 @@ WHERE ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470001 distributed_table, local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (((distributed_table.key OPERATOR(pg_catalog.=) 1) AND (distributed_table.key OPERATOR(pg_catalog.=) second_distributed_table.key)) AND ((worker_hash(distributed_table.key) OPERATOR(pg_catalog.>=) '-2147483648'::integer) AND (worker_hash(distributed_table.key) OPERATOR(pg_catalog.<=) '-1073741825'::integer))) ON CONFLICT(key) DO UPDATE SET value = '22'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -194,7 +194,7 @@ WHERE distributed_table.key != 1 and distributed_table.key=second_distributed_table.key ON CONFLICT(key) DO UPDATE SET value = '22' RETURNING *; - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) @@ -208,7 +208,7 @@ INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOT -- EXPLAIN for local execution just works fine -- though going through distributed execution EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -221,7 +221,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE 
key = 1 AND age = 20; (8 rows) EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 1 @@ -234,7 +234,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute (8 rows) EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -248,7 +248,7 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; (9 rows) EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 @@ -264,13 +264,13 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) DELETE FROM distributed_ta -- show that EXPLAIN ANALYZE deleted the row and cascades deletes SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; LOG: executing the command locally: SELECT key, value FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -294,14 +294,14 @@ COPY second_distributed_table FROM STDIN WITH CSV; BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 29 | 20 (1 row) SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -310,7 +310,7 @@ ROLLBACK; -- make sure that the value is rollbacked SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 22 | 20 (1 row) @@ -319,7 +319,7 @@ LOG: executing the command locally: SELECT key, value, age FROM local_shard_exe BEGIN; INSERT INTO 
distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 29 | 20 (1 row) @@ -331,7 +331,7 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT count(*) FROM second_distributed_table; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470007 second_distributed_table WHERE true - count + count --------------------------------------------------------------------- 0 (1 row) @@ -340,19 +340,19 @@ ROLLBACK; -- make sure that everything is rollbacked SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 22 | 20 (1 row) SELECT count(*) FROM second_distributed_table; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT * FROM second_distributed_table; - key | value + key | value --------------------------------------------------------------------- 1 | 1 6 | '6' @@ -364,7 +364,7 @@ BEGIN; -- INSERT is executed locally INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '23' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '23'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -373,7 +373,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut -- executed locally and see the changes SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -383,7 +383,7 @@ LOG: executing the command locally: SELECT key, value, age FROM local_shard_exe SELECT * FROM distributed_table WHERE value = '23' ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) - key | value | age + key | value | age 
--------------------------------------------------------------------- 1 | 23 | 20 (1 row) @@ -397,7 +397,7 @@ LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT * FROM distributed_table WHERE value = '23' ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (value OPERATOR(pg_catalog.=) '23'::text) - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) @@ -405,7 +405,7 @@ COMMIT; -- make sure that we've committed everything SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; LOG: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) ORDER BY key, value, age - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) @@ -416,7 +416,7 @@ BEGIN; -- although this command could have been executed -- locally, it is not going to be executed locally SELECT * FROM distributed_table WHERE key = 1 ORDER BY 1,2,3; - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) @@ -426,7 +426,7 @@ BEGIN; NOTICE: truncate cascades to table "second_distributed_table" -- TRUNCATE cascaded into second_distributed_table SELECT count(*) FROM second_distributed_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -441,7 +441,7 @@ BEGIN; -- this could go through local execution, but doesn't because we've already -- done distributed execution SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; - key | value | age + key | value | age --------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -451,7 +451,7 @@ BEGIN; NOTICE: truncate cascades to table "second_distributed_table" -- ensure that TRUNCATE made it SELECT * FROM distributed_table WHERE key = 500 ORDER BY 1,2,3; - key | value | age + key | value | age --------------------------------------------------------------------- (0 rows) @@ -468,14 +468,14 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.second_di LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 701) SELECT count(*) FROM distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM second_distributed_table WHERE key = 701; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.second_distributed_table_1470005 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 701) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -484,7 +484,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e SELECT count(*) FROM distributed_table WHERE key > 700; LOG: executing the command locally: SELECT count(*) AS count FROM 
local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.>) 700) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -498,21 +498,21 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM distributed_table WHERE key = 6; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM distributed_table WHERE key = 500; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -522,7 +522,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -537,7 +537,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -552,7 +552,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -566,7 +566,7 @@ ROLLBACK; BEGIN; SELECT count(*) FROM distributed_table WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -578,7 +578,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.enable_local_executi ROLLBACK; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -593,7 +593,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.enable_local_executi ROLLBACK; BEGIN; 
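-- Aside (not part of this patch): the HINT above points at a real escape hatch.
-- Once a transaction has executed a shard command locally, later statements that
-- must go remote can fail; disabling local execution up front avoids the mix.
-- A hedged sketch of the suggested workaround:
BEGIN;
SET LOCAL citus.enable_local_execution TO off;  -- force remote execution for this tx
SELECT count(*) FROM distributed_table WHERE key = 1;
COMMIT;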
INSERT INTO distributed_table VALUES (11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; - key | value | age + key | value | age --------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -606,7 +606,7 @@ HINT: Connect to the coordinator and run it again. ROLLBACK; BEGIN; INSERT INTO distributed_table VALUES (11, '111',29) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *; - key | value | age + key | value | age --------------------------------------------------------------------- 11 | 29 | 121 (1 row) @@ -687,7 +687,7 @@ distributed_local_mixed AS (SELECT * FROM reference_table WHERE key IN (SELECT k SELECT * FROM local_insert, distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age LOG: executing the command locally: SELECT key FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) ANY (SELECT local_insert.key FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint)) local_insert)) - key | value | age | key + key | value | age | key --------------------------------------------------------------------- 1 | 11 | 21 | 1 (1 row) @@ -697,7 +697,7 @@ LOG: executing the command locally: SELECT key FROM local_shard_execution.refer WITH distributed_local_mixed AS (SELECT * FROM distributed_table), local_insert AS (INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29' RETURNING *) SELECT * FROM local_insert, distributed_local_mixed ORDER BY 1,2,3,4,5; - key | value | age | key | value | age + key | value | age | key | value | age --------------------------------------------------------------------- 1 | 29 | 21 | 1 | 11 | 21 (1 row) @@ -711,7 +711,7 @@ FROM WHERE distributed_table.key = all_data.key AND distributed_table.key = 1; LOG: executing the command locally: WITH all_data AS (SELECT distributed_table_1.key, distributed_table_1.value, distributed_table_1.age FROM local_shard_execution.distributed_table_1470001 distributed_table_1 WHERE (distributed_table_1.key OPERATOR(pg_catalog.=) 1)) SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table, all_data WHERE ((distributed_table.key OPERATOR(pg_catalog.=) all_data.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) - count + count --------------------------------------------------------------------- 1 (1 row) @@ -730,7 +730,7 @@ WHERE distributed_table.value = all_data.value AND distributed_table.key = 1 ORDER BY 1 DESC; - key + key --------------------------------------------------------------------- 1 (1 row) @@ -748,7 +748,7 @@ FROM WHERE distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -763,7 +763,7 @@ FROM distributed_table, all_data WHERE distributed_table.key = all_data.age AND distributed_table.key = 1; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -773,7 +773,7 @@ TRUNCATE reference_table, distributed_table, 
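-- Aside (not part of this patch): the read_intermediate_result() calls in the LOG
-- lines above show how Citus feeds a modifying CTE into the rest of the query:
-- the CTE's RETURNING rows are stored once as a named intermediate result and
-- re-read wherever the CTE is referenced. A minimal sketch of the shape:
WITH ins AS (
    INSERT INTO distributed_table VALUES (1, '11', 21)
    ON CONFLICT (key) DO UPDATE SET value = '29' RETURNING *
)
SELECT count(*) FROM ins;  -- ins is shipped as an intermediate result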
second_distributed_table; -- local execution of returning of reference tables INSERT INTO reference_table VALUES (1),(2),(3),(4),(5),(6) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 AS citus_table_alias (key) VALUES (1), (2), (3), (4), (5), (6) RETURNING citus_table_alias.key - key + key --------------------------------------------------------------------- 1 2 @@ -786,7 +786,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.reference -- local execution of multi-row INSERTs INSERT INTO distributed_table VALUES (1, '11',21), (5,'55',22) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1,'11'::text,'21'::bigint), (5,'55'::text,'22'::bigint) ON CONFLICT(key) DO UPDATE SET value = (((excluded.value)::integer OPERATOR(pg_catalog.+) 1))::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 11 | 21 5 | 55 | 22 @@ -796,7 +796,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut -- could have been done via local execution but the executor choose the other way around -- because the command is a multi-shard query INSERT INTO distributed_table VALUES (1, '11',21), (2,'22',22), (3,'33',33), (4,'44',44),(5,'55',55) ON CONFLICT(key) DO UPDATE SET value = (EXCLUDED.value::int + 1)::text RETURNING *; - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 12 | 21 2 | 22 | 22 @@ -812,42 +812,42 @@ BEGIN; -- 6 local execution without params EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_no_param; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count 
--------------------------------------------------------------------- 1 (1 row) @@ -855,42 +855,42 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e -- 6 local executions with params EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count + count --------------------------------------------------------------------- 0 (1 row) EXECUTE local_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(5); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 5) - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE local_prepare_param(6); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 6) - count + count --------------------------------------------------------------------- 0 (1 row) @@ -899,7 +899,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e EXECUTE remote_prepare_param(1); LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.<>) 1) - count + count --------------------------------------------------------------------- 4 (1 row) @@ -911,7 +911,7 @@ COMMIT; BEGIN; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -925,7 +925,7 @@ ERROR: division by zero ROLLBACK; -- we've rollbacked everything SELECT count(*) FROM distributed_table WHERE value = '200'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -933,14 +933,14 @@ SELECT count(*) FROM distributed_table WHERE value = '200'; -- RETURNING should just work fine for reference tables INSERT INTO reference_table VALUES (500) 
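-- Aside (not part of this patch): the six repetitions above are deliberate.
-- PostgreSQL may switch a prepared statement to a generic plan after five
-- executions, so the sixth EXECUTE exercises the path where the shard can no
-- longer be pruned from a plan-time constant. The statements are presumably
-- prepared earlier in the test along these lines (a hedged reconstruction):
PREPARE local_prepare_no_param AS
    SELECT count(*) FROM distributed_table WHERE key = 1;
PREPARE local_prepare_param (int) AS
    SELECT count(*) FROM distributed_table WHERE key = $1;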
RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (500) RETURNING key - key + key --------------------------------------------------------------------- 500 (1 row) DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key - key + key --------------------------------------------------------------------- 500 (1 row) @@ -950,7 +950,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO sequential ; DELETE FROM distributed_table; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 11 | 21 (1 row) @@ -961,7 +961,7 @@ BEGIN; SET citus.multi_shard_modify_mode TO sequential ; INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '100' RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '100'::text RETURNING citus_table_alias.key, citus_table_alias.value, citus_table_alias.age - key | value | age + key | value | age --------------------------------------------------------------------- 1 | 100 | 21 (1 row) @@ -988,7 +988,7 @@ ROLLBACK; BEGIN; DELETE FROM reference_table WHERE key = 500 RETURNING *; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table WHERE (key OPERATOR(pg_catalog.=) 500) RETURNING key - key + key --------------------------------------------------------------------- 500 (1 row) @@ -1012,7 +1012,7 @@ BEGIN; SET LOCAL citus.task_executor_type = 'task-tracker'; SET LOCAL client_min_messages TO INFO; SELECT count(*) FROM distributed_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -1026,7 +1026,7 @@ CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; SELECT * FROM v_local_query_execution; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution - key | value | age + key | value | age --------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1037,7 +1037,7 @@ CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; SELECT * FROM v_local_query_execution_2 WHERE key = 500; LOG: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) - key | value | age + key | value | age --------------------------------------------------------------------- 500 | 500 | 25 (1 row) @@ -1047,7 +1047,7 @@ LOG: executing the command locally: SELECT key, value, age FROM (SELECT distrib BEGIN; SAVEPOINT my_savepoint; SELECT count(*) FROM distributed_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -1065,7 +1065,7 @@ 
LOG: executing the command locally: DELETE FROM local_shard_execution.distribut SELECT count(*) FROM distributed_table; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true - count + count --------------------------------------------------------------------- 100 (1 row) @@ -1077,9 +1077,9 @@ COMMIT; -- sanity check: local execution on partitions INSERT INTO collections_list (collection_id) VALUES (0) RETURNING *; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470011 (key, ser, collection_id) VALUES ('3940649673949185'::bigint, '3940649673949185'::bigint, 0) RETURNING key, ser, ts, collection_id, value - key | ser | ts | collection_id | value + key | ser | ts | collection_id | value --------------------------------------------------------------------- - 3940649673949185 | 3940649673949185 | | 0 | + 3940649673949185 | 3940649673949185 | | 0 | (1 row) BEGIN; @@ -1087,7 +1087,7 @@ BEGIN; LOG: executing the command locally: INSERT INTO local_shard_execution.collections_list_1470009 (key, ser, collection_id) VALUES ('1'::bigint, '3940649673949186'::bigint, 0) SELECT count(*) FROM collections_list_0 WHERE key = 1; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_0_1470013 collections_list_0 WHERE (key OPERATOR(pg_catalog.=) 1) - count + count --------------------------------------------------------------------- 1 (1 row) @@ -1095,7 +1095,7 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e SELECT count(*) FROM collections_list; LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.collections_list_1470011 collections_list WHERE true - count + count --------------------------------------------------------------------- 2 (1 row) @@ -1103,10 +1103,10 @@ LOG: executing the command locally: SELECT count(*) AS count FROM local_shard_e SELECT * FROM collections_list ORDER BY 1,2,3,4; LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470009 collections_list WHERE true LOG: executing the command locally: SELECT key, ser, ts, collection_id, value FROM local_shard_execution.collections_list_1470011 collections_list WHERE true - key | ser | ts | collection_id | value + key | ser | ts | collection_id | value --------------------------------------------------------------------- - 1 | 3940649673949186 | | 0 | - 3940649673949185 | 3940649673949185 | | 0 | + 1 | 3940649673949186 | | 0 | + 3940649673949185 | 3940649673949185 | | 0 | (2 rows) COMMIT; @@ -1115,7 +1115,7 @@ COMMIT; -- Citus currently doesn't allow using task_assignment_policy for intermediate results WITH distributed_local_mixed AS (INSERT INTO reference_table VALUES (1000) RETURNING *) SELECT * FROM distributed_local_mixed; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (1000) RETURNING key - key + key --------------------------------------------------------------------- 1000 (1 row) @@ -1135,7 +1135,7 @@ LOG: executing the command locally: INSERT INTO local_shard_execution.distribut DELETE 
FROM distributed_table RETURNING key; LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470001 distributed_table RETURNING key LOG: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table RETURNING key - key + key --------------------------------------------------------------------- 1 2 @@ -1155,7 +1155,7 @@ BEGIN; LOG: executing the command locally: INSERT INTO local_shard_execution.reference_table_1470000 (key) VALUES (1) DELETE FROM reference_table RETURNING key; LOG: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table RETURNING key - key + key --------------------------------------------------------------------- 1 2 @@ -1174,9 +1174,9 @@ CREATE TABLE event_responses ( primary key (event_id, user_id) ); SELECT create_distributed_table('event_responses', 'event_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE PROCEDURE register_for_event(p_event_id int, p_user_id int, p_choice invite_resp) @@ -1188,9 +1188,9 @@ BEGIN END; $fn$; SELECT create_distributed_function('register_for_event(int,int,invite_resp)', 'p_event_id', 'event_responses'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- call 6 times to make sure it works after the 5th time(postgres binds values after the 5th time) diff --git a/src/test/regress/expected/materialized_view.out b/src/test/regress/expected/materialized_view.out index f0d36450c..b8cb80529 100644 --- a/src/test/regress/expected/materialized_view.out +++ b/src/test/regress/expected/materialized_view.out @@ -9,14 +9,14 @@ SET search_path TO materialized_view, public; CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; CREATE TABLE temp_lineitem(LIKE lineitem_hash_part); SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT count(*) FROM temp_lineitem; - count + count --------------------------------------------------------------------- 1706 (1 row) @@ -24,7 +24,7 @@ SELECT count(*) FROM temp_lineitem; -- following is a where false query, should not be inserting anything INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL'; SELECT count(*) FROM temp_lineitem; - count + count --------------------------------------------------------------------- 1706 (1 row) @@ -33,7 +33,7 @@ SELECT count(*) FROM temp_lineitem; CREATE MATERIALIZED VIEW mode_counts AS SELECT l_shipmode, count(*) FROM temp_lineitem GROUP BY l_shipmode; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count + l_shipmode | count --------------------------------------------------------------------- AIR | 1706 (1 row) @@ -44,7 +44,7 @@ ERROR: relation mode_counts is not distributed -- new data is not immediately reflected in the view INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count + l_shipmode | count --------------------------------------------------------------------- AIR | 1706 (1 
row) @@ -52,7 +52,7 @@ SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; -- refresh updates the materialised view with new data REFRESH MATERIALIZED VIEW mode_counts; SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10; - l_shipmode | count + l_shipmode | count --------------------------------------------------------------------- AIR | 3412 (1 row) @@ -66,7 +66,7 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey AND lineitem_hash_part.l_orderkey=3; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -79,7 +79,7 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -93,7 +93,7 @@ FROM lineitem_hash_part, orders_hash_part, total_price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -110,7 +110,7 @@ FROM orders_hash_part JOIN ( ON total_quantity.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -123,7 +123,7 @@ FROM lineitem_hash_part, orders_reference, (SELECT SUM(o_totalprice) AS price_su WHERE lineitem_hash_part.l_orderkey=orders_reference.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -138,21 +138,21 @@ FROM lineitem_local_to_hash_part, orders_local_to_hash_part, (SELECT SUM(l_exten WHERE lineitem_local_to_hash_part.l_orderkey=orders_local_to_hash_part.o_orderkey; SELECT create_distributed_table('lineitem_local_to_hash_part', 'l_orderkey'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('orders_local_to_hash_part', 'o_orderkey'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) REFRESH MATERIALIZED VIEW materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -167,7 +167,7 @@ FROM lineitem_hash_part, orders_hash_part, (SELECT SUM(l_extendedprice) AS price WHERE lineitem_hash_part.l_orderkey=orders_hash_part.o_orderkey; REFRESH MATERIALIZED VIEW materialized_view WITH DATA; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -192,7 +192,7 @@ GROUP BY orders_hash_part.o_orderdate; CREATE UNIQUE INDEX materialized_view_index ON materialized_view (o_orderdate); REFRESH MATERIALIZED VIEW CONCURRENTLY materialized_view; SELECT count(*) FROM materialized_view; - count + count --------------------------------------------------------------------- 1699 (1 row) @@ -205,15 +205,15 @@ NOTICE: drop cascades to view air_shipped_lineitems CREATE TABLE large (id int, tenant_id int); CREATE TABLE small (id int, tenant_id int); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy small FROM STDIN DELIMITER ',' @@ -226,7 +226,7 @@ ERROR: cannot change materialized view "small_view" UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 1 | 2 2 | 3 @@ -238,7 +238,7 @@ SELECT * FROM large ORDER BY 1, 2; UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=2 and large.tenant_id=2; ERROR: materialized views in modify queries are not supported SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 1 | 2 2 | 3 @@ -249,7 +249,7 @@ SELECT * FROM large ORDER BY 1, 2; -- delete statement on large with subquery, this should succeed DELETE FROM large WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 6 | 5 (1 row) @@ -267,15 +267,15 @@ CREATE TABLE large_partitioned_p1 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM (10) TO (20); CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy small FROM STDIN DELIMITER ',' @@ -290,7 +290,7 @@ ERROR: cannot change materialized view "small_view" UPDATE large_partitioned SET id=20 FROM small_view WHERE 
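-- Aside (not part of this patch): REFRESH MATERIALIZED VIEW CONCURRENTLY requires
-- a unique index on the materialized view, which is why the test creates
-- materialized_view_index before the concurrent refresh. The minimal shape of the
-- requirement (hypothetical names mv / mv_idx):
CREATE MATERIALIZED VIEW mv AS
    SELECT o_orderdate, count(*) FROM orders_hash_part GROUP BY 1;
REFRESH MATERIALIZED VIEW CONCURRENTLY mv;      -- fails: no unique index yet
CREATE UNIQUE INDEX mv_idx ON mv (o_orderdate);
REFRESH MATERIALIZED VIEW CONCURRENTLY mv;      -- succeeds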
small_view.id=large_partitioned.id; ERROR: materialized views in modify queries are not supported SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 1 | 2 2 | 3 @@ -304,7 +304,7 @@ SELECT * FROM large_partitioned ORDER BY 1, 2; -- delete statement on large_partitioned DELETE FROM large_partitioned WHERE id in (SELECT id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 2 | 3 5 | 4 @@ -321,13 +321,13 @@ WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large_partitioned WHERE id in (SELECT * FROM all_small_view_ids); -- make sure that materialized view in a CTE/subquery can be joined with a distributed table WITH cte AS (SELECT *, random() FROM small_view) SELECT count(*) FROM cte JOIN small USING(id); - count + count --------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM (SELECT *, random() FROM small_view) as subquery JOIN small USING(id); - count + count --------------------------------------------------------------------- 4 (1 row) diff --git a/src/test/regress/expected/multi_703_upgrade.out b/src/test/regress/expected/multi_703_upgrade.out index 660f0bce2..61c8b568f 100644 --- a/src/test/regress/expected/multi_703_upgrade.out +++ b/src/test/regress/expected/multi_703_upgrade.out @@ -15,7 +15,7 @@ INSERT INTO pg_dist_node (nodename, nodeport, groupid) VALUES ('localhost', :worker_1_port, 1); ALTER EXTENSION citus UPDATE TO '7.0-3'; SELECT * FROM pg_dist_placement; - placementid | shardid | shardstate | shardlength | groupid + placementid | shardid | shardstate | shardlength | groupid --------------------------------------------------------------------- 1 | 1 | 1 | 0 | 1 (1 row) diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out index 987d119f8..2831d7a7a 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct.out @@ -11,7 +11,7 @@ WHERE name = 'hll' :create_cmd; -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -19,58 +19,58 @@ SELECT count(distinct l_orderkey) FROM lineitem; -- Check approximate count(distinct) at different precisions / error rates SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2612 (1 row) SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2967 (1 row) -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; - count + count --------------------------------------------------------------------- 11654 (1 row) SELECT count(distinct l_extendedprice) FROM lineitem; - count + count --------------------------------------------------------------------- 11691 (1 row) SELECT count(distinct l_shipdate) FROM lineitem; - count + count --------------------------------------------------------------------- 2483 (1 row) SELECT count(distinct l_comment) FROM 
lineitem; - count + count --------------------------------------------------------------------- 11788 (1 row) -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; - count + count --------------------------------------------------------------------- 2980 (1 row) SELECT count(distinct extract(month from l_shipdate)) AS my_month FROM lineitem; - my_month + my_month --------------------------------------------------------------------- 12 (1 row) SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; - ?column? + ?column? --------------------------------------------------------------------- 3 (1 row) @@ -79,14 +79,14 @@ SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; -- contain different filter, join, sort and limit clauses SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count + count --------------------------------------------------------------------- 2355 (1 row) SELECT count(DISTINCT l_orderkey) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count + count --------------------------------------------------------------------- 835 (1 row) @@ -96,7 +96,7 @@ SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM linei GROUP BY l_quantity ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; - distinct_order_count | l_quantity + distinct_order_count | l_quantity --------------------------------------------------------------------- 210 | 29.00 216 | 13.00 @@ -122,16 +122,16 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -139,7 +139,7 @@ SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; -- test with search_path is set SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -160,7 +160,7 @@ SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as t GROUP BY l_returnflag ORDER BY total LIMIT 10; - l_returnflag | count_distinct | total + l_returnflag | count_distinct | total --------------------------------------------------------------------- R | 1103 | 2901 A | 1108 | 2944 @@ -176,7 +176,7 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count | count | count + l_orderkey | count | count | count --------------------------------------------------------------------- 12005 | 4 | 4 | 4 5409 | 4 | 4 | 4 @@ -193,7 +193,7 @@ SELECT -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2985 (1 row) diff --git 
a/src/test/regress/expected/multi_agg_approximate_distinct_0.out b/src/test/regress/expected/multi_agg_approximate_distinct_0.out index c6a39db5f..43fb5dfc0 100644 --- a/src/test/regress/expected/multi_agg_approximate_distinct_0.out +++ b/src/test/regress/expected/multi_agg_approximate_distinct_0.out @@ -9,14 +9,14 @@ AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; - hll_present + hll_present --------------------------------------------------------------------- f (1 row) -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -82,9 +82,9 @@ CREATE TABLE test_count_distinct_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; @@ -102,14 +102,14 @@ SET search_path TO public; -- If we have an order by on count(distinct) that we intend to push down to -- worker nodes, we need to error out. Otherwise, we are fine. SET citus.limit_clause_row_fetch_count = 1000; -SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total +SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY count_distinct LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total +SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY total @@ -130,7 +130,7 @@ HINT: You need to have the hll extension loaded. 
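-- Aside (not part of this patch): citus.count_distinct_error_rate drives the
-- HyperLogLog-based approximation exercised in this file; 0.0 disables it (exact
-- counts, e.g. 2985 above), and any non-zero rate requires the hll extension on
-- every node, which is exactly what the ERROR/HINT pair checks. A hedged sketch:
CREATE EXTENSION hll;                          -- needed on coordinator and workers
SET citus.count_distinct_error_rate = 0.01;    -- roughly 1% error
SELECT count(distinct l_orderkey) FROM lineitem;
SET citus.count_distinct_error_rate = 0.0;     -- back to exact counting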
-- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; - count + count --------------------------------------------------------------------- 2985 (1 row) diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out index 0956b6bdd..da3dcccf2 100644 --- a/src/test/regress/expected/multi_alter_table_add_constraints.out +++ b/src/test/regress/expected/multi_alter_table_add_constraints.out @@ -13,9 +13,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add primary key constraint on distribution column (or group of columns @@ -47,9 +47,9 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- Can add PRIMARY KEY to any column @@ -71,9 +71,9 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add primary key constraint on distribution column (or group @@ -97,9 +97,9 @@ DROP TABLE products_append; -- Check "UNIQUE CONSTRAINT" CREATE TABLE unique_test_table(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add unique constraint on distribution column (or group @@ -135,21 +135,21 @@ DROP TABLE unique_test_table; -- Check "UNIQUE CONSTRAINT" with reference table CREATE TABLE unique_test_table_ref(id int, name varchar(20)); SELECT create_reference_table('unique_test_table_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- We can add unique constraint on any column with reference tables ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id UNIQUE(id); --- Error out. Since the table can not have two rows with the same id. +-- Error out. Since the table can not have two rows with the same id. INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet'); INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_1450066" DETAIL: Key (id)=(X) already exists. CONTEXT: while executing command on localhost:xxxxx --- We can add unique constraint with multiple columns +-- We can add unique constraint with multiple columns ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id; ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name); -- Error out, since two rows can not have the same id or name. 
@@ -158,9 +158,9 @@ DROP TABLE unique_test_table_ref; -- Check "UNIQUE CONSTRAINT" with append table CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add unique constraint on distribution column (or group @@ -189,15 +189,15 @@ CREATE TABLE products ( discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can add column and table check constraints ALTER TABLE products ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); --- First and third queries will error out, because of conflicts with p_check and +-- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_1450069" violates check constraint "p_check_1450069" @@ -209,7 +209,7 @@ ERROR: new row for relation "products_1450069" violates check constraint "p_mul DETAIL: Failing row contains (1, product_1, 2, 3). CONTEXT: while executing command on localhost:xxxxx DROP TABLE products; --- Check "CHECK CONSTRAINT" with reference table +-- Check "CHECK CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, @@ -217,15 +217,15 @@ CREATE TABLE products_ref ( discounted_price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- Can add column and table check constraints ALTER TABLE products_ref ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); --- First and third queries will error out, because of conflicts with p_check and +-- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. 
INSERT INTO products_ref VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_ref_1450100" violates check constraint "p_check_1450100" @@ -245,9 +245,9 @@ CREATE TABLE products_append ( discounted_price int ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can add column and table check constraints @@ -265,18 +265,18 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Can only add exclusion constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column -ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); ERROR: cannot create constraint on "products" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). --- We can add composite exclusion +-- We can add composite exclusion ALTER TABLE products ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- 4th command will error out since it conflicts with exc_pno_name constraint INSERT INTO products VALUES(1,'product_1', 5); @@ -294,13 +294,13 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- We can add exclusion constraint on any column -ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); -- We can add composite exclusion because none of pair of rows are conflicting ALTER TABLE products_ref ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Third insertion will error out, since it has the same name with second insertion @@ -318,15 +318,15 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no','append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) --- Can only add exclusion constraint on distribution column (or group of column +-- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) -- Command below should error out since 'name' is not a distribution column -ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); +ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. 
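-- Aside (not part of this patch): the pattern repeated across this file is that
-- hash-distributed tables only accept UNIQUE / PRIMARY KEY / EXCLUDE constraints
-- that include the distribution column (so each shard can enforce the constraint
-- locally), while reference tables accept them on any column. In brief
-- (hypothetical table t):
CREATE TABLE t (a int, b int);
SELECT create_distributed_table('t', 'a');
ALTER TABLE t ADD CONSTRAINT u_b  UNIQUE (b);    -- rejected: omits column a
ALTER TABLE t ADD CONSTRAINT u_ab UNIQUE (a, b); -- accepted: includes column a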
@@ -348,9 +348,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE products ALTER COLUMN name SET NOT NULL; @@ -369,9 +369,9 @@ CREATE TABLE products_ref ( price numeric ); SELECT create_reference_table('products_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL; @@ -389,9 +389,9 @@ CREATE TABLE products_append ( price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; @@ -405,9 +405,9 @@ CREATE TABLE products ( price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Should error out since add constraint is not the single subcommand @@ -415,7 +415,7 @@ ALTER TABLE products ADD CONSTRAINT unn_1 UNIQUE(product_no, price), ADD CONSTRA ERROR: cannot execute ADD CONSTRAINT command with other subcommands HINT: You can issue each subcommand separately -- Tests for constraints without name --- Commands below should error out since constraints do not have the name +-- Commands below should error out since constraints do not have the name ALTER TABLE products ADD UNIQUE(product_no); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE products ADD PRIMARY KEY(product_no); @@ -430,7 +430,7 @@ ALTER TABLE products ADD CONSTRAINT uniq_product_no EXCLUDE USING btree (product ALTER TABLE products DROP CONSTRAINT nonzero_product_no; ALTER TABLE products DROP CONSTRAINT uniq_product_no; DROP TABLE products; --- Tests with transactions +-- Tests with transactions CREATE TABLE products ( product_no integer, name text, @@ -438,9 +438,9 @@ CREATE TABLE products ( discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -456,15 +456,15 @@ ALTER TABLE products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); INSERT INTO products VALUES(1,'product_1', 10, 8); ROLLBACK; --- There should be no constraint on master and worker(s) +-- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- (0 rows) @@ -476,15 +476,15 @@ ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); ROLLBACK; --- There should be no constraint 
on master and worker(s) +-- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- (0 rows) @@ -497,26 +497,26 @@ CREATE TABLE sc1.alter_add_prim_key(x int, y int); CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON sc1.alter_add_prim_key(x); ALTER TABLE sc1.alter_add_prim_key ADD CONSTRAINT alter_pk_idx PRIMARY KEY USING INDEX alter_pk_idx; SELECT create_distributed_table('sc1.alter_add_prim_key', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE - kc.table_schema = 'sc1' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + kc.table_schema = 'sc1' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450234 localhost | 57638 | t | alter_pk_idx_1450234 @@ -526,28 +526,28 @@ CREATE SCHEMA sc2; CREATE TABLE sc2.alter_add_prim_key(x int, y int); SET search_path TO 'sc2'; SELECT create_distributed_table('alter_add_prim_key', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON alter_add_prim_key(x); ALTER TABLE alter_add_prim_key ADD CONSTRAINT alter_pk_idx PRIMARY KEY USING INDEX alter_pk_idx; SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc2' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | alter_pk_idx_1450236 localhost | 57638 | t | alter_pk_idx_1450236 @@ -560,29 +560,29 @@ INSERT INTO sc3.alter_add_prim_key(x) SELECT generate_series(1,100); SET search_path TO 'sc3'; SELECT create_distributed_table('alter_add_prim_key', 'x'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE UNIQUE INDEX CONCURRENTLY alter_pk_idx ON alter_add_prim_key(x); ALTER TABLE alter_add_prim_key ADD CONSTRAINT a_constraint PRIMARY KEY USING INDEX alter_pk_idx; NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "alter_pk_idx" to "a_constraint" SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc3' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | a_constraint_1450238 localhost | 57638 | t | a_constraint_1450238 @@ -590,23 +590,23 @@ ORDER BY ALTER TABLE alter_add_prim_key DROP CONSTRAINT a_constraint; SELECT (run_command_on_workers($$ - SELECT - kc.constraint_name - FROM + SELECT + kc.constraint_name + FROM information_schema.table_constraints tc join information_schema.key_column_usage kc on (kc.table_name = tc.table_name and kc.table_schema = tc.table_schema and kc.constraint_name = tc.constraint_name) WHERE kc.table_schema = 'sc3' and tc.constraint_type = 'PRIMARY KEY' and kc.table_name LIKE 'alter_add_prim_key_%' - ORDER BY + ORDER BY 1 - LIMIT + LIMIT 1; $$)).* -ORDER BY +ORDER BY 1,2,3,4; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | - localhost | 57638 | t | + localhost | 57637 | t | + localhost | 57638 | t | (2 rows) SET search_path TO 'public'; diff --git a/src/test/regress/expected/multi_array_agg.out b/src/test/regress/expected/multi_array_agg.out index 5da5f5f41..dd673e11d 100644 --- a/src/test/regress/expected/multi_array_agg.out +++ b/src/test/regress/expected/multi_array_agg.out @@ -9,7 +9,7 @@ SELECT ARRAY(SELECT unnest($1) ORDER BY 1) $$; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); - array_cat_agg + array_cat_agg --------------------------------------------------------------------- {1,2,3,4} (1 row) @@ -24,7 +24,7 @@ ERROR: array_agg with order by is unsupported -- Check array_agg() for different data types and LIMIT clauses SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- {2132,15635,24027,63700,67310,155190} {106170} @@ -40,7 +40,7 @@ SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- {13309.60,21168.23,22824.48,28955.64,45983.16,49620.16} {44694.46} @@ -56,7 +56,7 @@ SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY 
l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- {01-29-1996,01-30-1996,03-13-1996,03-30-1996,04-12-1996,04-21-1996} {01-28-1997} @@ -72,7 +72,7 @@ SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- {"AIR ","FOB ","MAIL ","MAIL ","REG AIR ","TRUCK "} {"RAIL "} @@ -88,7 +88,7 @@ SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey -- Check that we can execute array_agg() within other functions SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; - array_length + array_length --------------------------------------------------------------------- 12000 (1 row) @@ -100,7 +100,7 @@ SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | array_sort + l_quantity | count | avg | array_sort --------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} @@ -111,7 +111,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderk SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | my_month + l_quantity | my_month --------------------------------------------------------------------- 1.00 | {2,3,4,4,4,5,5,5,6,7,7,7,7,9,9,11,11} 2.00 | {1,3,5,5,5,5,6,6,6,7,7,8,10,10,11,11,11,12,12} @@ -122,7 +122,7 @@ SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | array_sort + l_quantity | array_sort --------------------------------------------------------------------- 1.00 | {11269,11397,11713,11715,11973,18317,18445} 2.00 | {11847,18061,18247,18953} @@ -133,15 +133,15 @@ SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE -- Check that we can execute array_agg() with an expression containing NULL values SELECT array_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 10; - array_agg + array_agg --------------------------------------------------------------------- {NULL,36.00,NULL,28.00,24.00,32.00,38.00,45.00,49.00,27.00,NULL,28.00,26.00,30.00,NULL,26.00,50.00,37.00,NULL,NULL,46.00,28.00,38.00,35.00,NULL} (1 row) -- Check that we return NULL in case there are no input rows to array_agg() SELECT array_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - array_agg + array_agg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_average_expression.out 
b/src/test/regress/expected/multi_average_expression.out index 2900c4499..f03a9ecdd 100644 --- a/src/test/regress/expected/multi_average_expression.out +++ b/src/test/regress/expected/multi_average_expression.out @@ -25,7 +25,7 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus + sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus --------------------------------------------------------------------- 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 | A | F 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 | N | F @@ -45,7 +45,7 @@ SELECT end) FROM lineitem; - avg + avg --------------------------------------------------------------------- 35.3570440077497924 (1 row) @@ -58,8 +58,8 @@ SELECT end) FROM lineitem; - avg + avg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_basic_queries.out b/src/test/regress/expected/multi_basic_queries.out index 529179a2c..ddaf50b60 100644 --- a/src/test/regress/expected/multi_basic_queries.out +++ b/src/test/regress/expected/multi_basic_queries.out @@ -4,19 +4,19 @@ -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. SELECT count(*) FROM lineitem; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT sum(l_extendedprice) FROM lineitem; - sum + sum --------------------------------------------------------------------- 457702024.50 (1 row) SELECT avg(l_extendedprice) FROM lineitem; - avg + avg --------------------------------------------------------------------- 38141.835375000000 (1 row) @@ -25,7 +25,7 @@ SELECT avg(l_extendedprice) FROM lineitem; BEGIN; SET TRANSACTION READ ONLY; SELECT count(*) FROM lineitem; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -33,7 +33,7 @@ SELECT count(*) FROM lineitem; COMMIT; -- Verify temp tables which are used for final result aggregation don't persist. 
SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r'; - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_basics.out b/src/test/regress/expected/multi_behavioral_analytics_basics.out index c28f69123..046eb4eeb 100644 --- a/src/test/regress/expected/multi_behavioral_analytics_basics.out +++ b/src/test/regress/expected/multi_behavioral_analytics_basics.out @@ -20,7 +20,7 @@ FROM ( ) q; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) @@ -69,7 +69,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 4 | 2 | 1.5000000000000000 (1 row) @@ -146,7 +146,7 @@ ORDER BY count_pay; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 7 | 3 | 1.7142857142857143 (1 row) @@ -187,7 +187,7 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 3 | 3 | 2.0000000000000000 (1 row) @@ -206,7 +206,7 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 5 AND value_1 <= 6); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 5 | 5 | 3.8000000000000000 (1 row) @@ -227,7 +227,7 @@ GROUP BY user_id HAVING count(distinct value_1) >= 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -245,7 +245,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type>1 AND event_type < 5 AND value_3 > 2 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 20 | 6 | 3.7500000000000000 (1 row) @@ -263,7 +263,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=1 AND value_3 > 4 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results 
are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 4 | 2 | 4.2500000000000000 (1 row) @@ -282,7 +282,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type=2 AND value_3 > 1 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 29 | 5 | 3.1034482758620690 (1 row) @@ -300,7 +300,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 11 | 1 | 5.0000000000000000 (1 row) @@ -328,7 +328,7 @@ INSERT INTO agg_results(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) @@ -347,7 +347,7 @@ SELECT user_id, value_1 from ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) @@ -370,7 +370,7 @@ And user_id in And value_2 > 1); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 11 | 4 | 3.1818181818181818 (1 row) @@ -387,7 +387,7 @@ WHERE user_id in (SELECT user_id from events_table WHERE event_type > 3 and even GROUP BY user_id, event_type; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 34 | 6 | 3.4411764705882353 (1 row) @@ -409,7 +409,7 @@ where event_type = 4 group by user_id having count(*) > 3 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 4 | 4 | 2.5000000000000000 (1 row) @@ -437,7 +437,7 @@ FROM WHERE users_table.value_1 < 3; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 3488 | 6 | 3.5372706422018349 (1 
row) @@ -461,7 +461,7 @@ FROM WHERE temp.value_1 < 3; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; - count | count | avg + count | count | avg --------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 (1 row) @@ -485,7 +485,7 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; - count | count | avg | avg + count | count | avg | avg --------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) @@ -508,7 +508,7 @@ FROM ON users_ids.user_id = temp.user_id ORDER BY 1, 2; SELECT count(*), count(DISTINCT user_id), avg(user_id), avg(value_1_agg) FROM agg_results; - count | count | avg | avg + count | count | avg | avg --------------------------------------------------------------------- 6 | 6 | 3.5000000000000000 | 0.16666666666666666667 (1 row) diff --git a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out index 65c99c730..3f3025267 100644 --- a/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out +++ b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out @@ -22,7 +22,7 @@ FROM ( WHERE user_id = 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) @@ -51,7 +51,7 @@ FROM ( WHERE (user_id = 1 OR user_id = 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 2 | 2 | 1.5000000000000000 (1 row) @@ -144,7 +144,7 @@ FROM ( ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 2.0000000000000000 (1 row) @@ -186,7 +186,7 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 5.0000000000000000 (1 row) @@ -229,7 +229,7 @@ FROM ( ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 2 | 2 | 3.0000000000000000 (1 row) @@ -249,7 +249,7 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND user_id = 1; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM 
agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) @@ -269,7 +269,7 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 AND (user_id = 1 OR user_id = 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 1 | 1 | 1.00000000000000000000 (1 row) @@ -288,7 +288,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND user_id = 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 7 | 1 | 2.0000000000000000 (1 row) @@ -307,7 +307,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND (user_id = 2 OR user_id = 1); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 10 | 2 | 1.7000000000000000 (1 row) @@ -326,7 +326,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 6 | 1 | 1.00000000000000000000 (1 row) @@ -345,7 +345,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 4 AND event_type <= 5 AND value_3 > 4 AND user_id=users_table.user_id AND (user_id = 1 OR user_id = 2)); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 20 | 2 | 1.7000000000000000 (1 row) @@ -375,7 +375,7 @@ INSERT INTO agg_results_second(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 2 | 1 | 3.0000000000000000 (1 row) @@ -404,7 +404,7 @@ INSERT INTO agg_results_second(user_id, value_2_agg) HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; - count | count | avg + count | count | avg --------------------------------------------------------------------- 4 | 2 | 3.5000000000000000 (1 row) diff --git a/src/test/regress/expected/multi_binary_master_copy_format.out b/src/test/regress/expected/multi_binary_master_copy_format.out index 7269daeed..3d506408e 100644 --- 
a/src/test/regress/expected/multi_binary_master_copy_format.out +++ b/src/test/regress/expected/multi_binary_master_copy_format.out @@ -6,29 +6,29 @@ SET citus.next_shard_id TO 430000; SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; - l_shipmode + l_shipmode --------------------------------------------------------------------- - TRUCK - MAIL + TRUCK + MAIL (2 rows) RESET citus.task_executor_type; SELECT count(*) FROM lineitem; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; - l_shipmode + l_shipmode --------------------------------------------------------------------- - TRUCK - MAIL + TRUCK + MAIL (2 rows) diff --git a/src/test/regress/expected/multi_cache_invalidation.out b/src/test/regress/expected/multi_cache_invalidation.out index 1a1e2532d..285275ae2 100644 --- a/src/test/regress/expected/multi_cache_invalidation.out +++ b/src/test/regress/expected/multi_cache_invalidation.out @@ -6,21 +6,21 @@ SET citus.next_shard_id TO 1601000; CREATE TABLE mci_1.test (test_id integer NOT NULL, data int); CREATE TABLE mci_2.test (test_id integer NOT NULL, data int); SELECT create_distributed_table('mci_1.test', 'test_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('mci_2.test', 'test_id', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO mci_1.test VALUES (1,2), (3,4); -- move shards into other append-distributed table SELECT run_command_on_placements('mci_1.test', 'ALTER TABLE %s SET SCHEMA mci_2'); - run_command_on_placements + run_command_on_placements --------------------------------------------------------------------- (localhost,57637,1601000,t,"ALTER TABLE") (localhost,57638,1601000,t,"ALTER TABLE") @@ -36,7 +36,7 @@ UPDATE pg_dist_shard SET logicalrelid = 'mci_2.test'::regclass, shardminvalue = NULL, shardmaxvalue = NULL WHERE logicalrelid = 'mci_1.test'::regclass; SELECT * FROM mci_2.test ORDER BY test_id; - test_id | data + test_id | data --------------------------------------------------------------------- 1 | 2 3 | 4 diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index 05f983748..a1c073530 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -14,7 +14,7 @@ SET client_min_messages to ERROR; SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) @@ -22,7 +22,7 @@ SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); - node_name | node_port | success | result + node_name | 
node_port | success | result --------------------------------------------------------------------- localhost | 666 | f | failed to connect to localhost:xxxxx (1 row) @@ -37,7 +37,7 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -47,7 +47,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -56,7 +56,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -67,7 +67,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 @@ -79,7 +79,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result @@ -91,7 +91,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result @@ -103,7 +103,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE @@ -113,7 +113,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], 
false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -121,7 +121,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -130,7 +130,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -138,7 +138,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -147,7 +147,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -162,7 +162,7 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -171,7 +171,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -179,7 +179,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -188,7 +188,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select 
count(*) from second_table']::text[], false); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) @@ -200,7 +200,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 0 (1 row) @@ -210,7 +210,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single column in query target (1 row) @@ -219,7 +219,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) @@ -230,7 +230,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 @@ -242,7 +242,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result @@ -254,7 +254,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result @@ -266,7 +266,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE @@ -282,7 +282,7 @@ SELECT quote_literal(node_name) as node_name, node_port as node_port SELECT * FROM 
master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -290,7 +290,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 20 (1 row) @@ -299,7 +299,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -307,7 +307,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | INSERT 0 20 (1 row) @@ -316,7 +316,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | 40 (1 row) @@ -325,7 +325,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE INDEX (1 row) @@ -334,7 +334,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -342,7 +342,7 @@ SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP TABLE (1 row) @@ -351,21 +351,21 @@ SELECT * FROM 
master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]: SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); - node_name | node_port | success | result + node_name | node_port | success | result --------------------------------------------------------------------- localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) -- run_command_on_XXX tests SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | 0 localhost | 57638 | t | 0 @@ -375,13 +375,13 @@ SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') O SET citus.shard_count TO 5; CREATE TABLE check_placements (key int); SELECT create_distributed_table('check_placements', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_placements('check_placements', 'select 1'); - nodename | nodeport | shardid | success | result + nodename | nodeport | shardid | success | result --------------------------------------------------------------------- localhost | 57637 | 1240000 | t | 1 localhost | 57638 | 1240000 | t | 1 @@ -398,7 +398,7 @@ SELECT * FROM run_command_on_placements('check_placements', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; SELECT * FROM run_command_on_placements('check_placements', 'select 1'); - nodename | nodeport | shardid | success | result + nodename | nodeport | shardid | success | result --------------------------------------------------------------------- localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 @@ -413,17 +413,17 @@ DROP TABLE check_placements CASCADE; -- make sure run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); SELECT create_distributed_table('check_colocated', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 4; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', @@ -435,9 +435,9 @@ SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', @@ -449,14 +449,14 @@ SET citus.shard_replication_factor TO 2; SET citus.shard_count TO 5; CREATE TABLE second_table (key int); SELECT 
create_distributed_table('second_table', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); - nodename | nodeport | shardid1 | shardid2 | success | result + nodename | nodeport | shardid1 | shardid2 | success | result --------------------------------------------------------------------- localhost | 57637 | 1240005 | 1240019 | t | 1 localhost | 57638 | 1240005 | 1240019 | t | 1 @@ -476,13 +476,13 @@ DROP TABLE second_table CASCADE; SET citus.shard_count TO 5; CREATE TABLE check_shards (key int); SELECT create_distributed_table('check_shards', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM run_command_on_shards('check_shards', 'select 1'); - shardid | success | result + shardid | success | result --------------------------------------------------------------------- 1240024 | t | 1 1240025 | t | 1 @@ -494,7 +494,7 @@ SELECT * FROM run_command_on_shards('check_shards', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * FROM run_command_on_shards('check_shards', 'select 1'); NOTICE: some shards do not have active placements - shardid | success | result + shardid | success | result --------------------------------------------------------------------- 1240025 | t | 1 1240027 | t | 1 diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index fe559c68c..58b3c1d86 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -9,20 +9,20 @@ ERROR: cannot create reference table "test_reference_table" DETAIL: There are no active worker nodes. -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? + ?column? 
 ---------------------------------------------------------------------
 1
 (1 row)
 -- get the active nodes
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
@@ -30,14 +30,14 @@ SELECT master_get_active_worker_nodes();
 -- try to add a node that is already in the cluster
 SELECT * FROM master_add_node('localhost', :worker_1_port);
- master_add_node 
+ master_add_node
 ---------------------------------------------------------------------
 1
 (1 row)
 -- get the active nodes
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
@@ -45,33 +45,33 @@ SELECT master_get_active_worker_nodes();
 -- try to remove a node (with no placements)
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- verify that the node has been deleted
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57637)
 (1 row)
 -- try to disable a node with no placements see that node is removed
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT master_disable_node('localhost', :worker_2_port);
- master_disable_node 
+ master_disable_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57637)
 (1 row)
@@ -80,21 +80,21 @@ SELECT master_get_active_worker_nodes();
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;
 SELECT * FROM master_activate_node('localhost', :worker_2_port);
- master_activate_node 
+ master_activate_node
 ---------------------------------------------------------------------
 3
 (1 row)
 CREATE TABLE cluster_management_test (col_1 text, col_2 int);
 SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- see that there are some active placements in the candidate node
 SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
- shardid | shardstate | nodename | nodeport 
+ shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
 1220001 | 1 | localhost | 57638
 1220003 | 1 | localhost | 57638
@@ -110,7 +110,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER
 SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
@@ -122,13 +122,13 @@ INSERT INTO test_reference_table VALUES (1, '1');
 -- observe that a notification is displayed
 SELECT master_disable_node('localhost', :worker_2_port);
 NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
- master_disable_node 
+ master_disable_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57637)
 (1 row)
@@ -169,49 +169,49 @@ ERROR: permission denied for function master_update_node
 SET ROLE node_metadata_user;
 BEGIN;
 SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
- master_update_node 
+ master_update_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
- nodename | nodeport | noderole 
+ nodename | nodeport | noderole
 ---------------------------------------------------------------------
 localhost | 57637 | primary
 localhost | 57639 | primary
@@ -222,14 +222,14 @@ SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
 ABORT;
 \c - postgres - :master_port
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57637)
 (1 row)
 -- restore the node for next tests
 SELECT * FROM master_activate_node('localhost', :worker_2_port);
- master_activate_node 
+ master_activate_node
 ---------------------------------------------------------------------
 3
 (1 row)
@@ -241,7 +241,7 @@ ERROR: you cannot remove the primary node of a node group which has shard place
 SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
 UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group;
 SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
- shardid | shardstate | nodename | nodeport 
+ shardid | shardstate | nodename | nodeport
 ---------------------------------------------------------------------
 1220001 | 3 | localhost | 57638
 1220003 | 3 | localhost | 57638
@@ -257,7 +257,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER
 SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
 SELECT master_get_active_worker_nodes();
- master_get_active_worker_nodes 
+ master_get_active_worker_nodes
 ---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
@@ -265,7 +265,7 @@ SELECT master_get_active_worker_nodes();
 -- clean-up
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -285,7 +285,7 @@ SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
 UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
 -- test that you are allowed to remove secondary nodes even if there are placements
 SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -293,69 +293,69 @@ SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole
 SELECT master_remove_node('localhost', :worker_2_port);
 ERROR: you cannot remove the primary node of a node group which has shard placements
 SELECT master_remove_node('localhost', 9990);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- clean-up
 DROP TABLE cluster_management_test;
 -- check that adding/removing nodes are propagated to nodes with metadata
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node 
+ start_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 \c - - - :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 localhost | 57638
 (1 row)
 \c - - - :master_port
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 \c - - - :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)
 \c - - - :master_port
 -- check that added nodes are not propagated to nodes without metadata
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
- stop_metadata_sync_to_node 
+ stop_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 \c - - - :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)
@@ -364,13 +364,13 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
 SELECT
 master_remove_node('localhost', :worker_1_port),
 master_remove_node('localhost', :worker_2_port);
- master_remove_node | master_remove_node 
+ master_remove_node | master_remove_node
 ---------------------------------------------------------------------
- | 
+ |
 (1 row)
 SELECT count(1) FROM pg_dist_node;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -379,13 +379,13 @@ SELECT count(1) FROM pg_dist_node;
 SELECT
 master_add_node('localhost', :worker_1_port),
 master_add_node('localhost', :worker_2_port);
- master_add_node | master_add_node 
+ master_add_node | master_add_node
 ---------------------------------------------------------------------
 11 | 12
 (1 row)
 SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
 11 | 9 | localhost | 57637 | default | f | t | primary | default | f | t
 12 | 10 | localhost | 57638 | default | f | t | primary | default | f | t
@@ -394,84 +394,84 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 -- check that mixed add/remove node commands work fine inside transaction
 BEGIN;
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 COMMIT;
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node 
+ start_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 BEGIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 COMMIT;
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 localhost | 57638
 (1 row)
 \c - - - :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
- nodename | nodeport 
+ nodename | nodeport
 ---------------------------------------------------------------------
 localhost | 57638
 (1 row)
 \c - - - :master_port
 SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
- 
+
+
 (2 rows)
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -479,30 +479,30 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 -- check that a distributed table can be created after adding a node in a transaction
 SET citus.shard_count TO 4;
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 BEGIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 CREATE TABLE temp(col1 text, col2 int);
 SELECT create_distributed_table('temp', 'col1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO temp VALUES ('row1', 1);
 INSERT INTO temp VALUES ('row2', 2);
 COMMIT;
 SELECT col1, col2 FROM temp ORDER BY col1;
- col1 | col2 
+ col1 | col2
 ---------------------------------------------------------------------
 row1 | 1
 row2 | 2
@@ -516,7 +516,7 @@ WHERE
 pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND
 pg_dist_shard.logicalrelid = 'temp'::regclass AND
 pg_dist_shard_placement.nodeport = :worker_2_port;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)
@@ -529,15 +529,15 @@ DELETE FROM pg_dist_placement;
 DELETE FROM pg_dist_node;
 \c - - - :master_port
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
- stop_metadata_sync_to_node 
+ stop_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
- stop_metadata_sync_to_node 
+ stop_metadata_sync_to_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check that you can't add a primary to a non-default cluster
@@ -550,47 +550,47 @@ ERROR: group 14 already has a primary node
 -- check that you can add secondaries and unavailable nodes to a group
 SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
 SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary');
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable');
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 -- add_inactive_node also works with secondaries
 SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary');
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
 -- check that you can add a seconary to a non-default cluster, and activate it, and remove it
 SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
- master_add_inactive_node 
+ master_add_inactive_node
 ---------------------------------------------------------------------
 22
 (1 row)
 SELECT master_activate_node('localhost', 9999);
- master_activate_node 
+ master_activate_node
 ---------------------------------------------------------------------
 22
 (1 row)
 SELECT master_disable_node('localhost', 9999);
- master_disable_node 
+ master_disable_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_remove_node('localhost', 9999);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check that you can't manually add two primaries to a group
@@ -614,7 +614,7 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary,
 -- check that you /can/ add a secondary node to a non-default cluster
 SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
 SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
- master_add_node 
+ master_add_node
 ---------------------------------------------------------------------
 25
 (1 row)
@@ -627,13 +627,13 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole =
 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
 'overflow'
 );
- master_add_node 
+ master_add_node
 ---------------------------------------------------------------------
 26
 (1 row)
 SELECT * FROM pg_dist_node WHERE nodeport=8887;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
 (1 row)
@@ -642,13 +642,13 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
 -- them in any of the remaining tests
 -- master_add_secondary_node lets you skip looking up the groupid
 SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
- master_add_secondary_node 
+ master_add_secondary_node
 ---------------------------------------------------------------------
 27
 (1 row)
 SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
- master_add_secondary_node 
+ master_add_secondary_node
 ---------------------------------------------------------------------
 28
 (1 row)
@@ -656,7 +656,7 @@ SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost',
 SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
 ERROR: node at "localhost:xxxxx" does not exist
 SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
- master_add_secondary_node 
+ master_add_secondary_node
 ---------------------------------------------------------------------
 29
 (1 row)
@@ -670,26 +670,26 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port);
 ERROR: there is already another node with the specified hostname and port
 -- master_update_node moves a node
 SELECT master_update_node(:worker_1_node, 'somehost', 9000);
- master_update_node 
+ master_update_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t
 (1 row)
 -- cleanup
 SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
- master_update_node 
+ master_update_node
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t
 (1 row)
@@ -697,16 +697,16 @@ SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
 SET citus.shard_replication_factor TO 1;
 CREATE TABLE test_dist (x int, y int);
 SELECT create_distributed_table('test_dist', 'x');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- testing behaviour when setting shouldhaveshards to false on partially empty node
 SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
- master_set_node_property 
+ master_set_node_property
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE test_dist_colocated (x int, y int);
@@ -714,34 +714,34 @@ CREATE TABLE test_dist_non_colocated (x int, y int);
 CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int);
 CREATE TABLE test_ref (a int, b int);
 SELECT create_distributed_table('test_dist_colocated', 'x');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_reference_table('test_ref');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- colocated tables should still be placed on shouldhaveshards false nodes for safety
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 2
 57638 | 2
@@ -751,7 +751,7 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 4
 (1 row)
@@ -761,7 +761,7 @@ WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 4
 (1 row)
@@ -770,7 +770,7 @@ WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 1
 57638 | 1
@@ -780,9 +780,9 @@ WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated;
 -- testing behaviour when setting shouldhaveshards to false on fully empty node
 SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
- master_set_node_property 
+ master_set_node_property
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE test_dist (x int, y int);
@@ -790,22 +790,22 @@ CREATE TABLE test_dist_colocated (x int, y int);
 CREATE TABLE test_dist_non_colocated (x int, y int);
 CREATE TABLE test_ref (a int, b int);
 SELECT create_distributed_table('test_dist', 'x');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_reference_table('test_ref');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- distributed tables should not be placed on nodes with shouldhaveshards false
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 4
 (1 row)
@@ -814,16 +814,16 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 1
 57638 | 1
 (2 rows)
 SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
- master_set_node_property 
+ master_set_node_property
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- distributed tables should still not be placed on nodes that were switched to
@@ -831,7 +831,7 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaves
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 4
 (1 row)
@@ -840,22 +840,22 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 1
 57638 | 1
 (2 rows)
 SELECT create_distributed_table('test_dist_colocated', 'x');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- colocated tables should not be placed on nodedes that were switched to
@@ -863,7 +863,7 @@ SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with =>
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 4
 (1 row)
@@ -873,7 +873,7 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY
 SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
 WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
- nodeport | count 
+ nodeport | count
 ---------------------------------------------------------------------
 57637 | 2
 57638 | 2
diff --git a/src/test/regress/expected/multi_colocated_shard_transfer.out b/src/test/regress/expected/multi_colocated_shard_transfer.out
index 817b895b3..4dfc43a84 100644
--- a/src/test/regress/expected/multi_colocated_shard_transfer.out
+++ b/src/test/regress/expected/multi_colocated_shard_transfer.out
@@ -10,14 +10,14 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_por
 -- test repairing colocated shards
 -- status before shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300000 | table1_group1 | 57637 | 1000 | 1
 1300000 | table1_group1 | 57638 | 1000 | 3
@@ -39,21 +39,21 @@ ORDER BY s.shardid, sp.nodeport;
 -- repair colocated shards
 SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
- master_copy_shard_placement 
+ master_copy_shard_placement
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- status after shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300000 | table1_group1 | 57637 | 1000 | 1
 1300000 | table1_group1 | 57638 | 1000 | 1
@@ -76,14 +76,14 @@ ORDER BY s.shardid, sp.nodeport;
 -- test repairing NOT colocated shard
 -- status before shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 p.logicalrelid = 'table5_groupX'::regclass
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300016 | table5_groupx | 57637 | 0 | 1
 1300016 | table5_groupx | 57638 | 0 | 3
@@ -97,21 +97,21 @@ ORDER BY s.shardid, sp.nodeport;
 -- repair NOT colocated shard
 SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
- master_copy_shard_placement 
+ master_copy_shard_placement
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- status after shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 p.logicalrelid = 'table5_groupX'::regclass
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300016 | table5_groupx | 57637 | 0 | 1
 1300016 | table5_groupx | 57638 | 0 | 1
@@ -126,14 +126,14 @@ ORDER BY s.shardid, sp.nodeport;
 -- test repairing shard in append distributed table
 -- status before shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 p.logicalrelid = 'table6_append'::regclass
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300020 | table6_append | 57637 | 0 | 1
 1300020 | table6_append | 57638 | 0 | 3
@@ -143,21 +143,21 @@ ORDER BY s.shardid, sp.nodeport;
 -- repair shard in append distributed table
 SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
- master_copy_shard_placement 
+ master_copy_shard_placement
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- status after shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 p.logicalrelid = 'table6_append'::regclass
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300020 | table6_append | 57637 | 0 | 1
 1300020 | table6_append | 57638 | 0 | 1
@@ -170,14 +170,14 @@ ORDER BY s.shardid, sp.nodeport;
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1300000;
 -- status before shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300000 | table1_group1 | 57637 | 1000 | 3
 1300000 | table1_group1 | 57638 | 1000 | 3
@@ -202,14 +202,14 @@ SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localh
 ERROR: source placement must be in finalized state
 -- status after shard repair
 SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
-FROM 
+FROM
 pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp
 WHERE
 p.logicalrelid = s.logicalrelid AND
 s.shardid = sp.shardid AND
 colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass)
 ORDER BY s.shardid, sp.nodeport;
- shardid | logicalrelid | nodeport | colocationid | shardstate 
+ shardid | logicalrelid | nodeport | colocationid | shardstate
 ---------------------------------------------------------------------
 1300000 | table1_group1 | 57637 | 1000 | 3
 1300000 | table1_group1 | 57638 | 1000 | 3
diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out
index 4a7d700f6..7a5120354 100644
--- a/src/test/regress/expected/multi_colocation_utils.out
+++ b/src/test/regress/expected/multi_colocation_utils.out
@@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4;
 CREATE SEQUENCE colocation_test_seq
 MINVALUE 1000
 NO CYCLE;
-/* a very simple UDF that only sets the colocation ids the same 
+/* a very simple UDF that only sets the colocation ids the same
 * DO NOT USE THIS FUNCTION IN PRODUCTION. It manually sets colocationid column of
 * pg_dist_partition and it does not check anything about pyshical state about shards.
 */
@@ -25,7 +25,7 @@ BEGIN
 FROM pg_dist_partition p1, pg_dist_partition p2
 WHERE
 p2.logicalrelid = source_table AND
- (p1.logicalrelid = source_table OR 
+ (p1.logicalrelid = source_table OR
 (p1.colocationId = p2.colocationId AND p1.colocationId != 0)))
 UNION
 (SELECT target_table)
@@ -58,280 +58,280 @@ CREATE FUNCTION find_shard_interval_index(bigint)
 -- create distributed table observe shard pruning
 CREATE TABLE table1_group1 ( id int );
 SELECT master_create_distributed_table('table1_group1', 'id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_worker_shards('table1_group1', 4, 2);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_group1 ( id int );
 SELECT master_create_distributed_table('table2_group1', 'id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_worker_shards('table2_group1', 4, 2);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table3_group2 ( id int );
 SELECT master_create_distributed_table('table3_group2', 'id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_worker_shards('table3_group2', 4, 2);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table4_group2 ( id int );
 SELECT master_create_distributed_table('table4_group2', 'id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_worker_shards('table4_group2', 4, 2);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table5_groupX ( id int );
 SELECT master_create_distributed_table('table5_groupX', 'id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_worker_shards('table5_groupX', 4, 2);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table6_append ( id int );
 SELECT master_create_distributed_table('table6_append', 'id', 'append');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT master_create_empty_shard('table6_append');
- master_create_empty_shard 
+ master_create_empty_shard
 ---------------------------------------------------------------------
 1300020
 (1 row)
 SELECT master_create_empty_shard('table6_append');
- master_create_empty_shard 
+ master_create_empty_shard
 ---------------------------------------------------------------------
 1300021
 (1 row)
 -- make table1_group1 and table2_group1 co-located manually
 SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1');
- colocation_test_colocate_tables 
+ colocation_test_colocate_tables
 ---------------------------------------------------------------------
 t
 (1 row)
 -- check co-location id
 SELECT get_table_colocation_id('table1_group1');
- get_table_colocation_id 
+ get_table_colocation_id
 ---------------------------------------------------------------------
 1000
 (1 row)
 SELECT get_table_colocation_id('table5_groupX');
- get_table_colocation_id 
+ get_table_colocation_id
 ---------------------------------------------------------------------
 0
 (1 row)
 SELECT get_table_colocation_id('table6_append');
- get_table_colocation_id 
+ get_table_colocation_id
 ---------------------------------------------------------------------
 0
 (1 row)
 -- check self table co-location
 SELECT tables_colocated('table1_group1', 'table1_group1');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 SELECT tables_colocated('table5_groupX', 'table5_groupX');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 SELECT tables_colocated('table6_append', 'table6_append');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 -- check table co-location with same co-location group
 SELECT tables_colocated('table1_group1', 'table2_group1');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 -- check table co-location with different co-location group
 SELECT tables_colocated('table1_group1', 'table3_group2');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 -- check table co-location with invalid co-location group
 SELECT tables_colocated('table1_group1', 'table5_groupX');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 SELECT tables_colocated('table1_group1', 'table6_append');
- tables_colocated 
+ tables_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 -- check self shard co-location
 SELECT shards_colocated(1300000, 1300000);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 SELECT shards_colocated(1300016, 1300016);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 SELECT shards_colocated(1300020, 1300020);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 -- check shard co-location with same co-location group
 SELECT shards_colocated(1300000, 1300004);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 t
 (1 row)
 -- check shard co-location with same table different co-location group
 SELECT shards_colocated(1300000, 1300001);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 -- check shard co-location with different co-location group
 SELECT shards_colocated(1300000, 1300005);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 -- check shard co-location with invalid co-location group
 SELECT shards_colocated(1300000, 1300016);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 SELECT shards_colocated(1300000, 1300020);
- shards_colocated 
+ shards_colocated
 ---------------------------------------------------------------------
 f
 (1 row)
 -- check co-located table list
 SELECT UNNEST(get_colocated_table_array('table1_group1'))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 table1_group1
 table2_group1
 (2 rows)
 SELECT UNNEST(get_colocated_table_array('table5_groupX'))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 table5_groupx
 (1 row)
 SELECT UNNEST(get_colocated_table_array('table6_append'))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 table6_append
 (1 row)
 -- check co-located shard list
 SELECT UNNEST(get_colocated_shard_array(1300000))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 1300000
 1300004
 (2 rows)
 SELECT UNNEST(get_colocated_shard_array(1300016))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 1300016
 (1 row)
 SELECT UNNEST(get_colocated_shard_array(1300020))::regclass ORDER BY 1;
- unnest 
+ unnest
 ---------------------------------------------------------------------
 1300020
 (1 row)
 -- check FindShardIntervalIndex function
 SELECT find_shard_interval_index(1300000);
- find_shard_interval_index 
+ find_shard_interval_index
 ---------------------------------------------------------------------
 0
 (1 row)
 SELECT find_shard_interval_index(1300001);
- find_shard_interval_index 
+ find_shard_interval_index
 ---------------------------------------------------------------------
 1
 (1 row)
 SELECT find_shard_interval_index(1300002);
- find_shard_interval_index 
+ find_shard_interval_index
 ---------------------------------------------------------------------
 2
 (1 row)
 SELECT find_shard_interval_index(1300003);
- find_shard_interval_index 
+ find_shard_interval_index
 ---------------------------------------------------------------------
 3
 (1 row)
 SELECT find_shard_interval_index(1300016);
- find_shard_interval_index 
+ find_shard_interval_index
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -340,32 +340,32 @@ SELECT find_shard_interval_index(1300016);
 SET citus.shard_count = 2;
 CREATE TABLE table1_groupA ( id int );
 SELECT create_distributed_table('table1_groupA', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupA ( id int );
 SELECT create_distributed_table('table2_groupA', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- change shard replication factor
 SET citus.shard_replication_factor = 1;
 CREATE TABLE table1_groupB ( id int );
 SELECT create_distributed_table('table1_groupB', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupB ( id int );
 SELECT create_distributed_table('table2_groupB', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table1_groupB'::regclass;
@@ -375,63 +375,63 @@ SET citus.shard_replication_factor to DEFAULT;
 -- change partition column type
 CREATE TABLE table1_groupC ( id text );
 SELECT create_distributed_table('table1_groupC', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupC ( id text );
 SELECT create_distributed_table('table2_groupC', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- change shard count
 SET citus.shard_count = 8;
 CREATE TABLE table1_groupD ( id int );
 SELECT create_distributed_table('table1_groupD', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupD ( id int );
 SELECT create_distributed_table('table2_groupD', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- try other distribution methods
 CREATE TABLE table_append ( id int );
 SELECT create_distributed_table('table_append', 'id', 'append');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table_range ( id int );
 SELECT create_distributed_table('table_range', 'id', 'range');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- test foreign table creation
 CREATE FOREIGN TABLE table3_groupD ( id int ) SERVER fake_fdw_server;
 SELECT create_distributed_table('table3_groupD', 'id');
 NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check metadata
-SELECT * FROM pg_dist_colocation 
- WHERE colocationid >= 1 AND colocationid < 1000 
+SELECT * FROM pg_dist_colocation
+ WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 3 | 4 | 2 | 23 | 0
 4 | 2 | 2 | 23 | 0
@@ -441,9 +441,9 @@ SELECT * FROM pg_dist_colocation
 (5 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
- WHERE colocationid >= 1 AND colocationid < 1000 
+ WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 table1_groupa | 4
 table2_groupa | 4
@@ -459,7 +459,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition
 -- check effects of dropping tables
 DROP TABLE table1_groupA;
 SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 4 | 2 | 2 | 23 | 0
 (1 row)
@@ -467,7 +467,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
 -- dropping all tables in a colocation group also deletes the colocation group
 DROP TABLE table2_groupA;
 SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 4 | 2 | 2 | 23 | 0
 (1 row)
@@ -476,86 +476,86 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
 SET citus.shard_count = 2;
 CREATE TABLE table1_groupE ( id int );
 SELECT create_distributed_table('table1_groupE', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupE ( id int );
 SELECT create_distributed_table('table2_groupE', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- test different table DDL
 CREATE TABLE table3_groupE ( dummy_column text, id int );
 SELECT create_distributed_table('table3_groupE', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- test different schema
 CREATE SCHEMA schema_colocation;
 CREATE TABLE schema_colocation.table4_groupE ( id int );
 SELECT create_distributed_table('schema_colocation.table4_groupE', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- test colocate_with option
 CREATE TABLE table1_group_none_1 ( id int );
 SELECT create_distributed_table('table1_group_none_1', 'id', colocate_with => 'none');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_group_none_1 ( id int );
 SELECT create_distributed_table('table2_group_none_1', 'id', colocate_with => 'table1_group_none_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table1_group_none_2 ( id int );
 SELECT create_distributed_table('table1_group_none_2', 'id', colocate_with => 'none');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table4_groupE ( id int );
 SELECT create_distributed_table('table4_groupE', 'id', colocate_with => 'default');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SET citus.shard_count = 3;
 -- check that this new configuration does not have a default group
 CREATE TABLE table1_group_none_3 ( id int );
 SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'NONE');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- a new table does not use a non-default group
 CREATE TABLE table1_group_default ( id int );
 SELECT create_distributed_table('table1_group_default', 'id', colocate_with => 'DEFAULT');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check metadata
 SELECT * FROM pg_dist_colocation
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 3 | 4 | 2 | 23 | 0
 4 | 2 | 2 | 23 | 0
@@ -568,7 +568,7 @@ SELECT * FROM pg_dist_colocation
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 table1_groupe | 4
 table2_groupe | 4
@@ -605,9 +605,9 @@ ERROR: relation "no_table" does not exist
 SELECT create_distributed_table('table_failing', 'id', colocate_with => '');
 ERROR: invalid name syntax
 SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL);
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check with different distribution column types
@@ -618,39 +618,39 @@ DETAIL: Distribution column types don't match for table1_groupe and table_bigin
 -- check worker table schemas
 \c - - - :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300062'::regclass;
- Column | Type | Modifiers 
+ Column | Type | Modifiers
 ---------------------------------------------------------------------
- dummy_column | text | 
- id | integer | 
+ dummy_column | text |
+ id | integer |
 (2 rows)
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_colocation.table4_groupE_1300064'::regclass;
- Column | Type | Modifiers 
+ Column | Type | Modifiers
 ---------------------------------------------------------------------
- id | integer | 
+ id | integer |
 (1 row)
 \c - - - :master_port
 SET citus.next_shard_id TO 1300080;
 CREATE TABLE table1_groupF ( id int );
 SELECT create_reference_table('table1_groupF');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_groupF ( id int );
 SELECT create_reference_table('table2_groupF');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check metadata
 SELECT * FROM pg_dist_colocation
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 3 | 4 | 2 | 23 | 0
 4 | 2 | 2 | 23 | 0
@@ -661,7 +661,7 @@ SELECT * FROM pg_dist_colocation
 (6 rows)
 -- cross check with internal colocation API
-SELECT 
+SELECT
 p1.logicalrelid::regclass AS table1,
 p2.logicalrelid::regclass AS table2,
 tables_colocated(p1.logicalrelid , p2.logicalrelid) AS colocated
@@ -676,7 +676,7 @@ WHERE
 ORDER BY
 table1, table2;
- table1 | table2 | colocated 
+ table1 | table2 | colocated
 ---------------------------------------------------------------------
 table1_group1 | table2_group1 | t
 table1_groupb | table2_groupb | t
@@ -717,7 +717,7 @@ ORDER BY
 shardmaxvalue::integer, shardid, nodeport;
- logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue 
+ logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------
 table1_groupb | 1300026 | t | 57637 | -2147483648 | -1
 table1_groupb | 1300027 | t | 57638 | 0 | 2147483647
@@ -823,10 +823,10 @@ ORDER BY
 table1_group_default | 1300078 | t | 57638 | -715827883 | 715827881
 table1_group_default | 1300079 | t | 57637 | 715827882 | 2147483647
 table1_group_default | 1300079 | t | 57638 | 715827882 | 2147483647
- table1_groupf | 1300080 | t | 57637 | | 
- table1_groupf | 1300080 | t | 57638 | | 
- table2_groupf | 1300081 | t | 57637 | | 
- table2_groupf | 1300081 | t | 57638 | | 
+ table1_groupf | 1300080 | t | 57637 | |
+ table1_groupf | 1300080 | t | 57638 | |
+ table2_groupf | 1300081 | t | 57637 | |
+ table2_groupf | 1300081 | t | 57638 | |
 (108 rows)
 -- reset colocation ids to test mark_tables_colocated
@@ -839,14 +839,14 @@ UPDATE pg_dist_partition SET colocationid = 0
 SELECT * FROM pg_dist_colocation
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 (0 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 (0 rows)
@@ -870,75 +870,75 @@ DETAIL: Shard counts don't match for table1_groupb and table1_groupd.
 SELECT * FROM pg_dist_colocation
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 (0 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 (0 rows)
 -- check successfully cololated tables
 SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT mark_tables_colocated('table1_groupC', ARRAY['table2_groupC']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT mark_tables_colocated('table1_groupD', ARRAY['table2_groupD']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT mark_tables_colocated('table1_groupE', ARRAY['table2_groupE', 'table3_groupE']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT mark_tables_colocated('table1_groupF', ARRAY['table2_groupF']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check to colocate with itself
 SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupB']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SET citus.shard_count = 2;
 CREATE TABLE table1_group_none ( id int );
 SELECT create_distributed_table('table1_group_none', 'id', colocate_with => 'NONE');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE table2_group_none ( id int );
 SELECT create_distributed_table('table2_group_none', 'id', colocate_with => 'NONE');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check metadata to see colocation groups are created successfully
-SELECT * FROM pg_dist_colocation 
- WHERE colocationid >= 1 AND colocationid < 1000 
+SELECT * FROM pg_dist_colocation
+ WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 2 | 2 | 1 | 23 | 0
 3 | 2 | 2 | 25 | 100
@@ -949,7 +949,7 @@ SELECT * FROM pg_dist_colocation
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 table1_groupb | 2
 table2_groupb | 2
@@ -966,23 +966,23 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition
 -- move the all tables in colocation group 5 to colocation group 7
 SELECT mark_tables_colocated('table1_group_none', ARRAY['table1_groupE', 'table2_groupE', 'table3_groupE']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- move a table with a colocation id which is already not in pg_dist_colocation
 SELECT mark_tables_colocated('table1_group_none', ARRAY['table2_group_none']);
- mark_tables_colocated 
+ mark_tables_colocated
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- check metadata to see that unused colocation group is deleted
 SELECT * FROM pg_dist_colocation
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 2 | 2 | 1 | 23 | 0
 3 | 2 | 2 | 25 | 100
@@ -992,7 +992,7 @@ SELECT * FROM pg_dist_colocation
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
- logicalrelid | colocationid 
+ logicalrelid | colocationid
 ---------------------------------------------------------------------
 table1_groupb | 2
 table2_groupb | 2
@@ -1010,9 +1010,9 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition
 -- try to colocate different replication models
 CREATE TABLE table1_groupG ( id int );
 SELECT create_distributed_table('table1_groupG', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- update replication model
@@ -1024,9 +1024,9 @@ DETAIL: Replication models don't match for table1_groupg and table2_groupg.
 CREATE TABLE table2_groupG ( id int );
 ERROR: relation "table2_groupg" already exists
 SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT mark_tables_colocated('table1_groupG', ARRAY['table2_groupG']);
diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out
index b6d4e1042..bb37e3ea3 100644
--- a/src/test/regress/expected/multi_complex_expressions.out
+++ b/src/test/regress/expected/multi_complex_expressions.out
@@ -3,44 +3,44 @@
 --
 -- Check that we can correctly handle complex expressions and aggregates.
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 12000.0000000000000000
 (1 row)
 SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem;
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1200.0000000000000000
 (1 row)
 SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem;
- ?column? 
+ ?column?
--------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; - average + average --------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; - average_times_hundred + average_times_hundred --------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; - average_times_ten + average_times_ten --------------------------------------------------------------------- 254.4625000000000000 (1 row) -SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem +SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - l_quantity | count_quantity + l_quantity | count_quantity --------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 @@ -97,42 +97,42 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem -- Check that we can handle complex select clause expressions. SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; - count + count --------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; - count + count --------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count + count --------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; - count + count --------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; - count + count --------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -140,7 +140,7 @@ SELECT count(*) FROM lineitem WHERE random() = -0.1; -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; - count + count --------------------------------------------------------------------- 11423 (1 row) @@ -148,7 +148,7 @@ SELECT count(*) FROM lineitem -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -156,7 +156,7 @@ SELECT count(*) FROM lineitem -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -164,7 +164,7 @@ SELECT count(*) FROM lineitem -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -172,7 +172,7 @@ SELECT count(*) FROM lineitem -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem 
WHERE (l_quantity/100)::int::bool::text::bool; - count + count --------------------------------------------------------------------- 260 (1 row) @@ -180,7 +180,7 @@ SELECT count(*) FROM lineitem -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); - count + count --------------------------------------------------------------------- 7948 (1 row) @@ -188,7 +188,7 @@ SELECT count(*) FROM lineitem -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); - count + count --------------------------------------------------------------------- 9122 (1 row) @@ -196,7 +196,7 @@ SELECT count(*) FROM lineitem -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); - count + count --------------------------------------------------------------------- 9122 (1 row) @@ -204,7 +204,7 @@ SELECT count(*) FROM lineitem -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -212,7 +212,7 @@ SELECT count(*) FROM orders -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -220,7 +220,7 @@ SELECT count(*) FROM lineitem -- constant expressions can be pushed down SELECT count(*) FROM lineitem WHERE 0 != 0; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -228,7 +228,7 @@ SELECT count(*) FROM lineitem -- distinct expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; - count + count --------------------------------------------------------------------- 11999 (1 row) @@ -236,7 +236,7 @@ SELECT count(*) FROM lineitem -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); - count + count --------------------------------------------------------------------- 11882 (1 row) @@ -251,7 +251,7 @@ SELECT count(*) FROM lineitem isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); - count + count --------------------------------------------------------------------- 137 (1 row) @@ -263,7 +263,7 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber + l_linenumber --------------------------------------------------------------------- 1 (1 row) @@ -276,7 +276,7 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; - total_discount | count | sum | l_discount + total_discount | count | sum | l_discount --------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 @@ -299,7 +299,7 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber + l_linenumber --------------------------------------------------------------------- 2 (1 row) @@ -314,7 +314,7 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem ORDER BY l_receiptdate LIMIT 1; - max | min | l_receiptdate + max | min | l_receiptdate --------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) @@ -322,21 +322,21 @@ SELECT 
max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem -- Check that we can handle implicit and explicit join clause definitions. SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) @@ -347,7 +347,7 @@ ERROR: complex joins are only supported when all distributed tables are joined -- Check that we can issue limit/offset queries -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq ORDER BY 1 LIMIT 5; - o_custkey + o_custkey --------------------------------------------------------------------- 35 37 @@ -358,7 +358,7 @@ SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custke -- the subquery is recursively planned since it contains OFFSET, which is not pushdownable SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq ORDER BY 1 LIMIT 5; - o_orderkey + o_orderkey --------------------------------------------------------------------- 69 70 @@ -369,7 +369,7 @@ SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq O -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; - o_orderkey + o_orderkey --------------------------------------------------------------------- 69 70 @@ -385,18 +385,18 @@ SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; -SELECT +SELECT customer_keys.o_custkey, - SUM(order_count) AS total_order_count -FROM - (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count + SUM(order_count) AS total_order_count +FROM + (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys -GROUP BY +GROUP BY customer_keys.o_custkey -ORDER BY +ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; - o_custkey | total_order_count + o_custkey | total_order_count --------------------------------------------------------------------- 1466 | 1 1465 | 2 @@ -429,7 +429,7 @@ CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; - o_custkey + o_custkey --------------------------------------------------------------------- 1498 1498 @@ -439,18 +439,18 @@ SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; (5 rows) -- LIMIT/OFFSET with Joins -SELECT +SELECT li.l_partkey, o.o_custkey, li.l_quantity -FROM +FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey -WHERE +WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 - l_partkey | o_custkey | l_quantity + l_partkey | o_custkey | l_quantity --------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 @@ -478,7 +478,7 @@ SELECT GROUP BY l_orderkey ORDER BY 2 
DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max + l_orderkey | sum | sum | count | count | max | max --------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 @@ -505,7 +505,7 @@ SELECT HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max + l_orderkey | sum | sum | count | count | max | max --------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 diff --git a/src/test/regress/expected/multi_complex_expressions_0.out b/src/test/regress/expected/multi_complex_expressions_0.out index ae407e54a..9a2418d41 100644 --- a/src/test/regress/expected/multi_complex_expressions_0.out +++ b/src/test/regress/expected/multi_complex_expressions_0.out @@ -3,44 +3,44 @@ -- -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; - ?column? + ?column? --------------------------------------------------------------------- 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; - ?column? + ?column? --------------------------------------------------------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; - ?column? + ?column? --------------------------------------------------------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; - average + average --------------------------------------------------------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; - average_times_hundred + average_times_hundred --------------------------------------------------------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; - average_times_ten + average_times_ten --------------------------------------------------------------------- 254.4625000000000000 (1 row) -SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem +SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - l_quantity | count_quantity + l_quantity | count_quantity --------------------------------------------------------------------- 44.00 | 2150 38.00 | 2160 @@ -97,42 +97,42 @@ SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem -- Check that we can handle complex select clause expressions. 
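Every predicate exercised below is shipped to the workers verbatim, so the coordinator only merges one count per shard. A sketch of how to observe that (assuming the same distributed lineitem table; the plan shape, not the costs, is the point):

EXPLAIN (COSTS OFF)
SELECT count(*) FROM lineitem WHERE octet_length(l_comment) > 40;
-- expected shape: Aggregate -> Custom Scan (Citus Adaptive), with one task
-- per shard carrying the WHERE clause unchanged.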
SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; - count + count --------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; - count + count --------------------------------------------------------------------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; - count + count --------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; - count + count --------------------------------------------------------------------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; - count + count --------------------------------------------------------------------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -140,7 +140,7 @@ SELECT count(*) FROM lineitem WHERE random() = -0.1; -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; - count + count --------------------------------------------------------------------- 11423 (1 row) @@ -148,7 +148,7 @@ SELECT count(*) FROM lineitem -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -156,7 +156,7 @@ SELECT count(*) FROM lineitem -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -164,7 +164,7 @@ SELECT count(*) FROM lineitem -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -172,7 +172,7 @@ SELECT count(*) FROM lineitem -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; - count + count --------------------------------------------------------------------- 260 (1 row) @@ -180,7 +180,7 @@ SELECT count(*) FROM lineitem -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); - count + count --------------------------------------------------------------------- 7948 (1 row) @@ -188,7 +188,7 @@ SELECT count(*) FROM lineitem -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); - count + count --------------------------------------------------------------------- 9122 (1 row) @@ -196,7 +196,7 @@ SELECT count(*) FROM lineitem -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); - count + count --------------------------------------------------------------------- 9122 (1 row) @@ -204,7 +204,7 @@ SELECT count(*) FROM lineitem -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; - count + count 
--------------------------------------------------------------------- 2985 (1 row) @@ -212,7 +212,7 @@ SELECT count(*) FROM orders -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -220,7 +220,7 @@ SELECT count(*) FROM lineitem -- constant expressions can be pushed down SELECT count(*) FROM lineitem WHERE 0 != 0; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -228,7 +228,7 @@ SELECT count(*) FROM lineitem -- distinct expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; - count + count --------------------------------------------------------------------- 11999 (1 row) @@ -236,7 +236,7 @@ SELECT count(*) FROM lineitem -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); - count + count --------------------------------------------------------------------- 11882 (1 row) @@ -251,7 +251,7 @@ SELECT count(*) FROM lineitem isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); - count + count --------------------------------------------------------------------- 137 (1 row) @@ -263,7 +263,7 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber + l_linenumber --------------------------------------------------------------------- 1 (1 row) @@ -276,7 +276,7 @@ SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; - total_discount | count | sum | l_discount + total_discount | count | sum | l_discount --------------------------------------------------------------------- 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 @@ -299,7 +299,7 @@ SELECT l_linenumber FROM lineitem ORDER BY l_linenumber LIMIT 1; - l_linenumber + l_linenumber --------------------------------------------------------------------- 2 (1 row) @@ -314,7 +314,7 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem ORDER BY l_receiptdate LIMIT 1; - max | min | l_receiptdate + max | min | l_receiptdate --------------------------------------------------------------------- 3 | 0.07 | 01-09-1992 (1 row) @@ -322,21 +322,21 @@ SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem -- Check that we can handle implicit and explicit join clause definitions. 
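The three join spellings below are equivalent because l_orderkey = o_orderkey compares the distribution columns of the two tables, so Citus can join matching shards instead of repartitioning. A hedged way to confirm the plans match (illustrative, same schema as above):

EXPLAIN (COSTS OFF)
SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey
WHERE l_quantity < 5;
-- the implicit-join and WHERE-clause variants plan identically, which is
-- why all three queries return the same count of 951.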
SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; - count + count --------------------------------------------------------------------- 951 (1 row) @@ -355,7 +355,7 @@ ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with offset are not supported yet -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; - o_orderkey + o_orderkey --------------------------------------------------------------------- 69 70 @@ -371,18 +371,18 @@ SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; -SELECT +SELECT customer_keys.o_custkey, - SUM(order_count) AS total_order_count -FROM - (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count + SUM(order_count) AS total_order_count +FROM + (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys -GROUP BY +GROUP BY customer_keys.o_custkey -ORDER BY +ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; - o_custkey | total_order_count + o_custkey | total_order_count --------------------------------------------------------------------- 1466 | 1 1465 | 2 @@ -415,7 +415,7 @@ CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; - o_custkey + o_custkey --------------------------------------------------------------------- 1498 1498 @@ -425,18 +425,18 @@ SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; (5 rows) -- LIMIT/OFFSET with Joins -SELECT +SELECT li.l_partkey, o.o_custkey, li.l_quantity -FROM +FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey -WHERE +WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 - l_partkey | o_custkey | l_quantity + l_partkey | o_custkey | l_quantity --------------------------------------------------------------------- 655 | 58 | 50.00 669 | 319 | 34.00 @@ -464,7 +464,7 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max + l_orderkey | sum | sum | count | count | max | max --------------------------------------------------------------------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 @@ -491,7 +491,7 @@ SELECT HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | sum | sum | count | count | max | max + l_orderkey | sum | sum | count | count | max | max --------------------------------------------------------------------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 diff --git a/src/test/regress/expected/multi_count_type_conversion.out b/src/test/regress/expected/multi_count_type_conversion.out index af703a5ca..b2126bef7 100644 --- a/src/test/regress/expected/multi_count_type_conversion.out +++ 
b/src/test/regress/expected/multi_count_type_conversion.out @@ -10,7 +10,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity DESC; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 219 | 13.00 222 | 29.00 @@ -48,7 +48,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity ASC; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 273 | 28.00 264 | 30.00 diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index 1dec62d58..6604a247f 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -61,14 +61,14 @@ ERROR: could not identify a hash function for type dummy_type DETAIL: Partition column types must have a hash function defined to use hash partitioning. -- distribute table and inspect side effects SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; - partmethod | partkey + partmethod | partkey --------------------------------------------------------------------- h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) @@ -85,15 +85,15 @@ ERROR: replication_factor (3) exceeds number of worker nodes (2) HINT: Add more worker nodes or try again with a lower replication factor. 
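The replication-factor error above is checked against the worker nodes registered in the metadata. A minimal sketch of that precondition, using the stock Citus node catalog (pg_dist_node and its isactive/noderole columns are assumed from a standard installation, not from this test file):

SELECT count(*) AS active_primaries
  FROM pg_dist_node
 WHERE isactive AND noderole = 'primary';
-- the earlier call requesting replication factor 3 fails because this
-- count is 2.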
-- finally, create shards and inspect metadata SELECT master_create_worker_shards('table_to_distribute', 16, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; - shardstorage | shardminvalue | shardmaxvalue + shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- t | -2147483648 | -1879048193 t | -1879048192 | -1610612737 @@ -119,13 +119,13 @@ SELECT count(*) AS shard_count, FROM pg_dist_shard WHERE logicalrelid='table_to_distribute'::regclass GROUP BY shard_size; - shard_count | shard_size + shard_count | shard_size --------------------------------------------------------------------- 16 | 268435455 (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -135,16 +135,16 @@ SELECT master_create_worker_shards('table_to_distribute', 16, 1); ERROR: table "table_to_distribute" has already had shards created for it -- test list sorting SELECT sort_names('sumedh', 'jason', 'ozgun'); - sort_names + sort_names --------------------------------------------------------------------- jason + ozgun + sumedh + - + (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'throwaway%' AND relkind = 'r'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -160,15 +160,15 @@ SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; - shardstorage | shardminvalue | shardmaxvalue + shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- f | -2147483648 | -1879048193 f | -1879048192 | -1610612737 @@ -196,9 +196,9 @@ CREATE TABLE weird_shard_count ); SET citus.shard_count TO 7; SELECT create_distributed_table('weird_shard_count', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Citus ensures all shards are roughly the same size @@ -206,7 +206,7 @@ SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; - shard_size + shard_size --------------------------------------------------------------------- 613566755 613566755 diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 1e89be657..7132d4e51 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -29,9 +29,9 @@ SELECT create_distributed_table('lineitem', 'l_orderkey', 'append'); WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs 
on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX lineitem_time_index ON lineitem (l_shipdate); @@ -50,9 +50,9 @@ SELECT create_distributed_table('orders', 'o_orderkey', 'append'); WARNING: table "orders" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE orders_reference ( @@ -67,9 +67,9 @@ CREATE TABLE orders_reference ( o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_reference_table('orders_reference'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE customer ( @@ -82,9 +82,9 @@ CREATE TABLE customer ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_reference_table('customer'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE customer_append ( @@ -97,9 +97,9 @@ CREATE TABLE customer_append ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE nation ( @@ -108,9 +108,9 @@ CREATE TABLE nation ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE part ( @@ -124,9 +124,9 @@ CREATE TABLE part ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_reference_table('part'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE part_append ( @@ -140,9 +140,9 @@ CREATE TABLE part_append ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_distributed_table('part_append', 'p_partkey', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE supplier @@ -156,12 +156,12 @@ CREATE TABLE supplier s_comment varchar(101) not null ); SELECT create_reference_table('supplier'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) --- create a single shard supplier table which is not +-- create a single shard supplier table which is not -- a reference table CREATE TABLE supplier_single_shard ( @@ -174,9 +174,9 @@ CREATE TABLE supplier_single_shard s_comment varchar(101) not null ); SELECT create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE mx_table_test (col1 int, col2 text); @@ -190,13 +190,13 @@ HINT: Try again after reducing "citus.shard_replication_factor" to one or setti 
ng "citus.replication_model" to "statement".
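Streaming replication ('s' in the repmodel column these tests keep checking, versus 'c' for statement-based) requires every shard to have exactly one placement. A sketch of the GUC combination that the next statement relies on (illustrative; both settings appear throughout this file):

SET citus.replication_model TO streaming;  -- request an MX ('s') table
SET citus.shard_replication_factor TO 1;   -- streaming forbids factor > 1
-- with a factor above 1, create_distributed_table raises the hint shown above.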
-- ok, so now actually create the one-off MX table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('mx_table_test', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- s (1 row) @@ -208,13 +208,13 @@ SELECT master_create_distributed_table('s_table', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -227,7 +227,7 @@ DETAIL: The table s_table is marked as streaming replicated and the shard repli HINT: Use replication factor 1. DROP TABLE s_table; RESET citus.replication_model; --- Show that create_distributed_table with append and range distributions ignore +-- Show that create_distributed_table with append and range distributions ignore -- citus.replication_model GUC SET citus.shard_replication_factor TO 2; SET citus.replication_model TO streaming; @@ -235,13 +235,13 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -251,13 +251,13 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -270,13 +270,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. 
- master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -287,13 +287,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -304,13 +304,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -322,13 +322,13 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -338,13 +338,13 @@ CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -355,13 +355,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. 
- master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -372,13 +372,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -389,13 +389,13 @@ SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- c (1 row) @@ -423,13 +423,13 @@ HINT: Empty your table before distributing it. -- create_distributed_table creates shards and copies data into the distributed table SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM data_load_test ORDER BY col1; - col1 | col2 | col3 + col1 | col2 | col3 --------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 @@ -439,39 +439,39 @@ DROP TABLE data_load_test; -- test queries on distributed tables with no shards CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 + col1 | col2 --------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 + col1 | col2 --------------------------------------------------------------------- (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; - col1 | col2 + col1 | col2 --------------------------------------------------------------------- (0 rows) @@ -482,15 +482,15 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO data_load_test VALUES (243, 'world'); END; SELECT * FROM data_load_test ORDER BY col1; - col1 | col2 | col3 + col1 | col2 | col3 --------------------------------------------------------------------- 132 | hello | 1 243 | world | 2 @@ -503,24 +503,24 @@ CREATE TABLE data_load_test1 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test1 VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test1', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE data_load_test2 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test2 VALUES (132, 'world'); SELECT create_distributed_table('data_load_test2', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT a.col2 ||' '|| b.col2 FROM data_load_test1 a JOIN data_load_test2 b USING (col1) WHERE col1 = 132; - ?column? + ?column? 
--------------------------------------------------------------------- hello world (1 row) @@ -530,7 +530,7 @@ END; -- There should be no table on the worker node \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%'; - relname + relname --------------------------------------------------------------------- (0 rows) @@ -541,9 +541,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX data_load_test_idx ON data_load_test (col2); @@ -555,9 +555,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE data_load_test; @@ -568,9 +568,9 @@ CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO data_load_test VALUES (243, 'world'); @@ -583,23 +583,23 @@ INSERT INTO data_load_test VALUES (243, 'world', 'hello'); ALTER TABLE data_load_test DROP COLUMN col1; SELECT create_distributed_table('data_load_test', 'col3'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM data_load_test ORDER BY col2; - col2 | col3 | CoL4") + col2 | col3 | CoL4") --------------------------------------------------------------------- - hello | world | - world | hello | + hello | world | + world | hello | (2 rows) -- make sure the tuple went to the right shard SELECT * FROM data_load_test WHERE col3 = 'world'; - col2 | col3 | CoL4") + col2 | col3 | CoL4") --------------------------------------------------------------------- - hello | world | + hello | world | (1 row) DROP TABLE data_load_test; @@ -607,16 +607,16 @@ SET citus.shard_replication_factor TO default; SET citus.shard_count to 4; CREATE TABLE lineitem_hash_part (like lineitem); SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE orders_hash_part (like orders); SELECT create_distributed_table('orders_hash_part', 'o_orderkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE UNLOGGED TABLE unlogged_table @@ -625,13 +625,13 @@ CREATE UNLOGGED TABLE unlogged_table value text ); SELECT create_distributed_table('unlogged_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM master_get_table_ddl_events('unlogged_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE UNLOGGED TABLE public.unlogged_table (key text, value text) ALTER TABLE public.unlogged_table OWNER TO postgres @@ -639,7 +639,7 @@ SELECT * FROM master_get_table_ddl_events('unlogged_table'); \c - - - :worker_1_port SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; - relpersistence + relpersistence --------------------------------------------------------------------- u u @@ -652,22 +652,22 @@ SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- (0 rows) \c - - - :master_port --- Insert 3 rows to make sure that copy after shard creation touches the same --- worker node twice. +-- Insert 3 rows to make sure that copy after shard creation touches the same +-- worker node twice. BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); INSERT INTO rollback_table VALUES(1, 'Name_1'); @@ -675,16 +675,16 @@ INSERT INTO rollback_table VALUES(2, 'Name_2'); INSERT INTO rollback_table VALUES(3, 'Name_3'); SELECT create_distributed_table('rollback_table','id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- (0 rows) @@ -692,17 +692,17 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid F BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy rollback_table from stdin delimiter ',' CREATE INDEX rollback_index ON rollback_table(id); COMMIT; --- Check the table is created +-- Check the table is created SELECT count(*) FROM rollback_table; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -711,9 +711,9 @@ DROP TABLE rollback_table; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy rollback_table from stdin delimiter ',' @@ -721,7 +721,7 @@ ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- (0 rows) @@ -729,16 +729,16 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid F BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE tt2(id int); SELECT create_distributed_table('tt2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tt1 VALUES(1); @@ -747,32 +747,32 @@ COMMIT; -- Table should exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - id | integer | + id | integer | (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - id | integer | + id | integer | (1 row) \c - - - :master_port DROP TABLE tt1; DROP TABLE tt2; --- It is known that creating a table with master_create_empty_shard is not +-- It is known that creating a table with master_create_empty_shard is not -- transactional, so the table remains on the worker node after the rollback BEGIN; CREATE TABLE append_tt1(id int); SELECT create_distributed_table('append_tt1','id','append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row)
SELECT master_create_empty_shard('append_tt1'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 360077 (1 row) @@ -781,16 +781,16 @@ ROLLBACK; -- Table exists on the worker node. \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - id | integer | + id | integer | (1 row) \c - - - :master_port -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%'); - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- (0 rows) @@ -802,14 +802,14 @@ CREATE TABLE tt1(id int); INSERT INTO tt1 VALUES(1); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tt1 VALUES(2); SELECT * FROM tt1 WHERE id = 1; - id + id --------------------------------------------------------------------- 1 (1 row) @@ -818,9 +818,9 @@ COMMIT; -- Placements should be created on the worker \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - id | integer | + id | integer | (1 row) \c - - - :master_port @@ -828,9 +828,9 @@ DROP TABLE tt1; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE tt1; @@ -838,7 +838,7 @@ COMMIT; -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%'); - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- (0 rows) @@ -848,9 +848,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid f -- in this order of execution CREATE TABLE sample_table(id int); SELECT create_distributed_table('sample_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -858,23 +858,23 @@ CREATE TABLE stage_table (LIKE sample_table); \COPY stage_table FROM stdin; -- Note that this operation is a local copy SELECT create_distributed_table('stage_table', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO sample_table SELECT * FROM stage_table;
 DROP TABLE stage_table;
 SELECT * FROM sample_table WHERE id = 3;
- id 
+ id
 ---------------------------------------------------------------------
 3
 (1 row)

 COMMIT;
--- Show that rows of sample_table are updated 
+-- Show that rows of sample_table are updated
 SELECT count(*) FROM sample_table;
- count 
+ count
 ---------------------------------------------------------------------
 4
 (1 row)

@@ -885,36 +885,36 @@ DROP table sample_table;
 BEGIN;
 CREATE TABLE tt1(id int);
 SELECT create_distributed_table('tt1','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 \COPY tt1 from stdin;
 CREATE TABLE tt2(like tt1);
 SELECT create_distributed_table('tt2','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 \COPY tt2 from stdin;
 INSERT INTO tt1 SELECT * FROM tt2;
 SELECT * FROM tt1 WHERE id = 3;
- id 
+ id
 ---------------------------------------------------------------------
 3
 (1 row)

 SELECT * FROM tt2 WHERE id = 6;
- id 
+ id
 ---------------------------------------------------------------------
 6
 (1 row)

 END;
 SELECT count(*) FROM tt1;
- count 
+ count
 ---------------------------------------------------------------------
 6
 (1 row)

@@ -929,18 +929,18 @@ CREATE TABLE sc.ref(a int);
 insert into sc.ref SELECT s FROM generate_series(0, 100) s;
 SELECT create_reference_table('sc.ref');
 NOTICE:  Copying data from local table...
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE sc.hash(a int);
 insert into sc.hash SELECT s FROM generate_series(0, 100) s;
 SELECT create_distributed_table('sc.hash', 'a');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
@@ -951,18 +951,18 @@ CREATE TABLE sc2.hash(a int);
 insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
 SELECT create_distributed_table('sc2.hash', 'a');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE sc2.ref(a int);
 insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
 SELECT create_reference_table('sc2.ref');
 NOTICE:  Copying data from local table...
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
@@ -976,14 +976,14 @@ CREATE TABLE sc3.alter_replica_table
 );
 ALTER TABLE sc3.alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
 SELECT create_distributed_table('sc3.alter_replica_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
 SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc3' LIMIT 1$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,i)
 (localhost,57638,t,i)
@@ -1001,14 +1001,14 @@ SET search_path = 'sc4';
 ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table_pkey;
 SELECT create_distributed_table('alter_replica_table', 'id');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
 SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc4' LIMIT 1$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,i)
 (localhost,57638,t,i)
@@ -1026,14 +1026,14 @@ INSERT INTO sc5.alter_replica_table(id) SELECT generate_series(1,100);
 ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
 SELECT create_distributed_table('sc5.alter_replica_table', 'id');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
 SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc5' LIMIT 1$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,f)
 (localhost,57638,t,f)
@@ -1051,14 +1051,14 @@ CREATE UNIQUE INDEX unique_idx ON sc6.alter_replica_table(id);
 ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
 SELECT create_distributed_table('sc6.alter_replica_table', 'id');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
 SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='sc6' LIMIT 1$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,i)
 (localhost,57638,t,i)
@@ -1075,14 +1075,14 @@ CREATE UNIQUE INDEX unique_idx ON alter_replica_table(id);
 ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
 SELECT create_distributed_table('alter_replica_table', 'id');
 NOTICE:  Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
 SELECT run_command_on_workers($$SELECT relreplident FROM pg_class join information_schema.tables AS tables ON (pg_class.relname=tables.table_name) WHERE relname LIKE 'alter_replica_table_%' AND table_schema='public' LIMIT 1$$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,i)
 (localhost,57638,t,i)
diff --git a/src/test/regress/expected/multi_create_table_constraints.out b/src/test/regress/expected/multi_create_table_constraints.out
index 514ec0cb3..a08a2f54b 100644
--- a/src/test/regress/expected/multi_create_table_constraints.out
+++ b/src/test/regress/expected/multi_create_table_constraints.out
@@ -12,9 +12,9 @@ SELECT create_distributed_table('uniq_cns_append_tables', 'partition_col', 'appe
 WARNING:  table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint
 DETAIL:  UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE excl_cns_append_tables
@@ -27,9 +27,9 @@ SELECT create_distributed_table('excl_cns_append_tables', 'partition_col', 'appe
 WARNING:  table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint
 DETAIL:  UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- test that Citus cannot distribute unique constraints that do not include
@@ -59,7 +59,7 @@ CREATE TABLE ex_on_non_part_col
 SELECT create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash');
 ERROR:  cannot create constraint on "ex_on_non_part_col"
 DETAIL:  Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
--- now show that Citus can distribute unique and EXCLUDE constraints that 
+-- now show that Citus can distribute unique and EXCLUDE constraints that
 -- include the partition column for hash-partitioned tables.
 -- However, EXCLUDE constraints must include the partition column with
 -- an equality operator.
@@ -70,9 +70,9 @@ CREATE TABLE pk_on_part_col
 other_col integer
 );
 SELECT create_distributed_table('pk_on_part_col', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE uq_part_col
@@ -81,9 +81,9 @@ CREATE TABLE uq_part_col
 other_col integer
 );
 SELECT create_distributed_table('uq_part_col', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE uq_two_columns
@@ -93,9 +93,9 @@ CREATE TABLE uq_two_columns
 UNIQUE (partition_col, other_col)
 );
 SELECT create_distributed_table('uq_two_columns', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1);
@@ -110,9 +110,9 @@ CREATE TABLE ex_on_part_col
 EXCLUDE (partition_col WITH =)
 );
 SELECT create_distributed_table('ex_on_part_col', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1);
@@ -127,9 +127,9 @@ CREATE TABLE ex_on_two_columns
 EXCLUDE (partition_col WITH =, other_col WITH =)
 );
 SELECT create_distributed_table('ex_on_two_columns', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1);
@@ -144,9 +144,9 @@ CREATE TABLE ex_on_two_columns_prt
 EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100)
 );
 SELECT create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1);
@@ -172,9 +172,9 @@ CREATE TABLE ex_overlaps
 EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
 );
 SELECT create_distributed_table('ex_overlaps', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
@@ -182,8 +182,8 @@ INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00
 ERROR:  conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365027"
 DETAIL:  Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]).
 CONTEXT:  while executing command on localhost:xxxxx
--- now show that Citus can distribute unique and EXCLUDE constraints that 
--- include the partition column, for hash-partitioned tables. 
+-- now show that Citus can distribute unique and EXCLUDE constraints that
+-- include the partition column, for hash-partitioned tables.
 -- However, EXCLUDE constraints must include the partition column with
 -- an equality operator.
 -- These tests are for NAMED constraints.
@@ -193,9 +193,9 @@ CREATE TABLE pk_on_part_col_named
 other_col integer
 );
 SELECT create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE uq_part_col_named
@@ -204,9 +204,9 @@ CREATE TABLE uq_part_col_named
 other_col integer
 );
 SELECT create_distributed_table('uq_part_col_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE uq_two_columns_named
@@ -216,9 +216,9 @@ CREATE TABLE uq_two_columns_named
 CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col)
 );
 SELECT create_distributed_table('uq_two_columns_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1);
@@ -233,9 +233,9 @@ CREATE TABLE ex_on_part_col_named
 CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =)
 );
 SELECT create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1);
@@ -250,9 +250,9 @@ CREATE TABLE ex_on_two_columns_named
 CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =)
 );
 SELECT create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1);
@@ -269,9 +269,9 @@ CREATE TABLE ex_multiple_excludes
 CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =)
 );
 SELECT create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1);
@@ -299,9 +299,9 @@ CREATE TABLE ex_overlaps_named
 CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =)
 );
 SELECT create_distributed_table('ex_overlaps_named', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]');
@@ -316,9 +316,9 @@ CREATE TABLE uq_range_tables
 other_col integer
 );
 SELECT create_distributed_table('uq_range_tables', 'partition_col', 'range');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- show that CHECK constraints are distributed.
@@ -329,21 +329,21 @@ CREATE TABLE check_example
 other_other_col integer CHECK (abs(other_other_col) >= 100)
 );
 SELECT create_distributed_table('check_example', 'partition_col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 \c - - - :worker_1_port
 SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'check_example_partition_col_key_365056'::regclass;
- Column | Type | Definition 
+ Column | Type | Definition
 ---------------------------------------------------------------------
 partition_col | integer | partition_col
 (1 row)

 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
- Constraint | Definition 
+ Constraint | Definition
 ---------------------------------------------------------------------
 check_example_other_col_check | CHECK (other_col >= 100)
 check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100)
@@ -375,21 +375,21 @@ SET citus.shard_count = 4;
 SET citus.shard_replication_factor = 1;
 CREATE TABLE raw_table_1 (user_id int, UNIQUE(user_id));
 SELECT create_distributed_table('raw_table_1', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(user_id));
 SELECT create_distributed_table('raw_table_2', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- see that the constraint exists
 SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;
- Constraint | Definition 
+ Constraint | Definition
 ---------------------------------------------------------------------
 raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id)
 (1 row)

@@ -404,7 +404,7 @@ DROP TABLE raw_table_1 CASCADE;
 NOTICE:  drop cascades to constraint raw_table_2_user_id_fkey on table raw_table_2
 -- see that the constraint also dropped
 SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;
- Constraint | Definition 
+ Constraint | Definition
 ---------------------------------------------------------------------
 (0 rows)

diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out
index 009fe0851..7ee7fb374 100644
--- a/src/test/regress/expected/multi_cross_shard.out
+++ b/src/test/regress/expected/multi_cross_shard.out
@@ -6,13 +6,13 @@
 -- Create a distributed table and add data to it
 CREATE TABLE multi_task_table
 (
-	id int, 
+	id int,
 	name varchar(20)
 );
 SELECT create_distributed_table('multi_task_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO multi_task_table VALUES(1, 'elem_1');
@@ -20,13 +20,13 @@ INSERT INTO multi_task_table VALUES(2, 'elem_2');
 INSERT INTO multi_task_table VALUES(3, 'elem_3');
 -- Shouldn't log anything when the log level is 'off'
 SHOW citus.multi_task_query_log_level;
- citus.multi_task_query_log_level 
+ citus.multi_task_query_log_level
 ---------------------------------------------------------------------
 off
 (1 row)

 SELECT * FROM multi_task_table ORDER BY 1;
- id | name 
+ id | name
 ---------------------------------------------------------------------
 1 | elem_1
 2 | elem_2
@@ -38,7 +38,7 @@ SET citus.multi_task_query_log_level TO notice;
 SELECT * FROM multi_task_table ORDER BY 1;
 NOTICE:  multi-task query about to be executed
 HINT:  Queries are split to multiple tasks if they have to be split into several queries on the workers.
- id | name 
+ id | name
 ---------------------------------------------------------------------
 1 | elem_1
 2 | elem_2
@@ -48,7 +48,7 @@ HINT:  Queries are split to multiple tasks if they have to be split into several
 SELECT AVG(id) AS avg_id FROM multi_task_table;
 NOTICE:  multi-task query about to be executed
 HINT:  Queries are split to multiple tasks if they have to be split into several queries on the workers.
- avg_id 
+ avg_id
 ---------------------------------------------------------------------
 2.0000000000000000
 (1 row)

@@ -58,7 +58,7 @@ SET citus.multi_task_query_log_level TO error;
 SELECT * FROM multi_task_table;
 ERROR:  multi-task query about to be executed
 HINT:  Queries are split to multiple tasks if they have to be split into several queries on the workers.
--- Check the log message with INSERT INTO ... SELECT 
+-- Check the log message with INSERT INTO ... SELECT
 CREATE TABLE raw_table
 (
 	id int,
 	order_count int
 );
@@ -70,15 +70,15 @@ CREATE TABLE summary_table
 (
 	order_sum BIGINT
 );
 SELECT create_distributed_table('raw_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_distributed_table('summary_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO raw_table VALUES(1, '15');
@@ -102,7 +102,7 @@ INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id =
 -- Should have four rows (three rows from the query without where and the one from with where)
 SET citus.multi_task_query_log_level to DEFAULT;
 SELECT * FROM summary_table ORDER BY 1,2;
- id | order_sum 
+ id | order_sum
 ---------------------------------------------------------------------
 1 | 35
 1 | 35
@@ -126,7 +126,7 @@ ROLLBACK;
 -- Should have only four rows since the transaction is rollbacked.
 SET citus.multi_task_query_log_level to DEFAULT;
 SELECT * FROM summary_table ORDER BY 1,2;
- id | order_sum 
+ id | order_sum
 ---------------------------------------------------------------------
 1 | 35
 1 | 35
@@ -138,7 +138,7 @@ SELECT * FROM summary_table ORDER BY 1,2;
 SET citus.multi_task_query_log_level TO notice;
 -- Shouldn't log since it is a router select query
 SELECT * FROM raw_table WHERE ID = 1;
- id | order_count 
+ id | order_count
 ---------------------------------------------------------------------
 1 | 15
 1 | 20
@@ -157,15 +157,15 @@ CREATE TABLE tt2
 	count bigint
 );
 SELECT create_distributed_table('tt1', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_distributed_table('tt2', 'name');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO tt1 VALUES(1, 'Ahmet');
@@ -177,7 +177,7 @@ SET citus.task_executor_type to "task-tracker";
 SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id;
 NOTICE:  multi-task query about to be executed
 HINT:  Queries are split to multiple tasks if they have to be split into several queries on the workers.
- id | count 
+ id | count
 ---------------------------------------------------------------------
 1 | 5
 2 | 15
diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out
index b63653538..5c44cb1eb 100644
--- a/src/test/regress/expected/multi_data_types.out
+++ b/src/test/regress/expected/multi_data_types.out
@@ -16,9 +16,9 @@ SELECT run_command_on_coordinator_and_workers($cf$
 IMMUTABLE
 RETURNS NULL ON NULL INPUT;
 $cf$);
- run_command_on_coordinator_and_workers 
+ run_command_on_coordinator_and_workers
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT run_command_on_coordinator_and_workers($cf$
@@ -28,9 +28,9 @@ SELECT run_command_on_coordinator_and_workers($cf$
 IMMUTABLE
 RETURNS NULL ON NULL INPUT;
 $cf$);
- run_command_on_coordinator_and_workers 
+ run_command_on_coordinator_and_workers
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- ... use that function to create a custom equality operator...
@@ -42,9 +42,9 @@ SELECT run_command_on_coordinator_and_workers($co$
 HASHES
 );
 $co$);
- run_command_on_coordinator_and_workers 
+ run_command_on_coordinator_and_workers
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- ... and create a custom operator family for hash indexes...
@@ -52,7 +52,7 @@ CREATE OPERATOR FAMILY cats_op_fam USING hash;
 -- ... create a test HASH function. Though it is a poor hash function,
 -- it is acceptable for our tests
 CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int
-AS 'SELECT hashtext( ($1.i + $1.i2)::text);' 
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
 LANGUAGE SQL
 IMMUTABLE
 RETURNS NULL ON NULL INPUT;
@@ -74,26 +74,26 @@ CREATE TABLE composite_type_partitioned_table
 );
 SET citus.shard_replication_factor TO 1;
 SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

--- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table 
+-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type);
 INSERT INTO composite_type_partitioned_table VALUES (2, '(3, 4)'::test_composite_type);
 INSERT INTO composite_type_partitioned_table VALUES (3, '(5, 6)'::test_composite_type);
 INSERT INTO composite_type_partitioned_table VALUES (4, '(7, 8)'::test_composite_type);
 INSERT INTO composite_type_partitioned_table VALUES (5, '(9, 10)'::test_composite_type);
 SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type;
- id | col 
+ id | col
 ---------------------------------------------------------------------
 4 | (7,8)
 (1 row)

 UPDATE composite_type_partitioned_table SET id = 6 WHERE col = '(7, 8)'::test_composite_type;
 SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type;
- id | col 
+ id | col
 ---------------------------------------------------------------------
 6 | (7,8)
 (1 row)

@@ -101,23 +101,23 @@ SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_compo
 -- create and distribute a table on enum type column
 CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed');
 CREATE TABLE bugs (
-	id integer, 
+	id integer,
 	status bug_status
 );
 SELECT create_distributed_table('bugs', 'status', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

--- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table 
+-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO bugs VALUES (1, 'new');
 INSERT INTO bugs VALUES (2, 'open');
 INSERT INTO bugs VALUES (3, 'closed');
 INSERT INTO bugs VALUES (4, 'closed');
 INSERT INTO bugs VALUES (5, 'open');
 SELECT * FROM bugs WHERE status = 'closed'::bug_status;
- id | status 
+ id | status
 ---------------------------------------------------------------------
 3 | closed
 4 | closed
@@ -126,39 +126,39 @@ SELECT * FROM bugs WHERE status = 'closed'::bug_status;
 UPDATE bugs SET status = 'closed'::bug_status WHERE id = 2;
 ERROR:  modifying the partition value of rows is not allowed
 SELECT * FROM bugs WHERE status = 'open'::bug_status;
- id | status 
+ id | status
 ---------------------------------------------------------------------
 2 | open
 5 | open
 (2 rows)

 -- create and distribute a table on varchar column
-CREATE TABLE varchar_hash_partitioned_table 
+CREATE TABLE varchar_hash_partitioned_table
 (
 	id int,
 	name varchar
 );
 SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

--- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table 
+-- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason');
 INSERT INTO varchar_hash_partitioned_table VALUES (2, 'Ozgun');
 INSERT INTO varchar_hash_partitioned_table VALUES (3, 'Onder');
 INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh');
 INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco');
 SELECT * FROM varchar_hash_partitioned_table WHERE id = 1;
- id | name 
+ id | name
 ---------------------------------------------------------------------
 1 | Jason
 (1 row)

 UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason';
 SELECT * FROM varchar_hash_partitioned_table WHERE id = 6;
- id | name 
+ id | name
 ---------------------------------------------------------------------
 6 | Jason
 (1 row)

diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out
index ecbad5946..2af87f91e 100644
--- a/src/test/regress/expected/multi_deparse_function.out
+++ b/src/test/regress/expected/multi_deparse_function.out
@@ -62,9 +62,9 @@ CREATE FUNCTION add(integer, integer) RETURNS integer
 IMMUTABLE
 RETURNS NULL ON NULL INPUT;
 SELECT create_distributed_function('add(int,int)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -72,7 +72,7 @@ ALTER FUNCTION add CALLED ON NULL INPUT
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) CALLED ON NULL INPUT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -85,7 +85,7 @@ ALTER FUNCTION add RETURNS NULL ON NULL INPUT
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -96,7 +96,7 @@ ALTER FUNCTION add STRICT
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STRICT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -107,7 +107,7 @@ ALTER FUNCTION add IMMUTABLE
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) IMMUTABLE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -118,7 +118,7 @@ ALTER FUNCTION add STABLE
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) STABLE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -129,7 +129,7 @@ ALTER FUNCTION add VOLATILE
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -140,7 +140,7 @@ ALTER FUNCTION add LEAKPROOF
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) LEAKPROOF;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -151,7 +151,7 @@ ALTER FUNCTION add NOT LEAKPROOF
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) NOT LEAKPROOF;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -164,7 +164,7 @@ ALTER FUNCTION add EXTERNAL SECURITY INVOKER
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -175,7 +175,7 @@ ALTER FUNCTION add SECURITY INVOKER
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY INVOKER;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -186,7 +186,7 @@ ALTER FUNCTION add EXTERNAL SECURITY DEFINER
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY DEFINER;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -197,7 +197,7 @@ ALTER FUNCTION add SECURITY DEFINER
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SECURITY DEFINER;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -208,7 +208,7 @@ ALTER FUNCTION add PARALLEL UNSAFE
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL UNSAFE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -219,7 +219,7 @@ ALTER FUNCTION add PARALLEL RESTRICTED
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL RESTRICTED;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -230,7 +230,7 @@ ALTER FUNCTION add PARALLEL SAFE
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) PARALLEL SAFE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -242,7 +242,7 @@ ALTER FUNCTION add COST 1234
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.000000;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -253,7 +253,7 @@ ALTER FUNCTION add COST 1234.5
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) COST 1234.500000;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -264,7 +264,7 @@ ALTER FUNCTION add SET log_min_messages = ERROR
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages = 'error';
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -275,7 +275,7 @@ ALTER FUNCTION add SET log_min_messages TO DEFAULT
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages TO DEFAULT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -286,7 +286,7 @@ ALTER FUNCTION add SET log_min_messages FROM CURRENT
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET log_min_messages FROM CURRENT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -297,7 +297,7 @@ ALTER FUNCTION add(int, int) SET TIME ZONE INTERVAL '-08:00' HOUR TO MINUTE;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET TIME ZONE INTERVAL '@ 8 hours ago';
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -308,7 +308,7 @@ ALTER FUNCTION add(int, int) SET TIME ZONE '-7';
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET timezone = '-7';
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -319,7 +319,7 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO 'hello '' world';
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world';
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -330,7 +330,7 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO -3.2;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -3.2;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -341,7 +341,7 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO -32;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = -32;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -354,7 +354,7 @@ ALTER FUNCTION add(int, int) SET "citus.setting;'" TO 'hello '' world', 'second
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET "citus.setting;'" = 'hello '' world', 'second '' item';
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,f,"ERROR:  SET citus.setting;' takes only one argument")
 (localhost,57638,f,"ERROR:  SET citus.setting;' takes only one argument")
@@ -365,7 +365,7 @@ ALTER FUNCTION add RESET log_min_messages
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET log_min_messages;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -376,7 +376,7 @@ ALTER FUNCTION add RESET ALL
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RESET ALL;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -388,7 +388,7 @@ ALTER FUNCTION add RENAME TO summation
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) RENAME TO summation;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -403,7 +403,7 @@ ALTER FUNCTION summation RENAME TO add
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.summation(integer, integer) RENAME TO add;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -415,7 +415,7 @@ CREATE ROLE function_role;
 NOTICE:  not propagating CREATE ROLE/USER commands to worker nodes
 HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 SELECT run_command_on_workers('CREATE ROLE function_role');
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
@@ -426,7 +426,7 @@ ALTER FUNCTION add OWNER TO function_role
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO function_role;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -437,7 +437,7 @@ ALTER FUNCTION add OWNER TO missing_role
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) OWNER TO missing_role;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,f,"ERROR:  role ""missing_role"" does not exist")
 (localhost,57638,f,"ERROR:  role ""missing_role"" does not exist")
@@ -449,7 +449,7 @@ ALTER FUNCTION add SET SCHEMA public
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) SET SCHEMA public;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -462,7 +462,7 @@ ALTER FUNCTION public.add SET SCHEMA function_tests
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION public.add(integer, integer) SET SCHEMA function_tests;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -474,7 +474,7 @@ ALTER FUNCTION add DEPENDS ON EXTENSION citus
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) DEPENDS ON EXTENSION citus;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -486,7 +486,7 @@ ALTER FUNCTION pg_catalog.get_shard_id_for_distribution_column(table_name regcla
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION pg_catalog.get_shard_id_for_distribution_column(table_name regclass, distribution_value "any") PARALLEL SAFE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -496,7 +496,7 @@ CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
 SELECT deparse_test($cmd$
 DROP FUNCTION add(int,int);
 $cmd$);
- deparse_test 
+ deparse_test
 ---------------------------------------------------------------------
 DROP FUNCTION function_tests.add(integer, integer);
 (1 row)

@@ -507,7 +507,7 @@ ALTER FUNCTION add volatile leakproof SECURITY DEFINER PARALLEL unsafe;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.add(integer, integer) VOLATILE LEAKPROOF SECURITY DEFINER PARALLEL UNSAFE;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -520,7 +520,7 @@ DROP FUNCTION missing_function(int, text);
 $cmd$);
 INFO:  Propagating deparsed query: DROP FUNCTION missing_function(pg_catalog.int4,text);
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,f,"ERROR:  function missing_function(integer, text) does not exist")
 (localhost,57638,f,"ERROR:  function missing_function(integer, text) does not exist")
@@ -533,7 +533,7 @@ DROP FUNCTION IF EXISTS missing_function(int, text);
 $cmd$);
 INFO:  Propagating deparsed query: DROP FUNCTION IF EXISTS missing_function(pg_catalog.int4,text);
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP FUNCTION")
 (localhost,57638,t,"DROP FUNCTION")
@@ -544,7 +544,7 @@ DROP FUNCTION IF EXISTS missing_schema.missing_function(int,float);
 $cmd$);
 INFO:  Propagating deparsed query: DROP FUNCTION IF EXISTS missing_schema.missing_function(pg_catalog.int4,pg_catalog.float8);
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP FUNCTION")
 (localhost,57638,t,"DROP FUNCTION")
@@ -555,7 +555,7 @@ DROP FUNCTION IF EXISTS missing_func_without_args;
 $cmd$);
 INFO:  Propagating deparsed query: DROP FUNCTION IF EXISTS missing_func_without_args;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP FUNCTION")
 (localhost,57638,t,"DROP FUNCTION")
@@ -568,7 +568,7 @@ SELECT run_command_on_workers($$
 CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN";
 CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2";
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SCHEMA")
 (localhost,57638,t,"CREATE SCHEMA")
@@ -582,15 +582,15 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT
 AS $$ SELECT 'Overloaded function called with param: ' || $1 $$
 LANGUAGE SQL;
 SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -598,7 +598,7 @@ ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2";
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -610,7 +610,7 @@ DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!?!"(),"CiTuS.TeeN"."TeeNFunCT10N.1!?
 $cmd$);
 INFO:  Propagating deparsed query: DROP FUNCTION "CiTUS.TEEN2"."TeeNFunCT10N.1!?!"(), "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text);
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP FUNCTION")
 (localhost,57638,t,"DROP FUNCTION")
@@ -621,9 +621,9 @@ CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT
 AS $$ SELECT 'supplied param is : ' || param; $$
 LANGUAGE SQL;
 SELECT create_distributed_function('func_default_param(INT)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -631,7 +631,7 @@ ALTER FUNCTION func_default_param RENAME TO func_with_default_param;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.func_default_param(param integer) RENAME TO func_with_default_param;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -642,9 +642,9 @@ CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT)
 AS $$ SELECT 'supplied param is : ' || param; $$
 LANGUAGE SQL;
 SELECT create_distributed_function('func_out_param(INT)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -652,7 +652,7 @@ ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.func_out_param(param integer, OUT result text) RENAME TO func_in_and_out_param;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -666,9 +666,9 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql;
 SELECT create_distributed_function('square(NUMERIC)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -676,7 +676,7 @@ ALTER FUNCTION square SET search_path TO DEFAULT;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.square(INOUT a numeric) SET search_path TO DEFAULT;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -697,9 +697,9 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql;
 SELECT create_distributed_function('sum_avg(NUMERIC[])');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -707,7 +707,7 @@ ALTER FUNCTION sum_avg COST 10000;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.sum_avg(VARIADIC list numeric[], OUT total numeric, OUT average numeric) COST 10000.000000;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -719,9 +719,9 @@ CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT)
 AS $$ SELECT param.x + param.y $$
 LANGUAGE SQL;
 SELECT create_distributed_function('func_custom_param(intpair)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -729,7 +729,7 @@ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_param(param function_tests.intpair, OUT total integer) RENAME TO func_with_custom_param;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -741,9 +741,9 @@ CREATE FUNCTION func_returns_table(IN count INT)
 AS $$ SELECT i,i FROM generate_series(1,count) i $$
 LANGUAGE SQL;
 SELECT create_distributed_function('func_returns_table(INT)');
- create_distributed_function 
+ create_distributed_function
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT deparse_and_run_on_workers($cmd$
@@ -751,7 +751,7 @@ ALTER FUNCTION func_returns_table ROWS 100;
 $cmd$);
 INFO:  Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(count integer) ROWS 100.000000;
 CONTEXT:  PL/pgSQL function deparse_and_run_on_workers(text) line 6 at RAISE
- deparse_and_run_on_workers 
+ deparse_and_run_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"ALTER FUNCTION")
 (localhost,57638,t,"ALTER FUNCTION")
@@ -767,7 +767,7 @@ SELECT run_command_on_workers($$
 DROP SCHEMA "CiTUS.TEEN2" CASCADE;
 DROP SCHEMA function_tests CASCADE;
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,"DROP SCHEMA")
 (localhost,57638,t,"DROP SCHEMA")
diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out
index c7138d557..f31578f38 100644
--- a/src/test/regress/expected/multi_deparse_procedure.out
+++ b/src/test/regress/expected/multi_deparse_procedure.out
@@ -17,13 +17,13 @@
 -- SET configuration_parameter FROM CURRENT
 -- RESET configuration_parameter
 -- RESET ALL
--- 
+--
 -- DROP PROCEDURE [ IF EXISTS ] name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] [, ...]
 -- [ CASCADE | RESTRICT ]
--- 
+--
 -- Please note that current deparser does not return errors on some invalid queries.
--- 
--- For example CALLED ON NULL INPUT action is valid only for FUNCTIONS, but we still 
+--
+-- For example CALLED ON NULL INPUT action is valid only for FUNCTIONS, but we still
 -- allow deparsing them here.
SET citus.next_shard_id TO 20030000; SET citus.enable_ddl_propagation TO off; @@ -39,7 +39,7 @@ CREATE FUNCTION deparse_and_run_on_workers(text) RETURNS SETOF record AS $fnc$ WITH deparsed_query AS ( SELECT deparse_test($1) qualified_query ) - SELECT run_command_on_workers(qualified_query) FROM deparsed_query d + SELECT run_command_on_workers(qualified_query) FROM deparsed_query d $fnc$ LANGUAGE SQL; -- Create a simple PROCEDURE and distribute it @@ -50,15 +50,15 @@ BEGIN END; $proc$; SELECT create_distributed_function('raise_info(text)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info CALLED ON NULL INPUT $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -67,7 +67,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RETURNS NULL ON NULL INPUT $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -76,7 +76,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STRICT $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -85,7 +85,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info IMMUTABLE $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -94,7 +94,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info STABLE $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -103,7 +103,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info VOLATILE $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -112,7 +112,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info LEAKPROOF $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -121,7 +121,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info NOT LEAKPROOF $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers 
--------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -130,7 +130,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY INVOKER $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -139,7 +139,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -148,7 +148,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info EXTERNAL SECURITY DEFINER $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -157,7 +157,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY DEFINER $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -166,7 +166,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL UNSAFE $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -175,7 +175,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL RESTRICTED $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -184,7 +184,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info PARALLEL SAFE $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -194,7 +194,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234 $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -203,7 +203,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info COST 1234.5 $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -212,7 +212,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE 
raise_info ROWS 10 $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -221,7 +221,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info ROWS 10.8 $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: invalid attribute in procedure definition") (localhost,57638,f,"ERROR: invalid attribute in procedure definition") @@ -230,7 +230,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SECURITY INVOKER SET client_min_messages TO warning; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -239,7 +239,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages = ERROR $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -248,7 +248,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages TO DEFAULT $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -257,7 +257,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET log_min_messages FROM CURRENT $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -266,7 +266,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET log_min_messages $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -275,7 +275,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RESET ALL $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -285,7 +285,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info RENAME TO summation; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -295,7 +295,7 @@ ALTER PROCEDURE raise_info RENAME TO summation; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE summation RENAME TO raise_info; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -306,7 +306,7 @@ CREATE ROLE procedure_role; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to 
manually create all necessary users and roles. SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -315,7 +315,7 @@ SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO procedure_role $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -324,7 +324,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO missing_role $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,f,"ERROR: role ""missing_role"" does not exist") (localhost,57638,f,"ERROR: role ""missing_role"" does not exist") @@ -334,7 +334,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info SET SCHEMA public; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -344,7 +344,7 @@ ALTER PROCEDURE raise_info SET SCHEMA public; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -354,7 +354,7 @@ ALTER PROCEDURE public.raise_info SET SCHEMA procedure_tests; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info DEPENDS ON EXTENSION citus $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"ALTER PROCEDURE") (localhost,57638,t,"ALTER PROCEDURE") @@ -363,7 +363,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE raise_info(text); $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") @@ -373,7 +373,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_PROCEDURE(int, text); $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") @@ -382,7 +382,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float); $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") @@ -391,7 +391,7 @@ $cmd$); SELECT deparse_and_run_on_workers($cmd$ DROP PROCEDURE IF EXISTS missing_schema.missing_PROCEDURE(int,float) CASCADE; $cmd$); - deparse_and_run_on_workers + deparse_and_run_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP PROCEDURE") (localhost,57638,t,"DROP PROCEDURE") @@ -402,7 +402,7 @@ SET 
client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; DROP ROLE procedure_role; SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP ROLE") (localhost,57638,t,"DROP ROLE") diff --git a/src/test/regress/expected/multi_deparse_shard_query.out b/src/test/regress/expected/multi_deparse_shard_query.out index 6816d9e04..32a97eafa 100644 --- a/src/test/regress/expected/multi_deparse_shard_query.out +++ b/src/test/regress/expected/multi_deparse_shard_query.out @@ -20,9 +20,9 @@ CREATE TABLE raw_events_1 event_at date DEfAULT now() ); SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create the first table @@ -38,14 +38,14 @@ CREATE TABLE raw_events_2 event_at date DEfAULT now() ); SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE aggregated_events (tenant_id bigint, - sum_value_1 bigint, + sum_value_1 bigint, average_value_2 float, average_value_3 float, sum_value_4 bigint, @@ -53,9 +53,9 @@ CREATE TABLE aggregated_events average_value_6 int, rollup_hour date); SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- start with very simple examples on a single table @@ -64,9 +64,9 @@ INSERT INTO raw_events_1 SELECT * FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' @@ -77,9 +77,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- now that shuffle columns a bit on a single table @@ -91,9 +91,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- same test on two different tables @@ -105,9 +105,9 @@ FROM raw_events_2; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2 - deparse_shard_query_test + deparse_shard_query_test 
--------------------------------------------------------------------- - + (1 row) -- lets do some simple aggregations @@ -121,9 +121,9 @@ GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- also some subqueries, JOINS with a complicated target lists @@ -138,9 +138,9 @@ WHERE raw_events_1.tenant_id = raw_events_2.tenant_id; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- join with group by @@ -154,9 +154,9 @@ WHERE raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- a more complicated JOIN @@ -174,9 +174,9 @@ ORDER BY r2.event_at DESC; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- queries with CTEs are supported @@ -191,9 +191,9 @@ GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' @@ -207,9 +207,9 @@ GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id - deparse_shard_query_test + deparse_shard_query_test 
--------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' @@ -226,9 +226,9 @@ WITH RECURSIVE hierarchy as ( SELECT * FROM hierarchy WHERE LEVEL <= 2; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' @@ -239,9 +239,9 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- many filters suffled @@ -252,9 +252,9 @@ SELECT value_3, value_2, tenant_id WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision))) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(E' @@ -264,9 +264,9 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id WHERE event_at = now(); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(E' @@ -277,9 +277,9 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6 GROUP BY event_at, value_7, value_6; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' @@ -298,34 +298,34 @@ SELECT raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(' 
INSERT INTO raw_events_1(value_7, value_1, tenant_id) -SELECT +SELECT value_7, value_1, tenant_id FROM - (SELECT + (SELECT tenant_id, value_2 as value_7, value_1 FROM raw_events_2 ) as foo '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5) -SELECT +SELECT sum(value_1), tenant_id, sum(value_5::bigint) FROM - (SELECT + (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM raw_events_2, raw_events_1 @@ -336,43 +336,43 @@ GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4) -SELECT +SELECT tenant_id, value_1, value_2, value_3, value_4 FROM - (SELECT + (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3) -SELECT +SELECT * FROM - (SELECT + (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- use a column multiple times @@ -386,9 +386,9 @@ ORDER BY value_2, value_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1 - deparse_shard_query_test + 
deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) -- test dropped table as well @@ -401,8 +401,8 @@ FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 - deparse_shard_query_test + deparse_shard_query_test --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_distributed_transaction_id.out b/src/test/regress/expected/multi_distributed_transaction_id.out index 1e58c6a65..e622ff619 100644 --- a/src/test/regress/expected/multi_distributed_transaction_id.out +++ b/src/test/regress/expected/multi_distributed_transaction_id.out @@ -11,29 +11,29 @@ SET TIME ZONE 'PST8PDT'; -- should return uninitialized values if not in a transaction SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp + initiator_node_identifier | transaction_number | transaction_stamp --------------------------------------------------------------------- - 0 | 0 | + 0 | 0 | (1 row) BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(50, 50, '2016-01-01 00:00:00+0'); - assign_distributed_transaction_id + assign_distributed_transaction_id --------------------------------------------------------------------- - + (1 row) -- see the assigned value SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 50 | 50 | Thu Dec 31 16:00:00 2015 PST | t (1 row) @@ -44,7 +44,7 @@ ERROR: the backend has already been assigned a transaction id ROLLBACK; -- since the transaction finished, we should see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -53,16 +53,16 @@ SELECT initiator_node_identifier, transaction_number, transaction_stamp, (proces BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
--------------------------------------------------------------------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id + assign_distributed_transaction_id --------------------------------------------------------------------- - + (1 row) SELECT 5 / 0; @@ -70,7 +70,7 @@ ERROR: division by zero COMMIT; -- since the transaction errored, we should see the uninitialized values again SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -78,20 +78,20 @@ COMMIT; -- we should also see that a new connection means an uninitialized transaction id BEGIN; SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id + assign_distributed_transaction_id --------------------------------------------------------------------- - + (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 52 | 52 | Wed Dec 31 16:00:00 2014 PST | t (1 row) \c - - - :master_port SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -99,13 +99,13 @@ BEGIN; -- now show that PREPARE resets the distributed transaction id BEGIN; SELECT assign_distributed_transaction_id(120, 120, '2015-01-01 00:00:00+0'); - assign_distributed_transaction_id + assign_distributed_transaction_id --------------------------------------------------------------------- - + (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? --------------------------------------------------------------------- 120 | 120 | Wed Dec 31 16:00:00 2014 PST | t (1 row) @@ -113,7 +113,7 @@ BEGIN; PREPARE TRANSACTION 'dist_xact_id_test'; -- after the prepare we should see that transaction id is cleared SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); - initiator_node_identifier | transaction_number | transaction_stamp | ?column? + initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
--------------------------------------------------------------------- 0 | 0 | | t (1 row) @@ -130,9 +130,9 @@ $$ LANGUAGE sql; -- force the transaction ID to be used in a parallel plan BEGIN; SELECT assign_distributed_transaction_id(50, 1234567, '2016-01-01 00:00:00+0'); - assign_distributed_transaction_id + assign_distributed_transaction_id --------------------------------------------------------------------- - + (1 row) -- create >8MB table @@ -144,7 +144,7 @@ SET LOCAL max_parallel_workers_per_gather TO 2; SET LOCAL parallel_tuple_cost TO 0; EXPLAIN (COSTS OFF) SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Gather Workers Planned: 1 @@ -153,7 +153,7 @@ SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); (4 rows) SELECT a FROM parallel_id_test WHERE a = parallel_worker_transaction_id_test(); - a + a --------------------------------------------------------------------- 1234567 1234567 diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index f375fa071..1e37ea6d1 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -51,9 +51,9 @@ CREATE TABLE events_hash ( name text ); SELECT create_distributed_table('events_hash', 'name', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- set shardstate of one replication from each shard to 0 (invalid value) @@ -61,14 +61,14 @@ UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540 AND groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port); -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); - load_shard_id_array + load_shard_id_array --------------------------------------------------------------------- {540000,540001,540002,540003} (1 row) -- should see array with first shard range SELECT load_shard_interval_array(540000, 0); - load_shard_interval_array + load_shard_interval_array --------------------------------------------------------------------- {-2147483648,-1073741825} (1 row) @@ -80,14 +80,14 @@ CREATE TABLE events_range ( name text ); SELECT master_create_distributed_table('events_range', 'name', 'range'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create empty shard SELECT master_create_empty_shard('events_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 540004 (1 row) @@ -97,7 +97,7 @@ UPDATE pg_dist_shard SET shardmaxvalue = 'Zebra' WHERE shardid = 540004; SELECT load_shard_interval_array(540004, ''::text); - load_shard_interval_array + load_shard_interval_array --------------------------------------------------------------------- {Aardvark,Zebra} (1 row) @@ -107,35 +107,35 @@ SELECT load_shard_interval_array(540005, 0); ERROR: could not find valid entry for shard xxxxx -- should see two placements SELECT load_shard_placement_array(540001, false); - load_shard_placement_array + load_shard_placement_array --------------------------------------------------------------------- {localhost:xxxxx,localhost:xxxxx} (1 row) -- only one of which is finalized SELECT 
load_shard_placement_array(540001, true); - load_shard_placement_array + load_shard_placement_array --------------------------------------------------------------------- {localhost:xxxxx} (1 row) -- should see error for non-existent shard SELECT load_shard_placement_array(540001, false); - load_shard_placement_array + load_shard_placement_array --------------------------------------------------------------------- {localhost:xxxxx,localhost:xxxxx} (1 row) -- should see column id of 'name' SELECT partition_column_id('events_hash'); - partition_column_id + partition_column_id --------------------------------------------------------------------- 2 (1 row) -- should see hash partition type and fail for non-distributed tables SELECT partition_type('events_hash'); - partition_type + partition_type --------------------------------------------------------------------- h (1 row) @@ -144,26 +144,26 @@ SELECT partition_type('pg_type'); ERROR: relation pg_type is not distributed -- should see true for events_hash, false for others SELECT is_distributed_table('events_hash'); - is_distributed_table + is_distributed_table --------------------------------------------------------------------- t (1 row) SELECT is_distributed_table('pg_type'); - is_distributed_table + is_distributed_table --------------------------------------------------------------------- f (1 row) SELECT is_distributed_table('pg_dist_shard'); - is_distributed_table + is_distributed_table --------------------------------------------------------------------- f (1 row) -- test underlying column name-id translation SELECT column_name_to_column_id('events_hash', 'name'); - column_name_to_column_id + column_name_to_column_id --------------------------------------------------------------------- 2 (1 row) @@ -181,7 +181,7 @@ DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_range'::regclass; -- verify that an eager load shows them missing SELECT load_shard_id_array('events_hash'); - load_shard_id_array + load_shard_id_array --------------------------------------------------------------------- {} (1 row) @@ -198,7 +198,7 @@ VALUES ('customers'::regclass, 'h', column_name_to_column('customers'::regclass, 'id')); SELECT partmethod, column_to_column_name(logicalrelid, partkey) FROM pg_dist_partition WHERE logicalrelid = 'customers'::regclass; - partmethod | column_to_column_name + partmethod | column_to_column_name --------------------------------------------------------------------- h | id (1 row) @@ -209,9 +209,9 @@ ERROR: not a valid column SELECT column_to_column_name('customers',''); ERROR: not a valid column SELECT column_to_column_name('pg_dist_node'::regclass, NULL); - column_to_column_name + column_to_column_name --------------------------------------------------------------------- - + (1 row) SELECT column_to_column_name('pg_dist_node'::regclass,'{FROMEXPR :fromlist ({RANGETBLREF :rtindex 1 }) :quals <>}'); @@ -226,7 +226,7 @@ SELECT create_monolithic_shard_row('customers') AS new_shard_id \gset SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; - shardstorage | shardminvalue | shardmaxvalue + shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- t | -2147483648 | 2147483647 (1 row) @@ -236,13 +236,13 @@ WHERE shardid = :new_shard_id; BEGIN; -- pick up a shard lock and look for it in pg_locks SELECT acquire_shared_shard_lock(5); - acquire_shared_shard_lock + acquire_shared_shard_lock 
--------------------------------------------------------------------- - + (1 row) SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; - objid | mode + objid | mode --------------------------------------------------------------------- 5 | ShareLock (1 row) @@ -251,7 +251,7 @@ SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; COMMIT; -- lock should be gone now SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -260,26 +260,26 @@ SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table1(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table1', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY get_shardid_test_table1 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 1); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540006 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540009 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540007 (1 row) @@ -287,19 +287,19 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table1_540006; - column1 | column2 + column1 | column2 --------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM get_shardid_test_table1_540009; - column1 | column2 + column1 | column2 --------------------------------------------------------------------- 2 | 2 (1 row) SELECT * FROM get_shardid_test_table1_540007; - column1 | column2 + column1 | column2 --------------------------------------------------------------------- 3 | 3 (1 row) @@ -307,7 +307,7 @@ SELECT * FROM get_shardid_test_table1_540007; \c - - - :master_port -- test non-existing value SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540007 (1 row) @@ -316,20 +316,20 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table2(column1 text[], column2 int); SELECT create_distributed_table('get_shardid_test_table2', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY get_shardid_test_table2 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c}'); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540013 (1 row) SELECT 
get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}'); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540011 (1 row) @@ -337,13 +337,13 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table2_540013; - column1 | column2 + column1 | column2 --------------------------------------------------------------------- {a,b,c} | 1 (1 row) SELECT * FROM get_shardid_test_table2_540011; - column1 | column2 + column1 | column2 --------------------------------------------------------------------- {d,e,f} | 2 (1 row) @@ -364,9 +364,9 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); ERROR: relation is not distributed -- test append distributed table SELECT create_distributed_table('get_shardid_test_table3', 'column1', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); @@ -374,39 +374,39 @@ ERROR: finding shard id of given distribution value is only supported for hash -- test reference table; CREATE TABLE get_shardid_test_table4(column1 int, column2 int); SELECT create_reference_table('get_shardid_test_table4'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- test NULL distribution column value for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4'); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', NULL); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540014 (1 row) -- test different data types for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 1); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 'a'); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c}'); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540014 (1 row) @@ -414,32 +414,32 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c -- test range distributed table CREATE TABLE get_shardid_test_table5(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table5', 'column1', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create worker shards SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 
540015 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 540016 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 540017 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 540018 (1 row) @@ -450,38 +450,38 @@ UPDATE pg_dist_shard SET shardminvalue = 1001, shardmaxvalue = 2000 WHERE shardi UPDATE pg_dist_shard SET shardminvalue = 2001, shardmaxvalue = 3000 WHERE shardid = 540017; UPDATE pg_dist_shard SET shardminvalue = 3001, shardmaxvalue = 4000 WHERE shardid = 540018; SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 5); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540015 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 1111); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540016 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 2689); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540017 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 3248); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 540018 (1 row) -- test non-existing value for range distributed tables SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 4001); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 0 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); - get_shard_id_for_distribution_column + get_shard_id_for_distribution_column --------------------------------------------------------------------- 0 (1 row) @@ -489,16 +489,16 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); SET citus.shard_count TO 2; CREATE TABLE events_table_count (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('events_table_count', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE users_table_count (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('users_table_count', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT relation_count_in_query($$-- we can support arbitrary subqueries within UNIONs @@ -572,7 +572,7 @@ GROUP BY types ORDER BY types;$$); - relation_count_in_query + relation_count_in_query --------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_drop_extension.out 
b/src/test/regress/expected/multi_drop_extension.out index 6dc707a22..6419ebd1a 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -5,9 +5,9 @@ SET citus.next_shard_id TO 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- this emits a NOTICE message for every table we are dropping with our CASCADE. It would @@ -20,13 +20,13 @@ RESET client_min_messages; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -34,19 +34,19 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- verify that a table can be created after the extension has been dropped and recreated CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT * FROM testtableddl; - somecol | distributecol + somecol | distributecol --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_dropped_column_aliases.out b/src/test/regress/expected/multi_dropped_column_aliases.out index 930c8e3a6..c3f250d62 100644 --- a/src/test/regress/expected/multi_dropped_column_aliases.out +++ b/src/test/regress/expected/multi_dropped_column_aliases.out @@ -2,13 +2,13 @@ -- table schema is modified via ALTER statements. SET citus.next_shard_id TO 620000; SELECT count(*) FROM customer; - count + count --------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment --------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. 
blithely ironic theodolites integrate boldly: caref @@ -17,28 +17,28 @@ SELECT * FROM customer LIMIT 2; ALTER TABLE customer ADD COLUMN new_column1 INTEGER; ALTER TABLE customer ADD COLUMN new_column2 INTEGER; SELECT count(*) FROM customer; - count + count --------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 --------------------------------------------------------------------- - 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | - 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref | | + 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | + 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref | | (2 rows) ALTER TABLE customer DROP COLUMN new_column1; ALTER TABLE customer DROP COLUMN new_column2; SELECT count(*) FROM customer; - count + count --------------------------------------------------------------------- 1000 (1 row) SELECT * FROM customer LIMIT 2; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment --------------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref @@ -46,7 +46,7 @@ SELECT * FROM customer LIMIT 2; -- Verify joins work with dropped columns. 
SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey; - count + count --------------------------------------------------------------------- 1956 (1 row) diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 3aad89cad..c02435c96 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -216,40 +216,40 @@ t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: +- Plan: Node Type: "Sort" Parallel Aware: false - Sort Key: + Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false - Group Key: + Group Key: - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 2 Tasks Shown: "One of 2" - Tasks: + Tasks: - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false - Group Key: + Group Key: - "l_quantity" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -1089,21 +1089,21 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -- Plan: +- Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 1 Tasks Shown: "None, not supported for re-partition queries" - Dependent Jobs: + Dependent Jobs: - Map Task Count: 2 Merge Task Count: 1 -- test parallel aggregates diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index bd50bd04e..9857bd288 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -31,7 +31,7 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? + datname | ?column? | ?column? 
--------------------------------------------------------------------- regression | t | t (1 row) @@ -45,7 +45,7 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); - count + count --------------------------------------------------------------------- 0 (1 row) @@ -114,7 +114,7 @@ ALTER EXTENSION citus UPDATE TO '9.1-1'; ALTER EXTENSION citus UPDATE TO '9.2-1'; -- show running version SHOW citus.version; - citus.version + citus.version --------------------------------------------------------------------- 9.2devel (1 row) @@ -128,7 +128,7 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test'); - count + count --------------------------------------------------------------------- 0 (1 row) @@ -152,7 +152,7 @@ CREATE TABLE version_mismatch_table(column1 int); INSERT INTO version_mismatch_table(column1) VALUES(5); -- Test SELECT SELECT * FROM version_mismatch_table ORDER BY column1; - column1 + column1 --------------------------------------------------------------------- 0 1 @@ -168,10 +168,10 @@ SELECT d.datname as "Name", pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" FROM pg_catalog.pg_database d ORDER BY 1; - Name | Owner | Access privileges + Name | Owner | Access privileges --------------------------------------------------------------------- - postgres | postgres | - regression | postgres | + postgres | postgres | + regression | postgres | template0 | postgres | =c/postgres + | | postgres=CTc/postgres template1 | postgres | =c/postgres + @@ -216,7 +216,7 @@ ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem \d List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -230,7 +230,7 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? + datname | ?column? | ?column? --------------------------------------------------------------------- regression | t | t (1 row) @@ -271,7 +271,7 @@ SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); - datname | ?column? | ?column? + datname | ?column? | ?column? 
--------------------------------------------------------------------- another | t | t (1 row) @@ -308,7 +308,7 @@ SELECT * FROM test_deamon.maintenance_deamon_died('another'); - maintenance_deamon_died + maintenance_deamon_died --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_configure_followers.out b/src/test/regress/expected/multi_follower_configure_followers.out index e5ca57c0e..de87d311c 100644 --- a/src/test/regress/expected/multi_follower_configure_followers.out +++ b/src/test/regress/expected/multi_follower_configure_followers.out @@ -3,7 +3,7 @@ ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -13,7 +13,7 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -22,7 +22,7 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_follower_dml.out b/src/test/regress/expected/multi_follower_dml.out index bab93291a..08e84a8b4 100644 --- a/src/test/regress/expected/multi_follower_dml.out +++ b/src/test/regress/expected/multi_follower_dml.out @@ -1,9 +1,9 @@ \c - - - :master_port CREATE TABLE the_table (a int, b int, z bigserial); SELECT create_distributed_table('the_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE local (a int, b int); @@ -16,21 +16,21 @@ DETAIL: the database is in recovery mode SET citus.writable_standby_coordinator TO on; INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); SELECT * FROM the_table; - a | b | z + a | b | z --------------------------------------------------------------------- 1 | 2 | 2 (1 row) UPDATE the_table SET z = 3 WHERE a = 1; SELECT * FROM the_table; - a | b | z + a | b | z --------------------------------------------------------------------- 1 | 2 | 3 (1 row) DELETE FROM the_table WHERE a = 1; SELECT * FROM the_table; - a | b | z + a | b | z --------------------------------------------------------------------- (0 rows) @@ -47,7 +47,7 @@ ERROR: cannot assign TransactionIds during recovery SET citus.multi_shard_commit_protocol TO '1pc'; INSERT INTO the_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7); SELECT * FROM the_table ORDER BY a; - a | b | z + a | b | z --------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 @@ -56,7 +56,7 @@ SELECT * FROM the_table ORDER BY a; -- modifying CTEs are possible WITH del AS (DELETE FROM the_table RETURNING *) SELECT * FROM del ORDER BY a; - a | b | z + a | b | z --------------------------------------------------------------------- 2 | 3 | 4 5 | 6 | 7 @@ -65,7 +65,7 @@ SELECT * FROM del ORDER BY a; -- COPY is possible in 1PC mode COPY the_table (a, b, z) FROM STDIN WITH CSV; SELECT * FROM the_table ORDER BY a; - a | b | z + a | b | z --------------------------------------------------------------------- 10 
| 10 | 10 11 | 11 | 11 @@ -83,7 +83,7 @@ BEGIN; INSERT INTO the_table (a, b, z) VALUES (1, 2, 2); ROLLBACK; SELECT * FROM the_table ORDER BY a; - a | b | z + a | b | z --------------------------------------------------------------------- (0 rows) @@ -103,7 +103,7 @@ INSERT INTO the_table (a, b, z) VALUES (1, 2, 3); ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' SELECT * FROM the_table ORDER BY a; - a | b | z + a | b | z --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_follower_select_statements.out b/src/test/regress/expected/multi_follower_select_statements.out index b4f87de0a..3f4340d61 100644 --- a/src/test/regress/expected/multi_follower_select_statements.out +++ b/src/test/regress/expected/multi_follower_select_statements.out @@ -1,22 +1,22 @@ \c - - - :master_port -- do some setup SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) CREATE TABLE the_table (a int, b int); SELECT create_distributed_table('the_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO the_table (a, b) VALUES (1, 1); @@ -27,9 +27,9 @@ CREATE TABLE stock ( s_order_cnt int NOT NULL ); SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; @@ -37,7 +37,7 @@ INSERT INTO stock SELECT c, c, c FROM generate_series(1, 5) as c; -- is still in the default cluster and will send queries to the primary nodes \c - - - :follower_master_port SELECT * FROM the_table; - a | b + a | b --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -48,7 +48,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -66,7 +66,7 @@ ERROR: node group does not have a secondary node SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary'); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -74,7 +74,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary'); - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) @@ -82,7 +82,7 @@ SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" -- now that we've added secondaries this should work SELECT * FROM the_table; - a | b + a | b --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -93,7 +93,7 @@ from stock group by s_i_id having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock) order by s_i_id; - s_i_id | ordercount + s_i_id | ordercount --------------------------------------------------------------------- 3 | 3 4 | 4 @@ -106,7 +106,7 @@ FROM master_get_active_worker_nodes() ORDER BY node_name, node_port; - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 9071 localhost | 9072 @@ -130,7 +130,7 @@ ERROR: there is a shard placement in node group but there are no nodes in that UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary'; \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SELECT * FROM the_table; - a | b + a | b --------------------------------------------------------------------- 1 | 1 1 | 2 diff --git a/src/test/regress/expected/multi_follower_task_tracker.out b/src/test/regress/expected/multi_follower_task_tracker.out index 56b479e51..f6b75bd40 100644 --- a/src/test/regress/expected/multi_follower_task_tracker.out +++ b/src/test/regress/expected/multi_follower_task_tracker.out @@ -2,9 +2,9 @@ -- do some setup CREATE TABLE tab(a int, b int); SELECT create_distributed_table('tab', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tab (a, b) VALUES (1, 1); @@ -12,7 +12,7 @@ INSERT INTO tab (a, b) VALUES (1, 2); \c - - - :follower_master_port RESET citus.task_executor_type; SELECT * FROM tab; - a | b + a | b --------------------------------------------------------------------- 1 | 1 1 | 2 diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out index c695283a6..2b612124e 100644 --- a/src/test/regress/expected/multi_foreign_key.out +++ b/src/test/regress/expected/multi_foreign_key.out @@ -7,9 +7,9 @@ SET citus.shard_count TO 32; -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test foreign constraint creation with not supported parameters @@ -47,9 +47,9 @@ HINT: Please change "citus.shard_replication_factor to 1". To learn more about DROP TABLE self_referencing_table; CREATE TABLE self_referencing_table(id int, ref_id int, PRIMARY KEY (id, ref_id)); SELECT create_distributed_table('self_referencing_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE self_referencing_table ADD CONSTRAINT fkey FOREIGN KEY(id,ref_id) REFERENCES self_referencing_table(id, ref_id); @@ -86,14 +86,14 @@ HINT: Please change "citus.shard_replication_factor to 1". 
To learn more about DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint creation on append and range distributed tables --- foreign keys are supported either in between distributed tables including the +-- foreign keys are supported either in between distributed tables including the -- distribution column or from distributed tables to reference tables. SET citus.shard_replication_factor TO 1; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY (id) REFERENCES referenced_table(id)); @@ -104,9 +104,9 @@ DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int,FOREIGN KEY (id) REFERENCES referenced_table(id)); @@ -119,15 +119,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test inserts @@ -154,7 +154,7 @@ INSERT INTO referencing_table VALUES(2, 2); TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) @@ -166,15 +166,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- single shard cascading delete @@ -182,12 +182,12 @@ INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- (0 rows) @@ -196,7 +196,7 @@ INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); DELETE FROM 
referenced_table; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) @@ -213,15 +213,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referenced_table VALUES(1, 1); @@ -235,12 +235,12 @@ DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- (0 rows) @@ -250,15 +250,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referenced_table VALUES(1, 1); @@ -272,13 +272,13 @@ DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- 1 | 1 (1 row) @@ -289,15 +289,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referenced_table VALUES(1, 1); @@ -311,13 +311,13 @@ UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 10 
| 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- 1 | 10 (1 row) @@ -328,15 +328,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referenced_table VALUES(1, 1); @@ -350,13 +350,13 @@ UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- 1 | 1 (1 row) @@ -367,20 +367,20 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- | 2 (1 row) @@ -392,15 +392,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO referencing_table VALUES(null, 2); @@ -408,7 +408,7 @@ ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign k DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
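The MATCH SIMPLE and MATCH FULL cases in these hunks differ only in how NULLs inside a composite foreign key are treated, which is why VALUES(null, 2) is accepted by one and rejected by the other. A minimal local sketch of the same contrast, with illustrative table names rather than the test's:

CREATE TABLE parent (a int, b int, PRIMARY KEY (a, b));
CREATE TABLE child_simple (x int, y int,
    FOREIGN KEY (x, y) REFERENCES parent (a, b) MATCH SIMPLE);
CREATE TABLE child_full (x int, y int,
    FOREIGN KEY (x, y) REFERENCES parent (a, b) MATCH FULL);
INSERT INTO child_simple VALUES (NULL, 2);  -- accepted: under MATCH SIMPLE any
                                            -- NULL key column skips the check
INSERT INTO child_full VALUES (NULL, 2);    -- ERROR: MATCH FULL does not allow
                                            -- mixing null and nonnull key values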
CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) @@ -419,16 +419,16 @@ DROP TABLE referenced_table; SET citus.shard_count TO 4; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify that we skip foreign key validation when propagation is turned off @@ -472,16 +472,16 @@ DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash', colocate_with => 'none'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); @@ -493,15 +493,15 @@ DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referenced_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- columns for the referenced table is empty @@ -554,12 +554,12 @@ INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- (0 rows) @@ -577,12 +577,12 @@ DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- (0 rows) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- (0 rows) @@ -600,13 +600,13 @@ DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | 
test_column --------------------------------------------------------------------- 1 | 1 (1 row) @@ -623,13 +623,13 @@ UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- 1 | 10 (1 row) @@ -646,13 +646,13 @@ UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 10 | 1 (1 row) SELECT * FROM referenced_table; - id | test_column + id | test_column --------------------------------------------------------------------- 1 | 10 (1 row) @@ -662,7 +662,7 @@ ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE; INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table ORDER BY 1,2; - id | ref_id + id | ref_id --------------------------------------------------------------------- 10 | 1 | 2 @@ -677,7 +677,7 @@ ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign k DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. CONTEXT: while executing command on localhost:xxxxx SELECT * FROM referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 10 | 1 (1 row) @@ -690,15 +690,15 @@ DROP TABLE referenced_table; CREATE TABLE cyclic_reference_table1(id int, table2_id int, PRIMARY KEY(id, table2_id)); CREATE TABLE cyclic_reference_table2(id int, table1_id int, PRIMARY KEY(id, table1_id)); SELECT create_distributed_table('cyclic_reference_table1', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('cyclic_reference_table2', 'table1_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE cyclic_reference_table1 ADD CONSTRAINT cyclic_constraint1 FOREIGN KEY(id, table2_id) REFERENCES cyclic_reference_table2(table1_id, id) DEFERRABLE INITIALLY DEFERRED; @@ -715,13 +715,13 @@ INSERT INTO cyclic_reference_table2 VALUES(1, 1); COMMIT; -- verify that rows are actually inserted SELECT * FROM cyclic_reference_table1; - id | table2_id + id | table2_id --------------------------------------------------------------------- 1 | 1 (1 row) SELECT * FROM cyclic_reference_table2; - id | table1_id + id | table1_id --------------------------------------------------------------------- 1 | 1 (1 row) @@ -755,13 +755,13 @@ INSERT INTO transaction_referenced_table VALUES(1); INSERT INTO transaction_referencing_table VALUES(1, 1); -- verify that rows are actually inserted SELECT * FROM transaction_referenced_table; - id + id --------------------------------------------------------------------- 1 (1 row) SELECT * FROM transaction_referencing_table; - id | ref_id + id | ref_id --------------------------------------------------------------------- 1 | 1 (1 row) @@ 
-778,9 +778,9 @@ CREATE TABLE self_referencing_table1( FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table1(id, other_column) ); SELECT create_distributed_table('self_referencing_table1', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test insertion to self referencing table @@ -792,7 +792,7 @@ DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referen CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; - id | other_column | other_column_ref + id | other_column | other_column_ref --------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -802,9 +802,9 @@ DROP TABLE self_referencing_table1; -- test self referencing foreign key with ALTER TABLE CREATE TABLE self_referencing_table2(id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column)); SELECT create_distributed_table('self_referencing_table2', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE self_referencing_table2 ADD CONSTRAINT self_referencing_fk_constraint FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table2(id, other_column); @@ -817,7 +817,7 @@ DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referen CONTEXT: while executing command on localhost:xxxxx -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; - id | other_column | other_column_ref + id | other_column | other_column_ref --------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -828,9 +828,9 @@ DROP TABLE self_referencing_table2; -- test foreign key creation on CREATE TABLE from reference table CREATE TABLE referenced_by_reference_table(id int PRIMARY KEY, other_column int); SELECT create_distributed_table('referenced_by_reference_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_by_reference_table(id)); @@ -841,16 +841,16 @@ DETAIL: A reference table can only have reference keys to other reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table_second(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_reference_table('reference_table_second'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- test foreign key creation on CREATE TABLE from reference table to local table @@ -870,18 +870,18 @@ CREATE TABLE self_referencing_reference_table( FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- test foreign key creation on ALTER TABLE from reference table DROP TABLE reference_table; CREATE TABLE 
reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_by_reference_table(id); @@ -890,9 +890,9 @@ DETAIL: A reference table can only have reference keys to other reference table -- test foreign key creation on ALTER TABLE to reference table CREATE TABLE references_to_reference_table(id int, referencing_column int); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); @@ -900,9 +900,9 @@ ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referenc DROP TABLE reference_table_second; CREATE TABLE reference_table_second(id int, referencing_column int); SELECT create_reference_table('reference_table_second'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE reference_table_second ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); @@ -913,9 +913,9 @@ DETAIL: drop cascades to constraint fk on table references_to_reference_table drop cascades to constraint fk on table reference_table_second CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id); @@ -930,9 +930,9 @@ CREATE TABLE self_referencing_reference_table( PRIMARY KEY(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column); diff --git a/src/test/regress/expected/multi_foreign_key_relation_graph.out b/src/test/regress/expected/multi_foreign_key_relation_graph.out index 1dd36db04..8345f0cfe 100644 --- a/src/test/regress/expected/multi_foreign_key_relation_graph.out +++ b/src/test/regress/expected/multi_foreign_key_relation_graph.out @@ -13,81 +13,81 @@ CREATE FUNCTION get_referenced_relation_id_list(Oid) -- Simple case with distributed tables CREATE TABLE dtt1(id int PRIMARY KEY); SELECT create_distributed_table('dtt1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE dtt2(id int PRIMARY KEY REFERENCES dtt1(id)); SELECT create_distributed_table('dtt2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE dtt3(id int PRIMARY KEY REFERENCES dtt2(id)); SELECT create_distributed_table('dtt3','id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- dtt2 dtt3 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- dtt3 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) CREATE TABLE dtt4(id int PRIMARY KEY); SELECT create_distributed_table('dtt4', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) ALTER TABLE dtt4 ADD CONSTRAINT dtt4_fkey FOREIGN KEY (id) REFERENCES dtt3(id); SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 dtt2 @@ -95,30 +95,30 @@ SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id (3 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt2'::regclass) ORDER BY 1; - 
get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt1'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- dtt2 dtt3 @@ -126,38 +126,38 @@ SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_ (3 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt2'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- dtt3 dtt4 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- dtt4 (1 row) ALTER TABLE dtt4 DROP CONSTRAINT dtt4_fkey; SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- dtt1 dtt2 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt3'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('dtt4'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) @@ -169,33 +169,33 @@ CREATE TABLE test_3 (id int UNIQUE); CREATE TABLE test_4 (id int UNIQUE); CREATE TABLE test_5 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_Table('test_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_Table('test_3', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_Table('test_4', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_Table('test_5', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE VIEW 
referential_integrity_summary AS @@ -218,42 +218,42 @@ CREATE VIEW referential_integrity_summary AS BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | | - 4 | test_4 | | - 5 | test_5 | | + 3 | test_3 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3} | + 1 | test_1 | {test_2,test_3} | 2 | test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} - 4 | test_4 | | - 5 | test_5 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4} | + 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} 4 | test_4 | | {test_3,test_2,test_1} - 5 | test_5 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -265,42 +265,42 @@ ROLLBACK; BEGIN; ALTER TABLE test_2 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_1(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | | - 4 | test_4 | | - 5 | test_5 | | + 3 | test_3 | | + 4 | test_4 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4} | + 3 | test_3 | {test_4} | 4 | test_4 | | {test_3} - 5 | test_5 | | + 5 | test_5 | | (5 rows) ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | 
referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4,test_5} | + 3 | test_3 | {test_4,test_5} | 4 | test_4 | {test_5} | {test_3} 5 | test_5 | | {test_4,test_3} (5 rows) ALTER TABLE test_3 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_2(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -315,9 +315,9 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -326,11 +326,11 @@ BEGIN; ALTER TABLE test_3 DROP CONSTRAINT fkey_1; SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} - 3 | test_3 | {test_4,test_5} | + 3 | test_3 | {test_4,test_5} | 4 | test_4 | {test_5} | {test_3} 5 | test_5 | | {test_4,test_3} (5 rows) @@ -341,51 +341,51 @@ DROP TABLE test_1, test_2, test_3, test_4, test_5 CASCADE; BEGIN; CREATE TABLE test_1 (id int UNIQUE); SELECT create_distributed_Table('test_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_2 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_1(id)); SELECT create_distributed_Table('test_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) CREATE TABLE test_3 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_2(id)); SELECT create_distributed_Table('test_3', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3} | + 1 | test_1 | {test_2,test_3} | 2 
| test_2 | {test_3} | {test_1} 3 | test_3 | | {test_2,test_1} (3 rows) CREATE TABLE test_4 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_3(id)); SELECT create_distributed_Table('test_4', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4} | + 1 | test_1 | {test_2,test_3,test_4} | 2 | test_2 | {test_3,test_4} | {test_1} 3 | test_3 | {test_4} | {test_2,test_1} 4 | test_4 | | {test_3,test_2,test_1} @@ -393,15 +393,15 @@ BEGIN; CREATE TABLE test_5 (id int UNIQUE, FOREIGN KEY(id) REFERENCES test_4(id)); SELECT create_distributed_Table('test_5', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -417,9 +417,9 @@ BEGIN; ALTER TABLE test_4 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_3(id); ALTER TABLE test_5 ADD CONSTRAINT fkey_1 FOREIGN KEY(id) REFERENCES test_4(id); SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2,test_3,test_4,test_5} | + 1 | test_1 | {test_2,test_3,test_4,test_5} | 2 | test_2 | {test_3,test_4,test_5} | {test_1} 3 | test_3 | {test_4,test_5} | {test_2,test_1} 4 | test_4 | {test_5} | {test_3,test_2,test_1} @@ -431,9 +431,9 @@ NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to constraint test_4_id_fkey on table test_4 drop cascades to constraint fkey_1 on table test_4 SELECT * FROM referential_integrity_summary; - n | table_name | referencing_relations | referenced_relations + n | table_name | referencing_relations | referenced_relations --------------------------------------------------------------------- - 1 | test_1 | {test_2} | + 1 | test_1 | {test_2} | 2 | test_2 | | {test_1} (2 rows) @@ -445,56 +445,56 @@ BEGIN; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); 
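Note that get_referenced_relation_id_list and get_referencing_relation_id_list report the transitive closure of the foreign key graph, as the lists just below show: test_8 ends up with both test_6 and test_7 even though the chain crosses two schemas. The catalog itself stores only the direct edges; a rough single-hop equivalent against pg_constraint, assuming both intermediate schemas are on the search_path as in the test:

SELECT conrelid::regclass  AS referencing,
       confrelid::regclass AS referenced
FROM pg_constraint
WHERE contype = 'f'                          -- foreign key constraints only
  AND (conrelid  = 'test_7'::regclass
    OR confrelid = 'test_7'::regclass);      -- edges touching test_7, one hop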
SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- test_6 test_7 @@ -505,22 +505,22 @@ NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table test_7 drop cascades to constraint test_8_id_fkey on table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) @@ -531,56 +531,56 @@ BEGIN; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; CREATE TABLE fkey_intermediate_schema_1.test_6(id int PRIMARY KEY); SELECT create_distributed_table('fkey_intermediate_schema_1.test_6', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE fkey_intermediate_schema_2.test_7(id int PRIMARY KEY REFERENCES 
fkey_intermediate_schema_1.test_6(id)); SELECT create_distributed_table('fkey_intermediate_schema_2.test_7','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE fkey_intermediate_schema_1.test_8(id int PRIMARY KEY REFERENCES fkey_intermediate_schema_2.test_7(id)); SELECT create_distributed_table('fkey_intermediate_schema_1.test_8', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- test_7 test_8 (2 rows) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- test_8 (1 row) SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_6'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- test_6 (1 row) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_8'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- test_6 test_7 @@ -592,12 +592,12 @@ DETAIL: drop cascades to table test_6 drop cascades to constraint test_7_id_fkey on table test_7 drop cascades to table test_8 SELECT get_referencing_relation_id_list::regclass FROM get_referencing_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referencing_relation_id_list + get_referencing_relation_id_list --------------------------------------------------------------------- (0 rows) SELECT get_referenced_relation_id_list::regclass FROM get_referenced_relation_id_list('test_7'::regclass) ORDER BY 1; - get_referenced_relation_id_list + get_referenced_relation_id_list --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_function_evaluation.out b/src/test/regress/expected/multi_function_evaluation.out index 784d76bf8..5c656f331 100644 --- a/src/test/regress/expected/multi_function_evaluation.out +++ b/src/test/regress/expected/multi_function_evaluation.out @@ -3,27 +3,27 @@ -- SET citus.next_shard_id TO 1200000; -- many of the tests in this file are intended for testing the non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file.
+-- We have a bunch of other tests that trigger the fast-path router SET citus.enable_fast_path_router_planner TO false; -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL) CREATE TABLE example (key INT, value INT); SELECT master_create_distributed_table('example', 'key', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE SEQUENCE example_value_seq; SELECT master_create_worker_shards('example', 1, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) INSERT INTO example VALUES (1, nextval('example_value_seq')); SELECT * FROM example; - key | value + key | value --------------------------------------------------------------------- 1 | 1 (1 row) @@ -33,11 +33,11 @@ PREPARE stmt AS INSERT INTO example VALUES (2); EXECUTE stmt; EXECUTE stmt; SELECT * FROM example; - key | value + key | value --------------------------------------------------------------------- 1 | 1 - 2 | - 2 | + 2 | + 2 | (3 rows) -- non-immutable functions inside CASE/COALESCE aren't allowed @@ -106,14 +106,14 @@ ALTER TABLE example ADD value timestamptz; INSERT INTO example VALUES (3, now()); UPDATE example SET value = timestamp '10-10-2000 00:00' WHERE key = 3 AND value > now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; - key | value + key | value --------------------------------------------------------------------- 3 | Tue Oct 10 00:00:00 2000 PDT (1 row) DELETE FROM example WHERE key = 3 AND value < now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -133,7 +133,7 @@ CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE NOTICE: stable_fn called CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE SELECT * FROM example WHERE key = 44; - key | value + key | value --------------------------------------------------------------------- 44 | Tue Oct 10 00:00:00 2000 PDT (1 row) diff --git a/src/test/regress/expected/multi_function_in_join.out b/src/test/regress/expected/multi_function_in_join.out index b6a4d1ee1..a70d39a05 100644 --- a/src/test/regress/expected/multi_function_in_join.out +++ b/src/test/regress/expected/multi_function_in_join.out @@ -14,9 +14,9 @@ SET search_path TO 'functions_in_joins'; SET citus.next_shard_id TO 2500000; CREATE TABLE table1 (id int, data int); SELECT create_distributed_table('table1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO table1 @@ -29,7 +29,7 @@ CREATE SEQUENCE numbers; SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC; DEBUG: generating subplan 2_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) ORDER BY table1.id - id | data | n + id | data | n --------------------------------------------------------------------- 1 | 1 | 1 (1 row) @@ -41,7 +41,7 @@ LANGUAGE SQL; SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; DEBUG: generating subplan 3_1
for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id - id | data | sum + id | data | sum --------------------------------------------------------------------- 8 | 64 | 8 (1 row) @@ -56,7 +56,7 @@ $$ LANGUAGE plpgsql; SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; DEBUG: generating subplan 4_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id - id | data | val + id | data | val --------------------------------------------------------------------- 3 | 9 | 3 (1 row) @@ -75,7 +75,7 @@ FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.resul ORDER BY id ASC; DEBUG: generating subplan 5_1 for subquery SELECT result FROM functions_in_joins.next_k_integers(3, 2) next_integers(result) DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_integers.result FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.result FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(result integer)) next_integers ON ((table1.id OPERATOR(pg_catalog.=) next_integers.result))) ORDER BY table1.id - id | data | result + id | data | result --------------------------------------------------------------------- 3 | 9 | 3 4 | 16 | 4 @@ -89,7 +89,7 @@ LANGUAGE SQL; SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id - id | data | x | y + id | data | x | y --------------------------------------------------------------------- 1 | 1 | 1 | 2 2 | 4 | 2 | 3 @@ -104,7 +104,7 @@ LANGUAGE SQL; SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); DEBUG: generating subplan 7_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) - f1 | f2 + f1 | f2 --------------------------------------------------------------------- 32 | 32 is text (1 row) @@ -115,14 +115,14 @@ CREATE OR REPLACE FUNCTION the_minimum_id() SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); DEBUG: generating subplan 8_1 for subquery SELECT min_id 
FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) - id | data | min_id + id | data | min_id --------------------------------------------------------------------- 1 | 1 | 1 (1 row) -- a built-in immutable function SELECT * FROM table1 JOIN abs(100) as hundred ON (id = hundred) ORDER BY id ASC; - id | data | hundred + id | data | hundred --------------------------------------------------------------------- 100 | 10000 | 100 (1 row) @@ -139,7 +139,7 @@ DEBUG: generating subplan 11_1 for CTE next_row_to_process: SELECT table1.id, t DEBUG: generating subplan 12_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n) DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_row_to_process.id, next_row_to_process.data, next_row_to_process.n FROM functions_in_joins.table1, (SELECT intermediate_result.id, intermediate_result.data, intermediate_result.n FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, data integer, n bigint)) next_row_to_process WHERE (table1.data OPERATOR(pg_catalog.<=) next_row_to_process.data) ORDER BY table1.id, table1.data - id | data | id | data | n + id | data | id | data | n --------------------------------------------------------------------- 1 | 1 | 2 | 4 | 2 2 | 4 | 2 | 4 | 2 @@ -150,7 +150,7 @@ SELECT * FROM ROWS FROM (next_k_integers(5), next_k_integers(10)) AS f(a, b), table1 WHERE id = a ORDER BY id ASC; DEBUG: generating subplan 13_1 for subquery SELECT a, b FROM ROWS FROM(functions_in_joins.next_k_integers(5), functions_in_joins.next_k_integers(10)) f(a, b) DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT f.a, f.b, table1.id, table1.data FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) f(a, b), functions_in_joins.table1 WHERE (table1.id OPERATOR(pg_catalog.=) f.a) ORDER BY table1.id - a | b | id | data + a | b | id | data --------------------------------------------------------------------- 5 | 10 | 5 | 25 6 | 11 | 6 | 36 @@ -176,7 +176,7 @@ $$ language plpgsql; SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; DEBUG: generating subplan 14_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, 
m.minimum, m.maximum - id | data | minimum | maximum + id | data | minimum | maximum --------------------------------------------------------------------- 1 | 1 | 1 | 10000 100 | 10000 | 1 | 10000 @@ -184,7 +184,7 @@ DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT table1.id, tab -- The following tests will fail as we do not support all joins on -- all kinds of functions --- In other words, we cannot recursively plan the functions and hence +-- In other words, we cannot recursively plan the functions and hence -- the query fails on the workers SET client_min_messages TO ERROR; \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index 44270f63e..14079ae82 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -9,7 +9,7 @@ CREATE TABLE simple_table ( id bigint ); SELECT master_get_table_ddl_events('simple_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.simple_table (first_name text, last_name text, id bigint) ALTER TABLE public.simple_table OWNER TO postgres @@ -21,7 +21,7 @@ CREATE TABLE not_null_table ( id bigint not null ); SELECT master_get_table_ddl_events('not_null_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.not_null_table (city text, id bigint NOT NULL) ALTER TABLE public.not_null_table OWNER TO postgres @@ -34,7 +34,7 @@ CREATE TABLE column_constraint_table ( age int CONSTRAINT non_negative_age CHECK (age >= 0) ); SELECT master_get_table_ddl_events('column_constraint_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0)) ALTER TABLE public.column_constraint_table OWNER TO postgres @@ -48,7 +48,7 @@ CREATE TABLE table_constraint_table ( CONSTRAINT bids_ordered CHECK (min_bid > max_bid) ); SELECT master_get_table_ddl_events('table_constraint_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid)) ALTER TABLE public.table_constraint_table OWNER TO postgres @@ -60,7 +60,7 @@ CREATE TABLE default_value_table ( price decimal default 0.00 ); SELECT master_get_table_ddl_events('default_value_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00) ALTER TABLE public.default_value_table OWNER TO postgres @@ -73,7 +73,7 @@ CREATE TABLE pkey_table ( id bigint PRIMARY KEY ); SELECT master_get_table_ddl_events('pkey_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL) ALTER TABLE public.pkey_table OWNER TO postgres @@ -86,7 +86,7 @@ CREATE TABLE unique_table ( username text UNIQUE not null 
); SELECT master_get_table_ddl_events('unique_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL) ALTER TABLE public.unique_table OWNER TO postgres @@ -101,7 +101,7 @@ CREATE TABLE clustered_table ( CREATE INDEX clustered_time_idx ON clustered_table (received_at); CLUSTER clustered_table USING clustered_time_idx; SELECT master_get_table_ddl_events('clustered_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL) ALTER TABLE public.clustered_table OWNER TO postgres @@ -123,7 +123,7 @@ ALTER TABLE fiddly_table ALTER traceroute SET STORAGE EXTERNAL, ALTER ip_addr SET STATISTICS 500; SELECT master_get_table_ddl_events('fiddly_table'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL) ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL @@ -137,9 +137,9 @@ CREATE FOREIGN TABLE foreign_table ( ) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true'); SELECT create_distributed_table('foreign_table', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER FOREIGN TABLE foreign_table rename to renamed_foreign_table; @@ -150,7 +150,7 @@ select table_name, column_name, data_type from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; - table_name | column_name | data_type + table_name | column_name | data_type --------------------------------------------------------------------- renamed_foreign_table_610000 | rename_name | character renamed_foreign_table_610001 | rename_name | character @@ -161,7 +161,7 @@ order by table_name; \c - - - :master_port SELECT master_get_table_ddl_events('renamed_foreign_table'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw CREATE FOREIGN TABLE public.renamed_foreign_table (id bigint NOT NULL, rename_name character(8) DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true') @@ -180,7 +180,7 @@ select table_name, column_name, data_type from information_schema.columns where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id' order by table_name; - table_name | column_name | data_type + table_name | column_name | data_type --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out index 01331abfb..f61625c85 100644 --- 
a/src/test/regress/expected/multi_hash_pruning.out +++ b/src/test/regress/expected/multi_hash_pruning.out @@ -6,8 +6,8 @@ SET citus.next_shard_id TO 630000; SET citus.shard_count to 4; SET citus.shard_replication_factor to 1; -- many of the tests in this file are intended for testing the non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We have a bunch of other tests that trigger the fast-path router SET citus.enable_fast_path_router_planner TO false; -- Create a table partitioned on integer column and update partition type to -- hash. Then load data into this table and update shard min max values with @@ -24,9 +24,9 @@ CREATE TABLE orders_hash_partitioned ( o_shippriority integer, o_comment varchar(79) ); SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO DEBUG2; @@ -34,7 +34,7 @@ SET client_min_messages TO DEBUG2; -- immutable functions. SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -43,7 +43,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -52,7 +52,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -61,7 +61,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 3 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -70,7 +70,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -80,7 +80,7 @@ SELECT count(*) FROM orders_hash_partitioned DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -89,7 +89,7 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -98,35 +98,35 @@ DETAIL: distribution column value: 1 SET citus.enable_router_execution TO 'false'; SELECT count(*) FROM orders_hash_partitioned; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Router planner not enabled.
- count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) @@ -134,14 +134,14 @@ DEBUG: Router planner not enabled. SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- 0 (1 row) @@ -149,21 +149,21 @@ DEBUG: Router planner not enabled. SET citus.enable_router_execution TO DEFAULT; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -171,7 +171,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -179,7 +179,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_clerk = 'aaa'; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -187,7 +187,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa'); DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -195,7 +195,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -205,7 +205,7 @@ SELECT count(*) FROM DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -214,14 +214,14 @@ 
SET client_min_messages TO DEFAULT; -- Check that we support running for ANY/IN with literal. SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY ('{1,2,3}'); - count + count --------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (1,2,3); - count + count --------------------------------------------------------------------- 13 (1 row) @@ -229,84 +229,84 @@ SELECT count(*) FROM lineitem_hash_part -- Check whether we can deal with null arrays SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL); - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL); - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL) OR TRUE; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL) OR TRUE; - count + count --------------------------------------------------------------------- 12000 (1 row) -- Check whether we support IN/ANY in subquery SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem_hash_part); - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderkey FROM lineitem_hash_part); - count + count --------------------------------------------------------------------- 12000 (1 row) --- Check whether we support IN/ANY in subquery with append and range distributed table +-- Check whether we support IN/ANY in subquery with append- and range-distributed tables SELECT count(*) FROM lineitem WHERE l_orderkey = ANY ('{1,2,3}'); - count + count --------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey IN (1,2,3); - count + count --------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey = ANY(NULL) OR TRUE; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY ('{1,2,3}'); - count + count --------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey IN (1,2,3); - count + count --------------------------------------------------------------------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY(NULL) OR TRUE; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -317,17 +317,17 @@ SET client_min_messages TO DEBUG2; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey < ALL ('{1,2,3}'); DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) --- Check that we don't give a spurious hint message when non-partition +-- Check that we don't give a spurious hint message when non-partition -- columns are used with ANY/IN/ALL SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_totalprice IN (2, 5); DEBUG: Router planner cannot handle multi-shard select queries - count + count
--------------------------------------------------------------------- 0 (1 row) @@ -335,7 +335,7 @@ DEBUG: Router planner cannot handle multi-shard select queries -- Check that we cannot prune for mutable functions. SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random(); DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -343,7 +343,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() OR o_orderkey = 1; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -353,7 +353,7 @@ SELECT count(*) FROM orders_hash_partitioned DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -375,7 +375,7 @@ DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] - count + count --------------------------------------------------------------------- 0 (1 row) @@ -388,7 +388,7 @@ SELECT count(*) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out index 25944b69a..7c15c3976 100644 --- a/src/test/regress/expected/multi_having_pushdown.out +++ b/src/test/regress/expected/multi_having_pushdown.out @@ -4,16 +4,16 @@ SET citus.next_shard_id TO 590000; CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE orders_hash (LIKE orders); SELECT create_distributed_table('orders_hash', 'o_orderkey', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- push down when table is distributed by hash and grouped by partition column @@ -22,7 +22,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -47,7 +47,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem GROUP BY l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -71,7 +71,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 2 DESC, 1 ASC LIMIT 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -95,7 +95,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash GROUP BY l_shipmode, l_orderkey HAVING sum(l_quantity) > 24 ORDER BY 3 DESC, 1, 2 LIMIT 3; - QUERY PLAN + QUERY PLAN 
--------------------------------------------------------------------- Limit -> Sort @@ -121,7 +121,7 @@ EXPLAIN (COSTS FALSE) WHERE o_orderkey = l_orderkey GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -150,7 +150,7 @@ EXPLAIN (COSTS FALSE) WHERE o_orderkey = l_orderkey GROUP BY l_shipmode, o_clerk HAVING sum(l_quantity) > 24 ORDER BY 1 DESC LIMIT 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -179,7 +179,7 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 ORDER BY 1; - max + max --------------------------------------------------------------------- 4 5 @@ -191,7 +191,7 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 OR count(*) > 10 ORDER BY 1; - max + max --------------------------------------------------------------------- 4 5 @@ -204,7 +204,7 @@ FROM users_table GROUP BY user_id HAVING max(value_2) > 4 AND min(value_2) < 1 AND count(*) > 20 ORDER BY 1; - max + max --------------------------------------------------------------------- 5 5 @@ -214,7 +214,7 @@ SELECT max(value_1) FROM users_table GROUP BY user_id HAVING max(value_2) > 0 AND count(*) FILTER (WHERE value_3=2) > 3 AND min(value_2) IN (0,1,2,3); - max + max --------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 8a38f9e1f..7d3515336 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -9,19 +9,19 @@ SET citus.next_shard_id TO 102080; CREATE TABLE index_test_range(a int, b int, c int); SELECT create_distributed_table('index_test_range', 'a', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 102080 (1 row) SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 102081 (1 row) @@ -30,26 +30,26 @@ SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 2; CREATE TABLE index_test_hash(a int, b int, c int); SELECT create_distributed_table('index_test_hash', 'a', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE index_test_append(a int, b int, c int); SELECT create_distributed_table('index_test_append', 'a', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 102090 (1 row) SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 102091 (1 row) @@ -99,7 +99,7 @@ CLUSTER local_table USING local_table_index; DROP TABLE 
local_table; -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) @@ -121,25 +121,25 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t \c - - - :worker_1_port SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); - count + count --------------------------------------------------------------------- 9 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; - count + count --------------------------------------------------------------------- 32 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%'; - count + count --------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -177,7 +177,7 @@ CREATE INDEX ON lineitem (l_orderkey); ERROR: creating index without a name on a distributed table is currently unsupported -- Verify that none of the failed indexes got created on the master node SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) @@ -237,24 +237,24 @@ DROP INDEX CONCURRENTLY lineitem_concurrently_index; -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, exclude those from this check.
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid + indrelid | indexrelid --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON public.index_test_hash USING btree (a) INCLUDE (b, c) (1 row) \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid + indrelid | indexrelid --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- public | index_test_hash_102082 | index_test_hash_index_a_b_c_102082 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102082 ON public.index_test_hash_102082 USING btree (a) INCLUDE (b, c) public | index_test_hash_102083 | index_test_hash_index_a_b_c_102083 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102083 ON public.index_test_hash_102083 USING btree (a) INCLUDE (b, c) @@ -276,7 +276,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? + Index Valid? --------------------------------------------------------------------- f (1 row) @@ -285,7 +285,7 @@ SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx':: DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? + Index Valid? --------------------------------------------------------------------- t (1 row) @@ -300,7 +300,7 @@ DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? + Index Valid? 
--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index dbe06e9f2..33f332e00 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -11,38 +11,38 @@ SET citus.shard_replication_factor = 2; -- so be less verbose with \set VERBOSITY TERSE when necessary CREATE TABLE raw_events_first (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_first', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE raw_events_second (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_second', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE agg_events (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp, UNIQUE(user_id, value_1_agg)); SELECT create_distributed_table('agg_events', 'user_id');; - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create the reference table as well CREATE TABLE reference_table (user_id int); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE insert_select_varchar_test (key varchar, value int); SELECT create_distributed_table('insert_select_varchar_test', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- set back to the defaults @@ -78,7 +78,7 @@ WHERE raw_events_first.user_id = raw_events_second.user_id ORDER BY user_id DESC; - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -218,9 +218,9 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300002 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 1073741823))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((value_3 OPERATOR(pg_catalog.=) (9000)::double precision) AND ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable - 
user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 9 | | 90 | | 9000 | + 9 | | 90 | | 9000 | (1 row) -- hits two shards @@ -491,9 +491,9 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (use DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 1073741823)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) OPERATOR(pg_catalog.>=) 1073741824) AND (worker_hash(user_id) OPERATOR(pg_catalog.<=) 2147483647)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time OPERATOR(pg_catalog.<) excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: Plan is router executable - user_id | value_1_agg + user_id | value_1_agg --------------------------------------------------------------------- - 7 | + 7 | (1 row) INSERT INTO agg_events (user_id, value_1_agg) @@ -554,10 +554,10 @@ SELECT t1.user_id AS col1, ON t1.user_id = t2.user_id ORDER BY t1.user_id, t2.user_id; - col1 | col2 + col1 | col2 --------------------------------------------------------------------- 1 | 1 - 2 | + 2 | 3 | 3 4 | 4 5 | 5 @@ -590,10 +590,10 @@ FROM agg_events ORDER BY user_id, value_1_agg; - user_id | value_1_agg + user_id | value_1_agg --------------------------------------------------------------------- 1 | 1 - 2 | + 2 | 3 | 3 4 | 4 5 | 5 @@ -635,7 +635,7 @@ DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Router planner cannot handle multi-shard select queries SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - user_id | value_1_agg + user_id | value_1_agg --------------------------------------------------------------------- 1 | 10 2 | 20 @@ -643,7 +643,7 @@ DEBUG: Router planner cannot handle multi-shard select queries 4 | 40 5 | 50 6 | 60 - 7 | + 7 | 8 | 80 9 | 90 (9 rows) @@ -666,7 +666,7 @@ DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_t DEBUG: Plan is router executable SELECT user_id, value_1_agg FROM agg_events ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - user_id | value_1_agg + user_id | value_1_agg --------------------------------------------------------------------- 1 | 10 2 | 20 @@ -674,7 +674,7 @@ DEBUG: Router planner cannot handle multi-shard select queries 4 | 40 5 | 50 6 | 60 - 7 | + 7 | 8 | 80 9 | 90 (9 rows) @@ -1747,7 +1747,7 @@ BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 101; SELECT user_id FROM raw_events_first WHERE user_id = 101; - user_id + user_id --------------------------------------------------------------------- 101 (1 row) @@ -1772,7 +1772,7 @@ CREATE VIEW test_view AS SELECT * FROM raw_events_first; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (16, now(), 60, 600, 6000.1, 60000); SELECT count(*) FROM raw_events_second; - count + count --------------------------------------------------------------------- 36 (1 row) @@ -1782,7 +1782,7 @@ INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) (17, now(), 60, 600, 6000.1, 60000); INSERT INTO raw_events_second SELECT * FROM test_view WHERE user_id = 17 GROUP BY 1,2,3,4,5,6; SELECT count(*) FROM raw_events_second; - count + count --------------------------------------------------------------------- 38 (1 row) @@ -1801,7 +1801,7 @@ inserts AS ( NULL ) SELECT count(*) FROM inserts; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -1890,7 +1890,7 @@ FROM (SELECT f1.key WHERE f1.key = f2.key GROUP BY 1) AS foo; SELECT * FROM insert_select_varchar_test ORDER BY 1 DESC, 2 DESC; - key | value + key | value --------------------------------------------------------------------- test_2 | 100 test_2 | 30 @@ -1911,9 +1911,9 @@ CREATE TABLE table_with_defaults -- we don't need many shards SET citus.shard_count = 2; SELECT create_distributed_table('table_with_defaults', 'store_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- let's see the queries @@ -2044,9 +2044,9 @@ CREATE TABLE table_with_serial ( s bigserial ); SELECT create_distributed_table('table_with_serial', 'store_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO table_with_serial (store_id) @@ -2064,21 +2064,21 @@ CREATE TABLE text_table (part_col text, val int); CREATE TABLE char_table (part_col char[], val int); create table table_with_starts_with_defaults (a int DEFAULT 5, b int, c int); SELECT create_distributed_table('text_table', 'part_col'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) 
 SELECT create_distributed_table('char_table','part_col');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_distributed_table('table_with_starts_with_defaults', 'c');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SET client_min_messages TO DEBUG;
@@ -2159,21 +2159,21 @@ CREATE TABLE summary_table
 count BIGINT
 );
 SELECT create_distributed_table('raw_table', 'time');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SELECT create_distributed_table('summary_table', 'time');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO raw_table VALUES(1, '11-11-1980');
 INSERT INTO summary_table SELECT time, COUNT(*) FROM raw_table GROUP BY time;
 SELECT * FROM summary_table;
- time | count 
+ time | count
 ---------------------------------------------------------------------
 11-11-1980 | 1
 (1 row)
@@ -2184,7 +2184,7 @@ TRUNCATE raw_events_first;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT * FROM (VALUES (1,2), (3,4), (5,6)) AS v(int,int);
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 2
 3 | 4
@@ -2204,7 +2204,7 @@ DEBUG: distributed INSERT ... SELECT can only select from distributed tables
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
 DEBUG: Router planner cannot handle multi-shard select queries
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -2233,13 +2233,13 @@ DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS c
 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300001'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300002'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
 DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_207_13300003'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | | 11 | | | 
- 2 | | 12 | | | 
- 3 | | 13 | | | 
- 4 | | 14 | | | 
- 5 | | 15 | | | 
+ 1 | | 11 | | |
+ 2 | | 12 | | |
+ 3 | | 13 | | |
+ 4 | | 14 | | |
+ 5 | | 15 | | |
 (5 rows)
 RESET client_min_messages;
@@ -2249,7 +2249,7 @@ BEGIN;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT s, s FROM generate_series(1, 5) s;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY 1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -2265,7 +2265,7 @@ BEGIN;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT s, s FROM generate_series(1, 5) s;
 SELECT user_id, value_1 FROM raw_events_first WHERE user_id = 1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 (1 row)
@@ -2278,7 +2278,7 @@ SELECT s AS u, 2*s AS v FROM generate_series(1, 5) s;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT u, v FROM raw_events_first_local;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 2
 2 | 4
@@ -2292,7 +2292,7 @@ TRUNCATE raw_events_first;
 INSERT INTO raw_events_first (value_1, user_id)
 SELECT u, v FROM raw_events_first_local;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 2 | 1
 4 | 2
@@ -2308,7 +2308,7 @@ INSERT INTO raw_events_first (value_3, user_id)
 UNION ALL
 ( SELECT v, u FROM raw_events_first_local );
 SELECT user_id, value_3 FROM raw_events_first ORDER BY user_id, value_3;
- user_id | value_3 
+ user_id | value_3
 ---------------------------------------------------------------------
 1 | 2
 1 | 2
@@ -2330,7 +2330,7 @@ SELECT s, 3*s FROM generate_series (1,5) s;
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT user_id, value_4 FROM raw_events_second LIMIT 5;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 3
 2 | 6
@@ -2348,7 +2348,7 @@ INSERT INTO raw_events_first (user_id, value_1)
 WITH value AS (SELECT 1)
 SELECT * FROM removed_rows, value;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 1
@@ -2380,7 +2380,7 @@ WITH ultra_rows AS (
 )
 SELECT u, v FROM ultra_rows;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 2
 2 | 4
@@ -2400,7 +2400,7 @@ WITH super_rows AS (
 )
 SELECT u, 5 FROM super_rows;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 0 | 5
 (1 row)
@@ -2413,7 +2413,7 @@ WITH user_two AS (
 INSERT INTO raw_events_first (user_id, value_1)
 SELECT * FROM user_two;
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 2 | 6
 (1 row)
@@ -2430,15 +2430,15 @@ SELECT * FROM numbers;
 -- Select into distributed table with a sequence
 CREATE TABLE "CaseSensitiveTable" ("UserID" int, "Value1" int);
 SELECT create_distributed_table('"CaseSensitiveTable"', 'UserID');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO "CaseSensitiveTable"
 SELECT s, s FROM generate_series(1,10) s;
 SELECT * FROM "CaseSensitiveTable" ORDER BY "UserID";
- UserID | Value1 
+ UserID | Value1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -2456,16 +2456,16 @@ DROP TABLE "CaseSensitiveTable";
 -- Select into distributed table with a sequence
 CREATE TABLE dist_table_with_sequence (user_id serial, value_1 serial);
 SELECT create_distributed_table('dist_table_with_sequence', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 -- from local query
 INSERT INTO dist_table_with_sequence (value_1)
 SELECT s FROM generate_series(1,5) s;
 SELECT * FROM dist_table_with_sequence ORDER BY user_id;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -2479,7 +2479,7 @@ INSERT INTO dist_table_with_sequence (value_1)
 SELECT value_1 FROM dist_table_with_sequence;
 ERROR: INSERT ... SELECT cannot generate sequence values when selecting from a distributed table
 SELECT * FROM dist_table_with_sequence ORDER BY user_id;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -2491,43 +2491,43 @@ SELECT * FROM dist_table_with_sequence ORDER BY user_id;
 -- Select from distributed table into reference table
 CREATE TABLE ref_table (user_id int, value_1 int);
 SELECT create_reference_table('ref_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO ref_table SELECT user_id, value_1 FROM raw_events_second;
 SELECT * FROM ref_table ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
- 1 | 
- 2 | 
- 3 | 
- 4 | 
- 5 | 
+ 1 |
+ 2 |
+ 3 |
+ 4 |
+ 5 |
 (5 rows)
 DROP TABLE ref_table;
 -- Select from reference table into reference table
 CREATE TABLE ref1 (d timestamptz);
 SELECT create_reference_table('ref1');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE ref2 (d date);
 SELECT create_reference_table('ref2');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO ref2 VALUES ('2017-10-31');
 INSERT INTO ref1 SELECT * FROM ref2;
 SELECT count(*) from ref1;
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -2535,7 +2535,7 @@ SELECT count(*) from ref1;
 -- also test with now()
 INSERT INTO ref1 SELECT now() FROM ref2;
 SELECT count(*) from ref1;
- count 
+ count
 ---------------------------------------------------------------------
 2
 (1 row)
@@ -2545,9 +2545,9 @@ DROP TABLE ref2;
 -- Select into an append-partitioned table is not supported
 CREATE TABLE insert_append_table (user_id int, value_4 bigint);
 SELECT create_distributed_table('insert_append_table', 'user_id', 'append');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO insert_append_table
 (user_id, value_4)
@@ -2566,7 +2566,7 @@ EXECUTE insert_prep(4);
 EXECUTE insert_prep(5);
 EXECUTE insert_prep(6);
 SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 3
 2 | 3
@@ -2581,7 +2581,7 @@ TRUNCATE raw_events_first;
 INSERT INTO test_view
 SELECT * FROM raw_events_second;
 SELECT user_id, value_4 FROM test_view ORDER BY user_id, value_4;
- user_id | value_4 
+ user_id | value_4
 ---------------------------------------------------------------------
 1 | 3
 2 | 6
@@ -2595,16 +2595,16 @@ DROP VIEW test_view;
 -- Make sure we handle dropped columns correctly
 CREATE TABLE drop_col_table (col1 text, col2 text, col3 text);
 SELECT create_distributed_table('drop_col_table', 'col2');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 ALTER TABLE drop_col_table DROP COLUMN col1;
 INSERT INTO drop_col_table (col3, col2)
 SELECT value_4, user_id FROM raw_events_second LIMIT 5;
 SELECT * FROM drop_col_table ORDER BY col2, col3;
- col2 | col3 
+ col2 | col3
 ---------------------------------------------------------------------
 1 | 3
 2 | 6
@@ -2615,7 +2615,7 @@ SELECT * FROM drop_col_table ORDER BY col2, col3;
 -- make sure the tuple went to the right shard
 SELECT * FROM drop_col_table WHERE col2 = '1';
- col2 | col3 
+ col2 | col3
 ---------------------------------------------------------------------
 1 | 3
 (1 row)
@@ -2624,16 +2624,16 @@ RESET client_min_messages;
 -- make sure casts are handled correctly
 CREATE TABLE coerce_events(user_id int, time timestamp, value_1 numeric);
 SELECT create_distributed_table('coerce_events', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE coerce_agg (user_id int, value_1_agg int);
 SELECT create_distributed_table('coerce_agg', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO coerce_events(user_id, value_1) VALUES (1, 1), (2, 2), (10, 10);
@@ -2656,7 +2656,7 @@ FROM (
 ) AS ftop
 LIMIT 5;
 SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC;
- user_id | value_1_agg 
+ user_id | value_1_agg
 ---------------------------------------------------------------------
 10 | 10
 10 | 10
@@ -2678,7 +2678,7 @@ FROM (
 LIMIT 5;
 ERROR: value too long for type character(1)
 SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC;
- user_id | value_1_agg 
+ user_id | value_1_agg
 ---------------------------------------------------------------------
 (0 rows)
@@ -2706,10 +2706,10 @@ FROM (
 ) AS ftop
 LIMIT 5;
 SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC;
- user_id | value_1_agg 
+ user_id | value_1_agg
 ---------------------------------------------------------------------
- 2 | b 
- 1 | a 
+ 2 | b
+ 1 | a
 (2 rows)
 TRUNCATE coerce_agg;
@@ -2729,7 +2729,7 @@ FROM (
 ERROR: new row for relation "coerce_agg_13300060" violates check constraint "small_number_13300060"
 \set VERBOSITY DEFAULT
 SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC;
- user_id | value_1_agg 
+ user_id | value_1_agg
 ---------------------------------------------------------------------
 (0 rows)
@@ -2747,7 +2747,7 @@ FROM (
 ) AS ftop
 LIMIT 5;
 SELECT * FROM coerce_agg ORDER BY 1 DESC, 2 DESC;
- user_id | value_1_agg 
+ user_id | value_1_agg
 ---------------------------------------------------------------------
 2 | {2,2,2}
 1 | {1,1,1}
diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out
index ac30e5043..6bc3c90cb 100644
--- a/src/test/regress/expected/multi_insert_select_conflict.out
+++ b/src/test/regress/expected/multi_insert_select_conflict.out
@@ -4,51 +4,51 @@ SET citus.next_shard_id TO 1900000;
 SET citus.shard_replication_factor TO 1;
 CREATE TABLE target_table(col_1 int primary key, col_2 int);
 SELECT create_distributed_table('target_table','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
 CREATE TABLE source_table_1(col_1 int primary key, col_2 int, col_3 int);
 SELECT create_distributed_table('source_table_1','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
 CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
 SELECT create_distributed_table('source_table_2','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
 SET client_min_messages to debug1;
 -- Generate series directly on the coordinator and on conflict do nothing
-INSERT INTO target_table (col_1, col_2) 
-SELECT 
- s, s 
-FROM 
- generate_series(1,10) s 
+INSERT INTO target_table (col_1, col_2)
+SELECT
+ s, s
+FROM
+ generate_series(1,10) s
 ON CONFLICT DO NOTHING;
 DEBUG: distributed INSERT ... SELECT can only select from distributed tables
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 -- Generate series directly on the coordinator and on conflict update the target table
-INSERT INTO target_table (col_1, col_2) 
-SELECT s, s 
-FROM 
- generate_series(1,10) s 
+INSERT INTO target_table (col_1, col_2)
+SELECT s, s
+FROM
+ generate_series(1,10) s
 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
 DEBUG: distributed INSERT ... SELECT can only select from distributed tables
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 -- Since partition columns do not match, pull the data to the coordinator
 -- and do not change conflicted values
 INSERT INTO target_table
-SELECT 
- col_2, col_3 
+SELECT
+ col_2, col_3
 FROM
 source_table_1
 ON CONFLICT DO NOTHING;
@@ -60,8 +60,8 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
 -- ordered result.
 WITH inserted_table AS (
 INSERT INTO target_table
- SELECT 
- col_2, col_3 
+ SELECT
+ col_2, col_3
 FROM
 source_table_1
 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
@@ -71,7 +71,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
 DETAIL: The target table's partition column should correspond to a partition column in the subquery.
 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
 DEBUG: Collecting INSERT ... SELECT results on coordinator
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -82,11 +82,11 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
 -- Subquery should be recursively planned due to the limit and do nothing on conflict
 INSERT INTO target_table
-SELECT 
+SELECT
 col_1, col_2
 FROM (
- SELECT 
- col_1, col_2, col_3 
+ SELECT
+ col_1, col_2, col_3
 FROM
 source_table_1
 LIMIT 5
@@ -101,11 +101,11 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT col_1, col_2 F
 -- Query is wrapped by CTE to return ordered result.
 WITH inserted_table AS (
 INSERT INTO target_table
- SELECT 
+ SELECT
 col_1, col_2
 FROM (
- SELECT 
- col_1, col_2, col_3 
+ SELECT
+ col_1, col_2, col_3
 FROM
 source_table_1
 LIMIT 5
@@ -119,7 +119,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 16_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
 DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -131,11 +131,11 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT col_1, col_2 F
 -- Test with multiple subqueries. Query is wrapped by CTE to return ordered result.
 WITH inserted_table AS (
 INSERT INTO target_table
- SELECT 
+ SELECT
 col_1, col_2
 FROM (
- (SELECT 
- col_1, col_2, col_3 
+ (SELECT
+ col_1, col_2, col_3
 FROM
 source_table_1
 LIMIT 5)
@@ -158,7 +158,7 @@ DEBUG: push down of limit count: 5
 DEBUG: generating subplan 20_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5
 DEBUG: generating subplan 20_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('20_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 0
 2 | 0
@@ -191,7 +191,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
 DEBUG: generating subplan 28_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1
 DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery
 SELECT * FROM target_table ORDER BY 1;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 2
 2 | 3
@@ -219,7 +219,7 @@ DEBUG: generating subplan 32_2 for CTE cte_2: SELECT col_1, col_2 FROM on_confl
 DEBUG: generating subplan 32_3 for subquery SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte UNION SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2
 DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('32_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) citus_insert_select_subquery
 SELECT * FROM target_table ORDER BY 1;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 2
 2 | 3
@@ -248,7 +248,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
 DEBUG: generating subplan 39_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
 DEBUG: generating subplan 39_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
 DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 2
 2 | 3
@@ -274,13 +274,13 @@ RESET client_min_messages;
 -- Following query is not supported since error checks of the subquery pushdown planner
 -- and insert select planner have not been unified. It should work after unifying them.
 WITH cte AS (
- SELECT 
+ SELECT
 col_1, col_2
- FROM 
+ FROM
 source_table_1
-) 
-INSERT INTO target_table 
-SELECT 
+)
+INSERT INTO target_table
+SELECT
 source_table_1.col_1, source_table_1.col_2
 FROM cte, source_table_1
@@ -290,9 +290,9 @@ DETAIL: Select query cannot be pushed down to the worker.
 -- Tests with foreign key to reference table
 CREATE TABLE test_ref_table (key int PRIMARY KEY);
 SELECT create_reference_table('test_ref_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO test_ref_table VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
@@ -300,9 +300,9 @@ ALTER TABLE target_table ADD CONSTRAINT fkey FOREIGN KEY (col_1) REFERENCES test
 BEGIN;
 TRUNCATE test_ref_table CASCADE;
 NOTICE: truncate cascades to table "target_table"
- INSERT INTO 
- target_table 
- SELECT 
+ INSERT INTO
+ target_table
+ SELECT
 col_2, col_1
 FROM source_table_1
 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
@@ -312,13 +312,13 @@ CONTEXT: while executing command on localhost:xxxxx
 ROLLBACK;
 BEGIN;
 DELETE FROM test_ref_table WHERE key > 10;
- INSERT INTO 
+ INSERT INTO
 target_table
- SELECT 
- col_2, 
- col_1 
+ SELECT
+ col_2,
+ col_1
 FROM source_table_1
 ON CONFLICT (col_1) DO UPDATE SET col_2 = 1 RETURNING *;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 1
 2 | 1
@@ -333,26 +333,26 @@ ROLLBACK;
 BEGIN;
 TRUNCATE test_ref_table CASCADE;
 NOTICE: truncate cascades to table "target_table"
- INSERT INTO 
+ INSERT INTO
 source_table_1
- SELECT 
+ SELECT
 col_2,
- col_1 
+ col_1
 FROM target_table
 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
- col_1 | col_2 | col_3 
+ col_1 | col_2 | col_3
 ---------------------------------------------------------------------
 (0 rows)
 ROLLBACK;
 BEGIN;
 DELETE FROM test_ref_table;
- INSERT INTO 
+ INSERT INTO
 source_table_1
- SELECT 
+ SELECT
 col_2, col_1
 FROM target_table
 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
- col_1 | col_2 | col_3 
+ col_1 | col_2 | col_3
 ---------------------------------------------------------------------
 (0 rows)
@@ -360,32 +360,32 @@ ROLLBACK;
 -- INSERT .. SELECT with different column types
 CREATE TABLE source_table_3(col_1 numeric, col_2 numeric, col_3 numeric);
 SELECT create_distributed_table('source_table_3','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_3 VALUES(1,11,1),(2,22,2),(3,33,3),(4,44,4),(5,55,5);
 CREATE TABLE source_table_4(id int, arr_val text[]);
 SELECT create_distributed_table('source_table_4','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_4 VALUES(1, '{"abc","cde","efg"}'), (2, '{"xyz","tvu"}');
 CREATE TABLE target_table_2(id int primary key, arr_val char(10)[]);
 SELECT create_distributed_table('target_table_2','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO target_table_2 VALUES(1, '{"abc","def","gyx"}');
 SET client_min_messages to debug1;
 INSERT INTO target_table
-SELECT 
- col_1, col_2 
+SELECT
+ col_1, col_2
 FROM
 source_table_3
 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2;
@@ -393,7 +393,7 @@ DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition
 DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery.
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 SELECT * FROM target_table ORDER BY 1;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 11
 2 | 22
@@ -408,13 +408,13 @@ SELECT * FROM target_table ORDER BY 1;
 (10 rows)
 INSERT INTO target_table_2
-SELECT 
- * 
+SELECT
+ *
 FROM
 source_table_4
 ON CONFLICT DO NOTHING;
 SELECT * FROM target_table_2 ORDER BY 1;
- id | arr_val 
+ id | arr_val
 ---------------------------------------------------------------------
 1 | {"abc ","def ","gyx "}
 2 | {"xyz ","tvu "}
@@ -426,45 +426,45 @@ SET citus.shard_replication_factor to 2;
 DROP TABLE target_table, source_table_1, source_table_2;
 CREATE TABLE target_table(col_1 int primary key, col_2 int);
 SELECT create_distributed_table('target_table','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
 CREATE TABLE source_table_1(col_1 int, col_2 int, col_3 int);
 SELECT create_distributed_table('source_table_1','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
 CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
 SELECT create_distributed_table('source_table_2','col_1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
 SET client_min_messages to debug1;
 -- Generate series directly on the coordinator and on conflict do nothing
-INSERT INTO target_table (col_1, col_2) 
-SELECT 
- s, s 
-FROM 
- generate_series(1,10) s 
+INSERT INTO target_table (col_1, col_2)
+SELECT
+ s, s
+FROM
+ generate_series(1,10) s
 ON CONFLICT DO NOTHING;
 DEBUG: distributed INSERT ... SELECT can only select from distributed tables
 DEBUG: Collecting INSERT ... SELECT results on coordinator
 -- Test with multiple subqueries
 INSERT INTO target_table
-SELECT 
+SELECT
 col_1, col_2
 FROM (
- (SELECT 
- col_1, col_2, col_3 
+ (SELECT
+ col_1, col_2, col_3
 FROM
 source_table_1
 LIMIT 5)
@@ -485,7 +485,7 @@ DEBUG: generating subplan 71_2 for subquery SELECT col_1, col_2, col_3 FROM on_
 DEBUG: generating subplan 71_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
 DEBUG: Plan 71 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('71_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
 SELECT * FROM target_table ORDER BY 1;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 0
 2 | 0
@@ -511,7 +511,7 @@ DEBUG: generating subplan 77_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_
 DEBUG: generating subplan 77_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
 DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
 SELECT * FROM target_table ORDER BY 1;
- col_1 | col_2 
+ col_1 | col_2
 ---------------------------------------------------------------------
 1 | 2
 2 | 3
diff --git a/src/test/regress/expected/multi_insert_select_window.out b/src/test/regress/expected/multi_insert_select_window.out
index c069c4421..aa3dea909 100644
--- a/src/test/regress/expected/multi_insert_select_window.out
+++ b/src/test/regress/expected/multi_insert_select_window.out
@@ -14,7 +14,7 @@ FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 101 | 6 | 3.2079207920792079
 (1 row)
@@ -33,7 +33,7 @@ FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 101 | 6 | 3.2079207920792079
 (1 row)
@@ -52,7 +52,7 @@ FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 101 | 6 | 3.2079207920792079
 (1 row)
@@ -74,7 +74,7 @@ FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 12 | 6 | 3.5000000000000000
 (1 row)
@@ -95,7 +95,7 @@ GROUP BY
 lag_event_type;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 8 | 2 | 1.1250000000000000
 (1 row)
@@ -116,7 +116,7 @@ SELECT * FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 30 | 6 | 3.4000000000000000
 (1 row)
@@ -138,7 +138,7 @@ SELECT * FROM
 ) as foo;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 20 | 6 | 3.3500000000000000
 (1 row)
@@ -175,7 +175,7 @@ JOIN
 sub_1.user_id;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -201,7 +201,7 @@ GROUP BY
 my_rank;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 1 | 1 | 4.0000000000000000
 (1 row)
@@ -227,7 +227,7 @@ GROUP BY
 my_rank;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 2 | 2 | 3.5000000000000000
 (1 row)
@@ -252,7 +252,7 @@ GROUP BY
 my_rank;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 1 | 1 | 4.0000000000000000
 (1 row)
@@ -274,7 +274,7 @@ LIMIT
 10;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -292,7 +292,7 @@ SELECT user_id, max(sum) FROM (
 GROUP BY user_id;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -318,7 +318,7 @@ GROUP BY
 user_id;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 4 | 4 | 2.5000000000000000
 (1 row)
@@ -338,7 +338,7 @@ SELECT * FROM (
 ) a;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -361,7 +361,7 @@ GROUP BY
 user_id, rank;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 32 | 6 | 3.5937500000000000
 (1 row)
@@ -392,7 +392,7 @@ WHERE
 ) a;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -410,7 +410,7 @@ FROM
 ) a;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -428,7 +428,7 @@ SELECT * FROM (
 ) a;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 26 | 6 | 3.7692307692307692
 (1 row)
@@ -450,7 +450,7 @@ LIMIT
 10;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 10 | 5 | 3.8000000000000000
 (1 row)
@@ -471,7 +471,7 @@ FROM
 view_with_window_func;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 26 | 6 | 3.7692307692307692
 (1 row)
@@ -487,7 +487,7 @@ LIMIT
 -- get some statistics from the aggregated results to ensure the results are correct
 -- since there is a limit but not order, we cannot run avg(user_id)
 SELECT count(*) FROM agg_results_window;
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -517,7 +517,7 @@ LIMIT
 -- get some statistics from the aggregated results to ensure the results are correct
 -- since there is a limit but not order, we cannot test avg or distinct count
 SELECT count(*) FROM agg_results_window;
- count 
+ count
 ---------------------------------------------------------------------
 5
 (1 row)
@@ -544,7 +544,7 @@ GROUP BY
 user_id;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
@@ -589,7 +589,7 @@ LIMIT
 -- get some statistics from the aggregated results to ensure the results are correct
 -- since there is a limit but not order, we cannot test avg or distinct count
 SELECT count(*) FROM agg_results_window;
- count 
+ count
 ---------------------------------------------------------------------
 5
 (1 row)
@@ -631,7 +631,7 @@ FROM (
 ) AS ftop;
 -- get some statistics from the aggregated results to ensure the results are correct
 SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_window;
- count | count | avg 
+ count | count | avg
 ---------------------------------------------------------------------
 6 | 6 | 3.5000000000000000
 (1 row)
diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out
index 6b4e7e6d3..86aec27af 100644
--- a/src/test/regress/expected/multi_join_order_additional.out
+++ b/src/test/regress/expected/multi_join_order_additional.out
@@ -30,9 +30,9 @@ CREATE TABLE lineitem_hash (
 l_comment varchar(44) not null,
 PRIMARY KEY(l_orderkey, l_linenumber) );
 SELECT create_distributed_table('lineitem_hash', 'l_orderkey');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate);
@@ -48,9 +48,9 @@ CREATE TABLE orders_hash (
 o_comment varchar(79) not null,
 PRIMARY KEY(o_orderkey) );
 SELECT create_distributed_table('orders_hash', 'o_orderkey');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 CREATE TABLE customer_hash (
@@ -63,9 +63,9 @@ CREATE TABLE customer_hash (
 c_mktsegment char(10) not null,
 c_comment varchar(117) not null);
 SELECT create_distributed_table('customer_hash', 'c_custkey');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 SET client_min_messages TO DEBUG2;
@@ -76,7 +76,7 @@ DEBUG: Router planner does not support append-partitioned tables.
 LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
 DEBUG: join prunable for intervals [1,5986] and [8997,14947]
 DEBUG: join prunable for intervals [8997,14947] and [1,5986]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
 explain statements for distributed queries are not enabled
@@ -90,7 +90,7 @@ EXPLAIN SELECT count(*) FROM lineitem, orders
 WHERE (l_orderkey = o_orderkey AND l_quantity > 5)
 OR (l_orderkey = o_orderkey AND l_quantity < 10);
 LOG: join order: [ "lineitem" ][ local partition join "orders" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -103,7 +103,7 @@ ERROR: complex joins are only supported when all distributed tables are joined
 EXPLAIN SELECT count(*) FROM orders, lineitem_hash
 WHERE o_orderkey = l_orderkey;
 LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -114,7 +114,7 @@ LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ]
 EXPLAIN SELECT count(*) FROM orders_hash, lineitem_hash
 WHERE o_orderkey = l_orderkey;
 LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -125,7 +125,7 @@ LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ]
 EXPLAIN SELECT count(*) FROM customer_hash, nation
 WHERE c_nationkey = n_nationkey;
 LOG: join order: [ "customer_hash" ][ reference join "nation" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -137,7 +137,7 @@ LOG: join order: [ "customer_hash" ][ reference join "nation" ]
 EXPLAIN SELECT count(*) FROM orders, lineitem, customer_append
 WHERE o_custkey = l_partkey AND o_custkey = c_nationkey;
 LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer_append" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -149,7 +149,7 @@ LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition
 EXPLAIN SELECT count(*) FROM orders, customer_hash
 WHERE c_custkey = o_custkey;
 LOG: join order: [ "orders" ][ dual partition join "customer_hash" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -161,7 +161,7 @@ LOG: join order: [ "orders" ][ dual partition join "customer_hash" ]
 EXPLAIN SELECT count(*) FROM orders_hash, customer_append
 WHERE c_custkey = o_custkey;
 LOG: join order: [ "orders_hash" ][ single range partition join "customer_append" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out
index dc60b221d..24beca674 100644
--- a/src/test/regress/expected/multi_join_order_tpch_repartition.out
+++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out
@@ -21,7 +21,7 @@ WHERE
 and l_discount between 0.06 - 0.01 and 0.06 + 0.01
 and l_quantity < 24;
 LOG: join order: [ "lineitem" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -52,7 +52,7 @@ ORDER BY
 revenue DESC,
 o_orderdate;
 LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Sort (cost=0.00..0.00 rows=0 width=0)
 Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate
@@ -95,7 +95,7 @@ GROUP BY
 ORDER BY
 revenue DESC;
 LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range partition join "customer_append" ][ reference join "nation" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Sort (cost=0.00..0.00 rows=0 width=0)
 Sort Key: (sum(remote_scan.revenue)) DESC
@@ -136,7 +136,7 @@ WHERE
 AND l_shipinstruct = 'DELIVER IN PERSON' );
 LOG: join order: [ "lineitem" ][ single range partition join "part_append" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
@@ -155,7 +155,7 @@ WHERE
 GROUP BY l_partkey;
 LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single range partition join "part_append" ][ single range partition join "customer_append" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate (cost=0.00..0.00 rows=0 width=0)
 Group Key: remote_scan.l_partkey
diff --git a/src/test/regress/expected/multi_join_order_tpch_small.out b/src/test/regress/expected/multi_join_order_tpch_small.out
index 1f0b58a14..b7ccdaafe 100644
--- a/src/test/regress/expected/multi_join_order_tpch_small.out
+++ b/src/test/regress/expected/multi_join_order_tpch_small.out
@@ -16,7 +16,7 @@ WHERE
 and l_discount between 0.06 - 0.01 and 0.06 + 0.01
 and l_quantity < 24;
 LOG: join order: [ "lineitem" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
@@ -47,7 +47,7 @@ ORDER BY
 revenue DESC,
 o_orderdate;
 LOG: join order: [ "orders" ][ reference join "customer" ][ local partition join "lineitem" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Sort (cost=0.00..0.00 rows=0 width=0)
 Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate
@@ -90,7 +90,7 @@ GROUP BY
 ORDER BY
 revenue DESC;
 LOG: join order: [ "orders" ][ reference join "customer" ][ reference join "nation" ][ local partition join "lineitem" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Sort (cost=0.00..0.00 rows=0 width=0)
 Sort Key: (sum(remote_scan.revenue)) DESC
@@ -131,7 +131,7 @@ WHERE
 AND l_shipinstruct = 'DELIVER IN PERSON' );
 LOG: join order: [ "lineitem" ][ reference join "part" ]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out
index fbd706f29..59160fe10 100644
--- a/src/test/regress/expected/multi_join_pruning.out
+++ b/src/test/regress/expected/multi_join_pruning.out
@@ -11,7 +11,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 DEBUG: Router planner does not support append-partitioned tables.
 DEBUG: join prunable for intervals [1,5986] and [8997,14947]
 DEBUG: join prunable for intervals [8997,14947] and [1,5986]
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
 36089 | 3.0074166666666667
 (1 row)
@@ -20,7 +20,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_orderkey = o_orderkey AND l_orderkey > 9030;
 DEBUG: Router planner does not support append-partitioned tables.
 DEBUG: join prunable for intervals [8997,14947] and [1,5986]
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
 17999 | 3.0189533713518953
 (1 row)
@@ -30,9 +30,9 @@ DEBUG: join prunable for intervals [8997,14947] and [1,5986]
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_orderkey = o_orderkey AND l_orderkey > 20000;
 DEBUG: Router planner does not support append-partitioned tables.
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
- | 
+ |
 (1 row)
 -- Partition pruning left three shards for the lineitem and one shard for the
@@ -41,27 +41,27 @@ DEBUG: Router planner does not support append-partitioned tables.
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000;
 DEBUG: Router planner does not support append-partitioned tables.
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
- | 
+ |
 (1 row)
 -- Make sure that we can handle filters without a column
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_orderkey = o_orderkey AND false;
 DEBUG: Router planner does not support append-partitioned tables.
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
- | 
+ |
 (1 row)
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem
 INNER JOIN orders ON (l_orderkey = o_orderkey)
 WHERE false;
 DEBUG: Router planner does not support append-partitioned tables.
- sum | avg 
+ sum | avg
 ---------------------------------------------------------------------
- | 
+ |
 (1 row)
 -- These tests check that we can do join pruning for tables partitioned over
@@ -74,7 +74,7 @@ EXPLAIN SELECT count(*)
 DEBUG: Router planner does not support append-partitioned tables.
 DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}]
 DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
@@ -87,7 +87,7 @@ EXPLAIN SELECT count(*)
 DEBUG: Router planner does not support append-partitioned tables.
 DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)]
 DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
@@ -101,7 +101,7 @@ EXPLAIN SELECT count(*)
 DEBUG: Router planner does not support append-partitioned tables.
 DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6]
 DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U2AMO4ZGX,AZZXSP27F21T6]
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
 -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
diff --git a/src/test/regress/expected/multi_json_agg.out b/src/test/regress/expected/multi_json_agg.out
index 397ccc609..16503104c 100644
--- a/src/test/regress/expected/multi_json_agg.out
+++ b/src/test/regress/expected/multi_json_agg.out
@@ -12,7 +12,7 @@ $$;
 -- Check multi_cat_agg() aggregate which is used to implement json_agg()
 SELECT json_cat_agg(i) FROM
 (VALUES ('[1,{"a":2}]'::json), ('[null]'::json), (NULL), ('["3",5,4]'::json)) AS t(i);
- json_cat_agg 
+ json_cat_agg
 ---------------------------------------------------------------------
 [1, {"a":2}, null, "3", 5, 4]
 (1 row)
@@ -27,7 +27,7 @@ ERROR: json_agg with order by is unsupported
 -- Check json_agg() for different data types and LIMIT clauses
 SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey
 ORDER BY l_orderkey LIMIT 10;
- array_sort 
+ array_sort
 ---------------------------------------------------------------------
 [2132, 15635, 24027, 63700, 67310, 155190]
 [106170]
@@ -43,7 +43,7 @@ SELECT array_sort(json_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey
 SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey
 ORDER BY l_orderkey LIMIT 10;
- array_sort 
+ array_sort
 ---------------------------------------------------------------------
 [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16]
 [44694.46]
@@ -59,7 +59,7 @@ SELECT array_sort(json_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey
 SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey
 ORDER BY l_orderkey LIMIT 10;
- array_sort 
+ array_sort
 ---------------------------------------------------------------------
 ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"]
 ["1997-01-28"]
@@ -75,7 +75,7 @@ SELECT array_sort(json_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey
 SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey
 ORDER BY l_orderkey LIMIT 10;
- array_sort 
+ array_sort
 ---------------------------------------------------------------------
 ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "]
 ["RAIL "]
@@ -91,7 +91,7 @@ SELECT array_sort(json_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey
 -- Check that we can execute json_agg() within other functions
 SELECT json_array_length(json_agg(l_orderkey)) FROM lineitem;
- json_array_length 
+ json_array_length
 ---------------------------------------------------------------------
 12000
 (1 row)
@@ -103,7 +103,7 @@ SELECT json_array_length(json_agg(l_orderkey)) FROM lineitem;
 SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(json_agg(l_orderkey)) FROM lineitem
 WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500
 GROUP BY l_quantity ORDER BY l_quantity;
- l_quantity | count | avg | array_sort 
+ l_quantity | count | avg | array_sort
 ---------------------------------------------------------------------
 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476]
 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476]
@@ -114,7 +114,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(json_agg(l_orderke
 SELECT l_quantity, array_sort(json_agg(extract (month FROM o_orderdate))) AS my_month
 FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5
 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
- l_quantity | my_month 
+ l_quantity | my_month
 ---------------------------------------------------------------------
 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11]
 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12]
@@ -125,7 +125,7 @@ SELECT l_quantity, array_sort(json_agg(extract (month FROM o_orderdate))) AS my_
 SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE
 l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40
 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity;
- l_quantity | array_sort 
+ l_quantity | array_sort
 ---------------------------------------------------------------------
 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445]
 2.00 | [11847, 18061, 18247, 18953]
@@ -136,7 +136,7 @@ SELECT l_quantity, array_sort(json_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE
 -- Check that we can execute json_agg() with an expression containing NULL values
 SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end)
 FROM lineitem WHERE l_orderkey < 5;
- json_agg 
+ json_agg
 ---------------------------------------------------------------------
 [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00]
 (1 row)
@@ -144,7 +144,7 @@ SELECT json_agg(case when l_quantity > 20 then l_quantity else NULL end)
 -- Check that we can execute json_agg() with an expression containing different types
 SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end)
 FROM lineitem WHERE l_orderkey < 5;
- json_agg 
+ json_agg
 ---------------------------------------------------------------------
 ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00]
 (1 row)
@@ -152,7 +152,7 @@ SELECT json_agg(case when l_quantity > 20 then to_json(l_quantity) else '"f"'::j
 -- Check that we can execute json_agg() with an expression containing json arrays
 SELECT json_agg(json_build_array(l_quantity, l_shipdate))
 FROM lineitem WHERE l_orderkey < 3;
- json_agg 
+ json_agg
--------------------------------------------------------------------- [[17.00, "1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) @@ -160,7 +160,7 @@ SELECT json_agg(json_build_array(l_quantity, l_shipdate)) -- Check that we can execute json_agg() with an expression containing arrays SELECT json_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - json_agg + json_agg --------------------------------------------------------------------- [[17.00,1], + [36.00,1], + @@ -173,8 +173,8 @@ SELECT json_agg(ARRAY[l_quantity, l_orderkey]) -- Check that we return NULL in case there are no input rows to json_agg() SELECT json_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - json_agg + json_agg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_json_object_agg.out b/src/test/regress/expected/multi_json_object_agg.out index b4f65eee8..adb4d7ee1 100644 --- a/src/test/regress/expected/multi_json_object_agg.out +++ b/src/test/regress/expected/multi_json_object_agg.out @@ -17,7 +17,7 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement json_object_agg() SELECT json_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::json), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::json)) AS t(i); - json_cat_agg + json_cat_agg --------------------------------------------------------------------- { "c" : [], "b" : 2, "d" : null, "a" : {"b":3}, "b" : 2 } (1 row) @@ -33,7 +33,7 @@ ERROR: json_object_agg with order by is unsupported SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : 155190, "12" : 67310, "13" : 63700, "14" : 2132, "15" : 24027, "16" : 15635 } { "21" : 106170 } @@ -50,7 +50,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_partk SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : 21168.23, "12" : 45983.16, "13" : 13309.60, "14" : 28955.64, "15" : 22824.48, "16" : 49620.16 } { "21" : 44694.46 } @@ -67,7 +67,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_exten SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : "TRUCK ", "12" : "MAIL ", "13" : "REG AIR ", "14" : "AIR ", "15" : "FOB ", "16" : "MAIL " } { "21" : "RAIL " } @@ -84,7 +84,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipm SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : "1996-03-13", "12" : "1996-04-12", "13" : "1996-01-29", "14" : "1996-04-21", "15" : "1996-03-30", "16" : "1996-01-30" } { "21" : "1997-01-28" } @@ -100,7 +100,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, l_shipd -- Check that we can execute 
json_object_agg() within other functions SELECT count_keys(json_object_agg(l_shipdate, l_orderkey)) FROM lineitem; - count_keys + count_keys --------------------------------------------------------------------- 12000 (1 row) @@ -114,7 +114,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | keys_sort + l_quantity | count | avg | keys_sort --------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | { "50635" : "1997-09-03", "51551" : "1994-07-03", "51872" : "1997-08-08", "52221" : "1994-08-19", "52832" : "1994-06-20", "52855" : "1994-03-14", "52856" : "1994-02-08", "52861" : "1997-11-25" } 2.00 | 8 | 2990.9825000000000000 | { "50292" : "1992-11-25", "50633" : "1997-06-17", "50904" : "1997-04-07", "50952" : "1992-07-09", "51216" : "1992-08-10", "52191" : "1997-06-26", "52501" : "1995-08-09", "52551" : "1996-09-27" } @@ -126,7 +126,7 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t extract (month FROM o_orderdate))) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | keys_sort + l_quantity | keys_sort --------------------------------------------------------------------- 1.00 | { "50635" : 5, "51551" : 6, "51872" : 7, "52221" : 5, "52832" : 6, "52855" : 1, "52856" : 1, "52861" : 9 } 2.00 | { "50292" : 11, "50633" : 5, "50904" : 3, "50952" : 4, "51216" : 5, "52191" : 2, "52501" : 7, "52551" : 7 } @@ -138,7 +138,7 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | keys_sort + l_quantity | keys_sort --------------------------------------------------------------------- 1.00 | { "51551" : 10311, "52221" : 10445, "52855" : 10571, "56345" : 11269, "56986" : 11397, "58561" : 11713, "58573" : 11715, "59863" : 11973 } 2.00 | { "52191" : 10439, "53513" : 10703, "59233" : 11847 } @@ -150,7 +150,7 @@ SELECT l_quantity, keys_sort(json_object_agg(l_orderkey::text || l_linenumber::t SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end)) FROM lineitem WHERE l_orderkey < 5; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : null, "12" : 36.00, "13" : null, "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : null, "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) @@ -159,7 +159,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_json(l_quantity) else '"f"'::json end)) FROM lineitem WHERE l_orderkey < 5; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : "f", "12" : 36.00, "13" : "f", "14" : 28.00, "15" : 24.00, "16" : 32.00, "21" : 38.00, "31" : 45.00, "32" : 49.00, "33" : 27.00, "34" : "f", "35" : 28.00, "36" : 26.00, "41" : 30.00 } (1 row) @@ -167,7 +167,7 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, -- Check that we can execute 
json_object_agg() with an expression containing json arrays SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_build_array(l_quantity, l_shipdate))) FROM lineitem WHERE l_orderkey < 3; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : [17.00, "1996-03-13"], "12" : [36.00, "1996-04-12"], "13" : [8.00, "1996-01-29"], "14" : [28.00, "1996-04-21"], "15" : [24.00, "1996-03-30"], "16" : [32.00, "1996-01-30"], "21" : [38.00, "1997-01-28"] } (1 row) @@ -175,15 +175,15 @@ SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, json_bu -- Check that we can execute json_object_agg() with an expression containing arrays SELECT keys_sort(json_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey])) FROM lineitem WHERE l_orderkey < 3; - keys_sort + keys_sort --------------------------------------------------------------------- { "11" : [17.00,1], "12" : [36.00,1], "13" : [8.00,1], "14" : [28.00,1], "15" : [24.00,1], "16" : [32.00,1], "21" : [38.00,2] } (1 row) -- Check that we return NULL in case there are no input rows to json_object_agg() SELECT json_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; - json_object_agg + json_object_agg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_jsonb_agg.out b/src/test/regress/expected/multi_jsonb_agg.out index 3c1c63322..79d787d86 100644 --- a/src/test/regress/expected/multi_jsonb_agg.out +++ b/src/test/regress/expected/multi_jsonb_agg.out @@ -12,7 +12,7 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement jsonb_agg() SELECT jsonb_cat_agg(i) FROM (VALUES ('[1,{"a":2}]'::jsonb), ('[null]'::jsonb), (NULL), ('["3",5,4]'::jsonb)) AS t(i); - jsonb_cat_agg + jsonb_cat_agg --------------------------------------------------------------------- [1, {"a": 2}, null, "3", 5, 4] (1 row) @@ -27,7 +27,7 @@ ERROR: jsonb_agg with order by is unsupported -- Check jsonb_agg() for different data types and LIMIT clauses SELECT array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- [2132, 15635, 24027, 63700, 67310, 155190] [106170] @@ -43,7 +43,7 @@ SELECT array_sort(jsonb_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- [13309.60, 21168.23, 22824.48, 28955.64, 45983.16, 49620.16] [44694.46] @@ -59,7 +59,7 @@ SELECT array_sort(jsonb_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- ["1996-01-29", "1996-01-30", "1996-03-13", "1996-03-30", "1996-04-12", "1996-04-21"] ["1997-01-28"] @@ -75,7 +75,7 @@ SELECT array_sort(jsonb_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - array_sort + array_sort --------------------------------------------------------------------- ["AIR ", "FOB ", "MAIL ", "MAIL ", "REG AIR ", "TRUCK "] ["RAIL "] @@ -91,7 +91,7 @@ SELECT array_sort(jsonb_agg(l_shipmode)) FROM lineitem 
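-- The array_sort()/keys_sort() wrappers used throughout these files (their
-- definitions are elided in the hunks) exist because each worker returns its
-- shard's rows in no particular order, so the concatenated aggregate output is
-- only comparable after sorting. A jsonb-flavoured stand-in with the same
-- intent (hypothetical name, not the suite's actual helper):
CREATE FUNCTION array_sort_sketch(input jsonb)
RETURNS jsonb LANGUAGE sql IMMUTABLE AS $fn$
  SELECT jsonb_agg(elem ORDER BY elem)  -- jsonb ordering compares numbers numerically
  FROM jsonb_array_elements(input) AS t(elem)
$fn$;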
GROUP BY l_orderkey -- Check that we can execute jsonb_agg() within other functions SELECT jsonb_array_length(jsonb_agg(l_orderkey)) FROM lineitem; - jsonb_array_length + jsonb_array_length --------------------------------------------------------------------- 12000 (1 row) @@ -103,7 +103,7 @@ SELECT jsonb_array_length(jsonb_agg(l_orderkey)) FROM lineitem; SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(jsonb_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | array_sort + l_quantity | count | avg | array_sort --------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | [5543, 5633, 5634, 5698, 5766, 5856, 5857, 5986, 8997, 9026, 9158, 9184, 9220, 9222, 9348, 9383, 9476] 2.00 | 19 | 3078.4242105263157895 | [5506, 5540, 5573, 5669, 5703, 5730, 5798, 5831, 5893, 5920, 5923, 9030, 9058, 9123, 9124, 9188, 9344, 9441, 9476] @@ -114,7 +114,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(jsonb_agg(l_orderk SELECT l_quantity, array_sort(jsonb_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | my_month + l_quantity | my_month --------------------------------------------------------------------- 1.00 | [2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 7, 7, 9, 9, 11, 11] 2.00 | [1, 3, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 10, 10, 11, 11, 11, 12, 12] @@ -125,7 +125,7 @@ SELECT l_quantity, array_sort(jsonb_agg(extract (month FROM o_orderdate))) AS my SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | array_sort + l_quantity | array_sort --------------------------------------------------------------------- 1.00 | [11269, 11397, 11713, 11715, 11973, 18317, 18445] 2.00 | [11847, 18061, 18247, 18953] @@ -136,7 +136,7 @@ SELECT l_quantity, array_sort(jsonb_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE -- Check that we can execute jsonb_agg() with an expression containing NULL values SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; - jsonb_agg + jsonb_agg --------------------------------------------------------------------- [null, 36.00, null, 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, null, 28.00, 26.00, 30.00] (1 row) @@ -144,7 +144,7 @@ SELECT jsonb_agg(case when l_quantity > 20 then l_quantity else NULL end) -- Check that we can execute jsonb_agg() with an expression containing different types SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; - jsonb_agg + jsonb_agg --------------------------------------------------------------------- ["f", 36.00, "f", 28.00, 24.00, 32.00, 38.00, 45.00, 49.00, 27.00, "f", 28.00, 26.00, 30.00] (1 row) @@ -152,7 +152,7 @@ SELECT jsonb_agg(case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"': -- Check that we can execute jsonb_agg() with an expression containing jsonb arrays SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE l_orderkey < 3; - jsonb_agg + jsonb_agg --------------------------------------------------------------------- [[17.00, 
"1996-03-13"], [36.00, "1996-04-12"], [8.00, "1996-01-29"], [28.00, "1996-04-21"], [24.00, "1996-03-30"], [32.00, "1996-01-30"], [38.00, "1997-01-28"]] (1 row) @@ -160,15 +160,15 @@ SELECT jsonb_agg(jsonb_build_array(l_quantity, l_shipdate)) -- Check that we can execute jsonb_agg() with an expression containing arrays SELECT jsonb_agg(ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - jsonb_agg + jsonb_agg --------------------------------------------------------------------- [[17.00, 1], [36.00, 1], [8.00, 1], [28.00, 1], [24.00, 1], [32.00, 1], [38.00, 2]] (1 row) -- Check that we return NULL in case there are no input rows to jsonb_agg() SELECT jsonb_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; - jsonb_agg + jsonb_agg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_jsonb_object_agg.out b/src/test/regress/expected/multi_jsonb_object_agg.out index 25882488a..0a85c7dbd 100644 --- a/src/test/regress/expected/multi_jsonb_object_agg.out +++ b/src/test/regress/expected/multi_jsonb_object_agg.out @@ -10,7 +10,7 @@ $$; -- Check multi_cat_agg() aggregate which is used to implement jsonb_object_agg() SELECT jsonb_cat_agg(i) FROM (VALUES ('{"c":[], "b":2}'::jsonb), (NULL), ('{"d":null, "a":{"b":3}, "b":2}'::jsonb)) AS t(i); - jsonb_cat_agg + jsonb_cat_agg --------------------------------------------------------------------- {"a": {"b": 3}, "b": 2, "c": [], "d": null} (1 row) @@ -26,7 +26,7 @@ ERROR: jsonb_object_agg with order by is unsupported SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_partkey) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": 155190, "12": 67310, "13": 63700, "14": 2132, "15": 24027, "16": 15635} {"21": 106170} @@ -43,7 +43,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_partkey) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": 21168.23, "12": 45983.16, "13": 13309.60, "14": 28955.64, "15": 22824.48, "16": 49620.16} {"21": 44694.46} @@ -60,7 +60,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_extendedprice) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": "TRUCK ", "12": "MAIL ", "13": "REG AIR ", "14": "AIR ", "15": "FOB ", "16": "MAIL "} {"21": "RAIL "} @@ -77,7 +77,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipmode) SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": "1996-03-13", "12": "1996-04-12", "13": "1996-01-29", "14": "1996-04-21", "15": "1996-03-30", "16": "1996-01-30"} {"21": "1997-01-28"} @@ -93,7 +93,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_shipdate) -- Check that we can execute jsonb_object_agg() within other functions SELECT count_keys(jsonb_object_agg(l_shipdate, l_orderkey)) FROM lineitem; - 
count_keys + count_keys --------------------------------------------------------------------- 2470 (1 row) @@ -107,7 +107,7 @@ SELECT l_quantity, count(*), avg(l_extendedprice), FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | count | avg | jsonb_object_agg + l_quantity | count | avg | jsonb_object_agg --------------------------------------------------------------------- 1.00 | 8 | 1748.3387500000000000 | {"50635": "1997-09-03", "51551": "1994-07-03", "51872": "1997-08-08", "52221": "1994-08-19", "52832": "1994-06-20", "52855": "1994-03-14", "52856": "1994-02-08", "52861": "1997-11-25"} 2.00 | 8 | 2990.9825000000000000 | {"50292": "1992-11-25", "50633": "1997-06-17", "50904": "1997-04-07", "50952": "1992-07-09", "51216": "1992-08-10", "52191": "1997-06-26", "52501": "1995-08-09", "52551": "1996-09-27"} @@ -119,7 +119,7 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, extract (month FROM o_orderdate)) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5000 AND l_orderkey < 5300 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | jsonb_object_agg + l_quantity | jsonb_object_agg --------------------------------------------------------------------- 1.00 | {"50635": 5, "51551": 6, "51872": 7, "52221": 5, "52832": 6, "52855": 1, "52856": 1, "52861": 9} 2.00 | {"50292": 11, "50633": 5, "50904": 3, "50952": 4, "51216": 5, "52191": 2, "52501": 7, "52551": 7} @@ -131,7 +131,7 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_or FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5000 AND l_orderkey < 6000 GROUP BY l_quantity ORDER BY l_quantity; - l_quantity | jsonb_object_agg + l_quantity | jsonb_object_agg --------------------------------------------------------------------- 1.00 | {"51551": 10311, "52221": 10445, "52855": 10571, "56345": 11269, "56986": 11397, "58561": 11713, "58573": 11715, "59863": 11973} 2.00 | {"52191": 10439, "53513": 10703, "59233": 11847} @@ -143,7 +143,7 @@ SELECT l_quantity, jsonb_object_agg(l_orderkey::text || l_linenumber::text, l_or SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 5; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": null, "12": 36.00, "13": null, "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": null, "35": 28.00, "36": 26.00, "41": 30.00} (1 row) @@ -152,7 +152,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, case when l_quantity > 20 then to_jsonb(l_quantity) else '"f"'::jsonb end) FROM lineitem WHERE l_orderkey < 5; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": "f", "12": 36.00, "13": "f", "14": 28.00, "15": 24.00, "16": 32.00, "21": 38.00, "31": 45.00, "32": 49.00, "33": 27.00, "34": "f", "35": 28.00, "36": 26.00, "41": 30.00} (1 row) @@ -160,7 +160,7 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, -- Check that we can execute jsonb_object_agg() with an expression containing jsonb arrays SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_array(l_quantity, l_shipdate)) FROM lineitem WHERE 
l_orderkey < 3; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": [17.00, "1996-03-13"], "12": [36.00, "1996-04-12"], "13": [8.00, "1996-01-29"], "14": [28.00, "1996-04-21"], "15": [24.00, "1996-03-30"], "16": [32.00, "1996-01-30"], "21": [38.00, "1997-01-28"]} (1 row) @@ -168,15 +168,15 @@ SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, jsonb_build_arra -- Check that we can execute jsonb_object_agg() with an expression containing arrays SELECT jsonb_object_agg(l_orderkey::text || l_linenumber::text, ARRAY[l_quantity, l_orderkey]) FROM lineitem WHERE l_orderkey < 3; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- {"11": [17.00, 1], "12": [36.00, 1], "13": [8.00, 1], "14": [28.00, 1], "15": [24.00, 1], "16": [32.00, 1], "21": [38.00, 2]} (1 row) -- Check that we return NULL in case there are no input rows to jsonb_object_agg() SELECT jsonb_object_agg(l_shipdate, l_orderkey) FROM lineitem WHERE l_quantity < 0; - jsonb_object_agg + jsonb_object_agg --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index c58df4b2e..6d891ddde 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -3,9 +3,9 @@ -- CREATE TABLE lineitem_hash (LIKE lineitem); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO lineitem_hash SELECT * FROM lineitem; @@ -17,7 +17,7 @@ SET client_min_messages TO DEBUG1; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 219 | 13.00 222 | 29.00 @@ -55,7 +55,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 273 | 28.00 264 | 30.00 @@ -93,7 +93,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 219 | 13.00 222 | 29.00 @@ -105,7 +105,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 10; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 219 | 13.00 222 | 29.00 @@ -122,7 +122,7 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY 
count_quantity DESC, l_quantity DESC LIMIT 10; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 273 | 28.00 264 | 30.00 @@ -139,34 +139,34 @@ SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 -- Check that we can handle limits for simple sort clauses. We order by columns -- in the first two tests, and then by a simple expression in the last test. SELECT min(l_orderkey) FROM lineitem; - min + min --------------------------------------------------------------------- 1 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey ASC LIMIT 1; DEBUG: push down of limit count: 1 - l_orderkey + l_orderkey --------------------------------------------------------------------- 1 (1 row) SELECT max(l_orderkey) FROM lineitem; - max + max --------------------------------------------------------------------- 14947 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1; DEBUG: push down of limit count: 1 - l_orderkey + l_orderkey --------------------------------------------------------------------- 14947 (1 row) SELECT * FROM lineitem ORDER BY l_orderkey DESC, l_linenumber DESC LIMIT 3; DEBUG: push down of limit count: 3 - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment --------------------------------------------------------------------- 14947 | 107098 | 7099 | 2 | 29.00 | 32047.61 | 0.04 | 0.06 | N | O | 11-08-1995 | 08-30-1995 | 12-03-1995 | TAKE BACK RETURN | FOB | inal sentiments t 14947 | 31184 | 3688 | 1 | 14.00 | 15612.52 | 0.09 | 0.02 | N | O | 11-05-1995 | 09-25-1995 | 11-27-1995 | TAKE BACK RETURN | RAIL | bout the even, iro @@ -174,7 +174,7 @@ DEBUG: push down of limit count: 3 (3 rows) SELECT max(extract(epoch from l_shipdate)) FROM lineitem; - max + max --------------------------------------------------------------------- 912124800 (1 row) @@ -182,7 +182,7 @@ SELECT max(extract(epoch from l_shipdate)) FROM lineitem; SELECT * FROM lineitem ORDER BY extract(epoch from l_shipdate) DESC, l_orderkey DESC LIMIT 3; DEBUG: push down of limit count: 3 - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment --------------------------------------------------------------------- 4678 | 57388 | 9894 | 1 | 35.00 | 47088.30 | 0.04 | 0.08 | N | O | 11-27-1998 | 10-02-1998 | 12-17-1998 | TAKE BACK RETURN | AIR | he accounts. fluffily bold sheaves b 12384 | 84161 | 1686 | 5 | 6.00 | 6870.96 | 0.04 | 0.00 | N | O | 11-26-1998 | 10-04-1998 | 12-08-1998 | COLLECT COD | RAIL | ep blithely. 
blithely ironic r @@ -196,7 +196,7 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity LIMIT 1; DEBUG: push down of limit count: 1 - l_quantity | l_discount | avg + l_quantity | l_discount | avg --------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -206,7 +206,7 @@ SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity, l_discount LIMIT 1; DEBUG: push down of limit count: 1 - l_quantity | l_discount | avg + l_quantity | l_discount | avg --------------------------------------------------------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) @@ -218,7 +218,7 @@ SELECT l_orderkey, count(DISTINCT l_partkey) GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 14885 | 7 14884 | 7 @@ -232,7 +232,7 @@ SELECT l_orderkey GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey + l_orderkey --------------------------------------------------------------------- 1 2 @@ -246,7 +246,7 @@ SELECT max(l_orderkey) FROM lineitem_hash GROUP BY l_linestatus ORDER BY 1 DESC LIMIT 2; - max + max --------------------------------------------------------------------- 14947 14916 @@ -257,7 +257,7 @@ SELECT l_orderkey, max(l_shipdate) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | max + l_orderkey | max --------------------------------------------------------------------- 4678 | 11-27-1998 12384 | 11-26-1998 @@ -273,7 +273,7 @@ SELECT GROUP BY l_linestatus, l_orderkey ORDER BY 3 DESC, 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_linestatus | l_orderkey | max + l_linestatus | l_orderkey | max --------------------------------------------------------------------- O | 4678 | 11-27-1998 O | 12384 | 11-26-1998 @@ -288,7 +288,7 @@ SELECT FROM lineitem_hash GROUP BY l_linestatus, l_shipmode ORDER BY 3 DESC, 1, 2 LIMIT 5; - l_linestatus | l_shipmode | max + l_linestatus | l_shipmode | max --------------------------------------------------------------------- O | AIR | 11-27-1998 O | RAIL | 11-26-1998 @@ -305,7 +305,7 @@ SELECT ORDER BY l_orderkey, l_linenumber LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | l_linenumber + l_orderkey | l_linenumber --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -321,7 +321,7 @@ SELECT GROUP BY l_orderkey, l_linenumber ORDER BY l_linenumber, l_orderkey LIMIT 5; - l_orderkey | l_linenumber + l_orderkey | l_linenumber --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -340,7 +340,7 @@ SELECT ORDER BY l_linenumber, l_orderkey LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | l_linenumber + l_orderkey | l_linenumber --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -359,7 +359,7 @@ SELECT GROUP BY l_orderkey, (1+1), l_linenumber ORDER BY l_linenumber, (1+1), l_orderkey LIMIT 5; - l_orderkey | l_linenumber + l_orderkey | l_linenumber --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -377,7 +377,7 @@ SELECT GROUP BY l_orderkey, l_linenumber ORDER BY l_linenumber, l_orderkey LIMIT 5; - l_orderkey | l_linenumber + l_orderkey | l_linenumber --------------------------------------------------------------------- 1 | 1 1 | 2 @@ -396,7 +396,7 @@ 
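-- A minimal sketch of the rule the "push down of limit count" messages above
-- demonstrate (items is a hypothetical table): when the GROUP BY contains the
-- distribution column, each group lives on exactly one shard, so the
-- ORDER BY ... LIMIT can be evaluated on the workers and the coordinator only
-- merges at most LIMIT rows per shard.
CREATE TABLE items (key int, val int);
SELECT create_distributed_table('items', 'key');
SET client_min_messages TO DEBUG1;
SELECT key, count(*) FROM items GROUP BY key ORDER BY key LIMIT 5;
-- expected, as in the outputs above: DEBUG:  push down of limit count: 5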
SELECT ORDER BY l_orderkey + 1 LIMIT 5; DEBUG: push down of limit count: 5 - ?column? + ?column? --------------------------------------------------------------------- 2 3 @@ -414,7 +414,7 @@ SELECT GROUP BY l_orderkey + 1 ORDER BY l_orderkey + 1 , 2 LIMIT 5; - ?column? | count + ?column? | count --------------------------------------------------------------------- 2 | 6 3 | 1 @@ -432,7 +432,7 @@ SELECT ORDER BY l_orderkey , 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 1 | 6 2 | 1 @@ -449,7 +449,7 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 LIMIT 2; - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 7 | 7 1 | 6 @@ -463,7 +463,7 @@ SELECT ORDER BY l_orderkey , 2 LIMIT 5; DEBUG: push down of limit count: 5 - l_orderkey | rank + l_orderkey | rank --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -482,7 +482,7 @@ SELECT GROUP BY l_orderkey ORDER BY l_orderkey , 3, 2 LIMIT 5; - l_orderkey | count | rank + l_orderkey | count | rank --------------------------------------------------------------------- 1 | 6 | 1 2 | 1 | 1 @@ -498,7 +498,7 @@ SELECT GROUP BY l_orderkey, l_linenumber ORDER BY l_orderkey , count(*) OVER (partition by l_orderkey), count(*), l_linenumber LIMIT 5; - l_orderkey | l_linenumber | count | count + l_orderkey | l_linenumber | count | count --------------------------------------------------------------------- 1 | 1 | 1 | 6 2 | 1 | 1 | 1 @@ -512,9 +512,9 @@ SELECT DISTINCT ON (RANK() OVER (partition by l_orderkey)) l_orderkey, RANK() OVER (partition by l_orderkey) FROM lineitem_hash GROUP BY l_orderkey - ORDER BY 2 DESC, 1 + ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | rank + l_orderkey | rank --------------------------------------------------------------------- 1 | 1 (1 row) diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index 69d84c6d1..1855182a2 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -9,7 +9,7 @@ SET client_min_messages TO DEBUG1; SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; - l_partkey | aggregate + l_partkey | aggregate --------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 @@ -29,7 +29,7 @@ SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; DEBUG: push down of limit count: 600 - l_partkey | aggregate + l_partkey | aggregate --------------------------------------------------------------------- 194541 | 3727794642 160895 | 3671463005 @@ -51,7 +51,7 @@ SELECT c_custkey, c_name, count(*) as lineitem_count WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; - c_custkey | c_name | lineitem_count + c_custkey | c_name | lineitem_count --------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 @@ -73,7 +73,7 @@ SELECT c_custkey, c_name, count(*) as lineitem_count GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; DEBUG: push down of limit count: 150 - c_custkey | c_name | lineitem_count + 
c_custkey | c_name | lineitem_count --------------------------------------------------------------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 @@ -93,7 +93,7 @@ DEBUG: push down of limit count: 150 SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem GROUP BY l_partkey ORDER BY average DESC, l_partkey LIMIT 10; - l_partkey | average + l_partkey | average --------------------------------------------------------------------- 9998 | 9999.0000000000000000 102466 | 9997.0000000000000000 @@ -112,7 +112,7 @@ SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem GROUP BY l_partkey ORDER BY complex_expression DESC LIMIT 10; - l_partkey | complex_expression + l_partkey | complex_expression --------------------------------------------------------------------- 160895 | 22816 194541 | 19160 @@ -130,7 +130,7 @@ SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 10.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; - count_quantity | l_quantity + count_quantity | l_quantity --------------------------------------------------------------------- 227 | 3.00 232 | 7.00 diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out index 2c203847c..0ec41658d 100644 --- a/src/test/regress/expected/multi_master_protocol.out +++ b/src/test/regress/expected/multi_master_protocol.out @@ -5,13 +5,13 @@ SET citus.next_shard_id TO 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); - part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy + part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------------------------------------------------------- t | l_orderkey | 2 | 1536000 | 2 (1 row) SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) ALTER TABLE public.lineitem OWNER TO postgres @@ -20,13 +20,13 @@ SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; (4 rows) SELECT * FROM master_get_new_shardid(); - master_get_new_shardid + master_get_new_shardid --------------------------------------------------------------------- 740000 (1 row) SELECT * FROM master_get_active_worker_nodes(); - node_name | node_port + node_name | node_port --------------------------------------------------------------------- localhost | 57638 localhost | 57637 diff --git a/src/test/regress/expected/multi_metadata_access.out b/src/test/regress/expected/multi_metadata_access.out index 0567aa595..ed11ff12b 100644 --- a/src/test/regress/expected/multi_metadata_access.out +++ b/src/test/regress/expected/multi_metadata_access.out @@ -18,7 +18,7 @@ WHERE AND ext.extname = 'citus' AND nsp.nspname = 'pg_catalog' AND NOT has_table_privilege(pg_class.oid, 'select'); - oid + oid --------------------------------------------------------------------- pg_dist_authinfo (1 row) diff --git a/src/test/regress/expected/multi_metadata_attributes.out b/src/test/regress/expected/multi_metadata_attributes.out index 505777e67..91d927c18 100644 --- 
a/src/test/regress/expected/multi_metadata_attributes.out +++ b/src/test/regress/expected/multi_metadata_attributes.out @@ -8,7 +8,7 @@ SELECT attrelid::regclass, attname, atthasmissing, attmissingval FROM pg_attribute WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass) ORDER BY attrelid, attname; - attrelid | attname | atthasmissing | attmissingval + attrelid | attname | atthasmissing | attmissingval --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 85c7ded5b..2213f1ed0 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -18,14 +18,14 @@ COMMENT ON FUNCTION master_metadata_snapshot() IS 'commands to create the metadata snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- (0 rows) -- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- pg_dist_node entries and reference tables SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default') SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition @@ -35,15 +35,15 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Create a test table with constraints and SERIAL CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL); SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('mx_test_table', 8, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- Set the replication model of the test table to streaming replication so that it is @@ -51,7 +51,7 @@ SELECT master_create_worker_shards('mx_test_table', 8, 1); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; -- Show that the created MX table is included in the metadata snapshot SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -71,7 +71,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that CREATE INDEX commands are included in the metadata snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO 
postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -95,7 +95,7 @@ ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes HINT: Connect to worker nodes directly to manually change schemas of affected objects. SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -116,14 +116,14 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that append distributed tables are not included in the metadata snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -144,7 +144,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Show that range distributed tables are not included in the metadata snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -165,7 +165,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; -- Test start_metadata_sync_to_node UDF -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -173,51 +173,51 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; -- Ensure it works when run on a secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); - master_add_node + master_add_node --------------------------------------------------------------------- 4 (1 row) SELECT start_metadata_sync_to_node('localhost', 8888); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata + hasmetadata --------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', 8888); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) -- Add a node to another 
cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); - master_add_secondary_node + master_add_secondary_node --------------------------------------------------------------------- 5 (1 row) -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; - nodeid | hasmetadata + nodeid | hasmetadata --------------------------------------------------------------------- 1 | t (1 row) @@ -225,13 +225,13 @@ SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND node -- Check that the metadata has been copied to the worker \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; - groupid + groupid --------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards --------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | f | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t @@ -240,13 +240,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 @@ -259,7 +259,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 @@ -272,36 +272,36 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | + col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) SELECT 
"Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_2 | text | col_2 (1 row) -- Check that pg_dist_colocation is not synced SELECT * FROM pg_dist_colocation ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- (0 rows) -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -317,27 +317,27 @@ CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE( CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- Check that foreign key metadata exists on the worker \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) (1 row) @@ -350,26 +350,26 @@ RESET citus.replication_model; -- Check that repeated calls to start_metadata_sync_to_node has no side effects \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; - groupid + groupid --------------------------------------------------------------------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
--------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t @@ -378,13 +378,13 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 @@ -397,7 +397,7 @@ SELECT * FROM pg_dist_shard ORDER BY shardid; (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 @@ -410,29 +410,29 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | + col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_1 | integer | col_1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_2 | text | col_2 (1 row) SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -444,7 +444,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: start_metadata_sync_to_node cannot run inside a transaction block ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) @@ -454,20 +454,20 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 
row) CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; - repmodel + repmodel --------------------------------------------------------------------- s (1 row) @@ -479,7 +479,7 @@ INSERT INTO mx_query_test VALUES (4, 'four', 16); INSERT INTO mx_query_test VALUES (5, 'five', 24); \c - - - :worker_1_port SELECT * FROM mx_query_test ORDER BY a; - a | b | c + a | b | c --------------------------------------------------------------------- 1 | one | 1 2 | two | 4 @@ -492,7 +492,7 @@ INSERT INTO mx_query_test VALUES (6, 'six', 36); UPDATE mx_query_test SET c = 25 WHERE a = 5; \c - - - :master_port SELECT * FROM mx_query_test ORDER BY a; - a | b | c + a | b | c --------------------------------------------------------------------- 1 | one | 1 2 | two | 4 @@ -507,34 +507,34 @@ DROP TABLE mx_query_test; -- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) -- Test DDL propagation in MX tables SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SET citus.shard_count = 5; @@ -550,56 +550,56 @@ CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col1 | integer | - col2 | text | + col1 | integer | + col2 | text | (2 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_1.mx_index_1'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers 
--------------------------------------------------------------------- - col1 | integer | - col2 | text | + col1 | integer | + col2 | text | (2 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_2'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col2 | text | col2 (1 row) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) (1 row) SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Check that created tables are marked as streaming replicated tables @@ -612,7 +612,7 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid; - logicalrelid | repmodel + logicalrelid | repmodel --------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s @@ -628,7 +628,7 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; - logicalrelid | shardid | nodename | nodeport + logicalrelid | shardid | nodename | nodeport --------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 @@ -647,7 +647,7 @@ ORDER BY -- Check that tables are created \dt mx_test_schema_?.mx_table_? 
List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_test_schema_1 | mx_table_1 | table | postgres mx_test_schema_2 | mx_table_2 | table | postgres @@ -661,7 +661,7 @@ FROM WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass; - logicalrelid | repmodel + logicalrelid | repmodel --------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s @@ -677,7 +677,7 @@ WHERE OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; - logicalrelid | shardid | nodename | nodeport + logicalrelid | shardid | nodename | nodeport --------------------------------------------------------------------- mx_test_schema_1.mx_table_1 | 1310020 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310021 | localhost | 57638 @@ -696,17 +696,17 @@ ORDER BY \d mx_test_schema_1.mx_table_1 \d mx_test_schema_2.mx_table_2 SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- (0 rows) @@ -719,14 +719,14 @@ ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQU \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_index_3'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col1 | integer | col1 (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col1 | integer | col1 (1 row) @@ -754,15 +754,15 @@ REFERENCES mx_test_schema_2.mx_table_2(col1); \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col1 | integer | - col2 | text | - col3 | integer | + col1 | integer | + col2 | text | + col3 | integer | (3 rows) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -782,7 +782,7 @@ REFERENCES NOT VALID; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition + Constraint | Definition 
--------------------------------------------------------------------- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) @@ -796,16 +796,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_colocation_test_1 (a int); SELECT create_distributed_table('mx_colocation_test_1', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE mx_colocation_test_2 (a int); SELECT create_distributed_table('mx_colocation_test_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Check the colocation IDs of the created tables @@ -817,7 +817,7 @@ WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass ORDER BY logicalrelid; - logicalrelid | colocationid + logicalrelid | colocationid --------------------------------------------------------------------- mx_colocation_test_1 | 10000 mx_colocation_test_2 | 10000 @@ -841,9 +841,9 @@ WHERE OR logicalrelid = 'mx_colocation_test_2'::regclass; -- Mark tables colocated and see the changes on the master and the worker SELECT mark_tables_colocated('mx_colocation_test_1', ARRAY['mx_colocation_test_2']); - mark_tables_colocated + mark_tables_colocated --------------------------------------------------------------------- - + (1 row) SELECT @@ -853,7 +853,7 @@ FROM WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; - logicalrelid | colocationid + logicalrelid | colocationid --------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 @@ -867,7 +867,7 @@ FROM WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; - logicalrelid | colocationid + logicalrelid | colocationid --------------------------------------------------------------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 @@ -889,13 +889,13 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel + logicalrelid | repmodel --------------------------------------------------------------------- mx_temp_drop_test | s (1 row) @@ -903,13 +903,13 @@ SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_te DROP TABLE mx_temp_drop_test; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel + logicalrelid | repmodel --------------------------------------------------------------------- mx_temp_drop_test | s (1 row) @@ -921,38 +921,38 @@ SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT 
stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- sync table with serial column after create_distributed_table CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) DROP TABLE mx_table_with_small_sequence; -- Show that create_distributed_table works with a serial column CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO mx_table_with_small_sequence VALUES (0); @@ -964,29 +964,29 @@ SET citus.replication_model TO 'streaming'; -- Create an MX table with (BIGSERIAL) sequences CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); SELECT create_distributed_table('mx_table_with_sequence', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - a | integer | + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) @@ -994,36 +994,36 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_ -- Check that the sequences created on the metadata worker as well \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - a | integer | + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | 
sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences on the worker have their own space SELECT nextval('mx_table_with_sequence_b_seq'); - nextval + nextval --------------------------------------------------------------------- 281474976710657 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); - nextval + nextval --------------------------------------------------------------------- 281474976710657 (1 row) @@ -1031,48 +1031,48 @@ SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) \c - - - :worker_2_port SELECT groupid FROM pg_dist_local_group; - groupid + groupid --------------------------------------------------------------------- 2 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - a | integer | + a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) SELECT nextval('mx_table_with_sequence_b_seq'); - nextval + nextval --------------------------------------------------------------------- 562949953421313 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); - nextval + nextval --------------------------------------------------------------------- 562949953421313 (1 row) @@ -1082,7 +1082,7 @@ INSERT INTO mx_table_with_small_sequence VALUES (2), (4); \c - - - :master_port -- check our small sequence values SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c; - a | b | c + a | b | c --------------------------------------------------------------------- 0 | 1 | 1 1 | 268435457 | 4097 @@ -1096,13 +1096,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -1111,13 +1111,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner 
--------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -1125,13 +1125,13 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \c - - - :worker_2_port \ds mx_table_with_sequence_b_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -1146,9 +1146,9 @@ DELETE FROM pg_dist_placement; DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- the master user needs superuser permissions to change the replication model @@ -1169,27 +1169,27 @@ CREATE TABLE mx_table (a int, b BIGSERIAL); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); - master_add_node + master_add_node --------------------------------------------------------------------- 6 (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); - nextval + nextval --------------------------------------------------------------------- 281474976710657 (1 row) @@ -1197,7 +1197,7 @@ SELECT nextval('mx_table_b_seq'); INSERT INTO mx_table (a) VALUES (37); INSERT INTO mx_table (a) VALUES (38); SELECT * FROM mx_table ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 @@ -1205,7 +1205,7 @@ SELECT * FROM mx_table ORDER BY a; \c - mx_user - :worker_2_port SELECT nextval('mx_table_b_seq'); - nextval + nextval --------------------------------------------------------------------- 1125899906842625 (1 row) @@ -1213,7 +1213,7 @@ SELECT nextval('mx_table_b_seq'); INSERT INTO mx_table (a) VALUES (39); INSERT INTO mx_table (a) VALUES (40); SELECT * FROM mx_table ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 37 | 281474976710658 38 | 281474976710659 @@ -1242,9 +1242,9 @@ UPDATE pg_dist_placement WHERE groupid = :old_worker_2_group; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) DROP USER mx_user; @@ -1256,22 +1256,22 @@ DROP USER mx_user; \c - - - :master_port CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - 
+ (1 row) -- make sure that adding/removing nodes doesn't cause -- multiple colocation entries for reference tables SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; - count + count --------------------------------------------------------------------- 1 (1 row) \dt mx_ref List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) @@ -1279,7 +1279,7 @@ SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; \c - - - :worker_1_port \dt mx_ref List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | mx_ref | table | postgres (1 row) @@ -1294,7 +1294,7 @@ WHERE logicalrelid = 'mx_ref'::regclass ORDER BY nodeport; - logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport + logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport --------------------------------------------------------------------- mx_ref | n | t | 1310072 | 100072 | localhost | 57637 mx_ref | n | t | 1310072 | 100073 | localhost | 57638 @@ -1306,32 +1306,32 @@ SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_re ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; CREATE INDEX mx_ref_index ON mx_ref(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | numeric | default 0 (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_1 | integer | col_1 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | numeric | default 0 (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- col_1 | integer | col_1 (1 row) @@ -1347,12 +1347,12 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; ERROR: relation "mx_ref_index" does not exist SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- (0 rows) @@ -1365,22 +1365,22 @@ CREATE TABLE tmp_placement AS DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; SELECT master_remove_node('localhost', :worker_2_port); - 
master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport + shardid | nodename | nodeport --------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) @@ -1389,7 +1389,7 @@ WHERE logicalrelid='mx_ref'::regclass; SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport + shardid | nodename | nodeport --------------------------------------------------------------------- 1310073 | localhost | 57637 (1 row) @@ -1397,7 +1397,7 @@ WHERE logicalrelid='mx_ref'::regclass; \c - - - :master_port SELECT master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx - master_add_node + master_add_node --------------------------------------------------------------------- 7 (1 row) @@ -1406,7 +1406,7 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; - shardid | nodename | nodeport + shardid | nodename | nodeport --------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 @@ -1417,7 +1417,7 @@ SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; - shardid | nodename | nodeport + shardid | nodename | nodeport --------------------------------------------------------------------- 1310073 | localhost | 57637 1310073 | localhost | 57638 @@ -1437,14 +1437,14 @@ UPDATE pg_dist_placement -- Confirm that shouldhaveshards is 'true' \c - - - :master_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- t (1 row) @@ -1452,20 +1452,20 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; -- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes \c - - - :master_port SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false); - master_set_node_property + master_set_node_property --------------------------------------------------------------------- - + (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- f (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- f (1 row) @@ -1473,20 +1473,20 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; -- Check that setting shouldhaveshards to true is correctly 
transferred to other mx nodes \c - postgres - :master_port SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true); - master_set_node_property + master_set_node_property --------------------------------------------------------------------- - + (1 row) select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- t (1 row) \c - postgres - :worker_1_port select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards + shouldhaveshards --------------------------------------------------------------------- t (1 row) @@ -1499,7 +1499,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888; ALTER SYSTEM SET citus.metadata_sync_interval TO 300000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -1508,14 +1508,14 @@ SET citus.replication_model TO 'streaming'; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table_1(a int); SELECT create_distributed_table('dist_table_1', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port; SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata | metadatasynced + hasmetadata | metadatasynced --------------------------------------------------------------------- t | f (1 row) @@ -1550,21 +1550,21 @@ HINT: If the node is up, wait until metadata gets synced to it and try again. -- master_update_node should succeed SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -1572,15 +1572,15 @@ SELECT pg_reload_conf(); UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; -- Cleanup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) DROP TABLE mx_test_schema_2.mx_table_2 CASCADE; diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 15be73716..92b3de6f0 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -26,32 +26,32 @@ CREATE TABLE range_partitioned ( LIKE limit_orders ); CREATE TABLE append_partitioned ( LIKE limit_orders ); SET citus.shard_count TO 2; SELECT 
create_distributed_table('limit_orders', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('multiple_hash', 'id', 'hash'); ERROR: column "id" of relation "multiple_hash" does not exist SELECT create_distributed_table('range_partitioned', 'id', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('append_partitioned', 'id', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; -- make a single shard that covers no partition values SELECT create_distributed_table('insufficient_shards', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0 @@ -78,14 +78,14 @@ WHERE shardid = :new_shard_id; INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 32743; - count + count --------------------------------------------------------------------- 1 (1 row) -- basic single-row INSERT with RETURNING INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -106,7 +106,7 @@ SET client_min_messages TO 'DEBUG2'; SELECT * FROM range_partitioned WHERE id = 32743; DEBUG: Creating router plan DEBUG: Plan is router executable - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -114,7 +114,7 @@ DEBUG: Plan is router executable SELECT * FROM append_partitioned WHERE id = 414123; DEBUG: Router planner does not support append-partitioned tables. DEBUG: Plan is router executable - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 414123 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -134,7 +134,7 @@ HINT: Make sure the value for partition column "id" falls into a single shard. 
INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT COUNT(*) FROM limit_orders WHERE id = 12756; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -143,7 +143,7 @@ SELECT COUNT(*) FROM limit_orders WHERE id = 12756; INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT COUNT(*) FROM limit_orders WHERE id = 430; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -188,7 +188,7 @@ INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'bu (12038, 'GOOG', 5634, '2001-04-17 03:37:28', 'buy', 2.50), (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -198,7 +198,7 @@ INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50), (22038, 'GOOG', 5634, now(), 'buy', 2.50), (22039, 'GOOG', 5634, now(), 'buy', 1.50) RETURNING id; - id + id --------------------------------------------------------------------- 22037 22038 @@ -206,7 +206,7 @@ RETURNING id; (3 rows) SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -216,7 +216,7 @@ INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'bu (random() * 10 + 80000, 'GOOG', 5634, now(), 'buy', 2.50), (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -227,27 +227,27 @@ INSERT INTO limit_orders SELECT * FROM deleted_orders; -- test simple DELETE INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count + count --------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = 246; SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count + count --------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders WHERE id = 430 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders WHERE id = 430; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -255,14 +255,14 @@ SELECT COUNT(*) FROM limit_orders WHERE id = 430; -- DELETE with expression in WHERE clause INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count + count --------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders WHERE id = 246; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -283,7 +283,7 @@ DELETE FROM limit_orders RETURNING id / 0; ERROR: division by zero \set VERBOSITY default SELECT * FROM limit_orders 
WHERE id = 412; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- (0 rows) @@ -291,14 +291,14 @@ INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell' -- simple UPDATE UPDATE limit_orders SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders WHERE id = 246; - symbol + symbol --------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -306,14 +306,14 @@ UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; -- expression UPDATE UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders WHERE id = 246; - bidder_id + bidder_id --------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -321,14 +321,14 @@ UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; -- multi-column UPDATE UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders WHERE id = 246; - kind | limit_price + kind | limit_price --------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -358,21 +358,21 @@ ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- or the insert succeeded and placement marked unhealthy \c - - - :worker_1_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; - count + count --------------------------------------------------------------------- 0 (1 row) \c - - - :worker_2_port SELECT count(*) FROM limit_orders_750000 WHERE id = 276; - count + count --------------------------------------------------------------------- 0 (1 row) \c - - - :master_port SELECT count(*) FROM limit_orders WHERE id = 276; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -383,7 +383,7 @@ FROM pg_dist_shard_placement AS sp, WHERE sp.shardid = s.shardid AND sp.shardstate = 3 AND s.logicalrelid = 'limit_orders'::regclass; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -409,7 +409,7 @@ AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -440,7 +440,7 @@ ERROR: relation bidders is not distributed WITH 
deleted_orders AS (INSERT INTO limit_orders VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43)) UPDATE limit_orders SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; - symbol | bidder_id + symbol | bidder_id --------------------------------------------------------------------- GM | 30 (1 row) @@ -452,14 +452,14 @@ UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; - symbol | bidder_id + symbol | bidder_id --------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; - id | lower | symbol + id | lower | symbol --------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -489,7 +489,7 @@ UPDATE limit_orders SET array_of_values = stable_append(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders WHERE id = 246; - array_of_values + array_of_values --------------------------------------------------------------------- {1,2} (1 row) @@ -502,7 +502,7 @@ UPDATE limit_orders SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint \set VERBOSITY default SELECT array_of_values FROM limit_orders WHERE id = 246; - array_of_values + array_of_values --------------------------------------------------------------------- {1,2} (1 row) @@ -519,7 +519,7 @@ INSERT INTO multiple_hash VALUES ('0', '4'); INSERT INTO multiple_hash VALUES ('0', '5'); INSERT INTO multiple_hash VALUES ('0', '6'); UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; - category | data + category | data --------------------------------------------------------------------- 0 | 1-1 0 | 2-1 @@ -530,7 +530,7 @@ UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; (6 rows) DELETE FROM multiple_hash WHERE category = '0' RETURNING *; - category | data + category | data --------------------------------------------------------------------- 0 | 1-1 0 | 2-1 @@ -555,7 +555,7 @@ INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *; - category | data + category | data --------------------------------------------------------------------- 2 | 3 (1 row) @@ -570,7 +570,7 @@ UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category; - category + category --------------------------------------------------------------------- 1 1 @@ -580,7 +580,7 @@ UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING categ UPDATE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 @@ -596,7 +596,7 @@ DELETE FROM multiple_hash WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash WHERE category = '1' RETURNING category; - category + category --------------------------------------------------------------------- 1 1 @@ -606,12 
+606,12 @@ DELETE FROM multiple_hash WHERE category = '1' RETURNING category; DELETE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- (0 rows) @@ -620,25 +620,25 @@ SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SET citus.shard_count TO 4; SELECT create_distributed_table('app_analytics_events', 'app_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id + id --------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id + id --------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 3 | 103 | Mynt (1 row) @@ -647,25 +647,25 @@ DROP TABLE app_analytics_events; -- again with serial in the partition column CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id + id --------------------------------------------------------------------- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id + id --------------------------------------------------------------------- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 3 | 103 | Mynt (1 row) @@ -673,7 +673,7 @@ INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING * -- Test multi-row insert with serial in the partition column INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 4 | 104 | Wayz 5 | 105 | Mynt @@ -681,7 +681,7 @@ VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 6 | | Foo 300 | | Wah @@ -691,49 +691,49 @@ PREPARE prep(varchar) AS INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; EXECUTE prep('version-1'); - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 7 | | version-1.1 400 | | version-1.2 (2 rows) EXECUTE prep('version-2'); - id | app_id | name + id | app_id | name 
--------------------------------------------------------------------- 8 | | version-2.1 400 | | version-2.2 (2 rows) EXECUTE prep('version-3'); - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 9 | | version-3.1 400 | | version-3.2 (2 rows) EXECUTE prep('version-4'); - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 10 | | version-4.1 400 | | version-4.2 (2 rows) EXECUTE prep('version-5'); - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 11 | | version-5.1 400 | | version-5.2 (2 rows) EXECUTE prep('version-6'); - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 12 | | version-6.1 400 | | version-6.2 (2 rows) SELECT * FROM app_analytics_events ORDER BY id, name; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 1 | 101 | Fauxkemon Geaux 2 | 102 | Wayz @@ -761,14 +761,14 @@ TRUNCATE app_analytics_events; ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; - id | name + id | name --------------------------------------------------------------------- 13 | Wayz 14 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | name + id | name --------------------------------------------------------------------- 13 | Wayz 14 | Mynt @@ -778,22 +778,22 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with a dropped column before the partition column CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'name', colocate_with => 'none'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; - id | name + id | name --------------------------------------------------------------------- 3 | Mynt 3 | Wayz (2 rows) SELECT * FROM app_analytics_events WHERE name = 'Wayz'; - id | name + id | name --------------------------------------------------------------------- 3 | Wayz (1 row) @@ -802,21 +802,21 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a reference table CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_reference_table('app_analytics_events'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 1 | 104 | Wayz 2 | 105 | Mynt @@ -826,21 +826,21 @@ DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a non-partition column CREATE TABLE app_analytics_events (id int, app_id serial, name text); SELECT create_distributed_table('app_analytics_events', 'id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) INSERT INTO app_analytics_events (id, name) VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; - name | app_id + name | app_id --------------------------------------------------------------------- Mynt | 2 Wayz | 1 (2 rows) SELECT * FROM app_analytics_events ORDER BY id; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 98 | 2 | Mynt 99 | 1 | Wayz @@ -856,15 +856,15 @@ CREATE TABLE summary_table ( count int, uniques int); SELECT create_distributed_table('raw_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('summary_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO raw_table VALUES (1, 100); @@ -883,10 +883,10 @@ UPDATE summary_table SET uniques = 0 WHERE false; UPDATE summary_table SET uniques = 0 WHERE null; UPDATE summary_table SET uniques = 0 WHERE null > jsonb_build_array(); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | | | - 2 | | | | + 1 | | | | + 2 | | | | (2 rows) UPDATE summary_table SET average_value = average_query.average FROM ( @@ -894,10 +894,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | 200.0000000000000000 | | - 2 | | | | + 1 | | 200.0000000000000000 | | + 2 | | | | (2 rows) -- try different syntax @@ -905,39 +905,39 @@ UPDATE summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) UPDATE summary_table SET min_value = 100 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- indeed, we don't need filter on UPDATE explicitly if SELECT already prunes to one shard UPDATE summary_table SET uniques = 2 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200)); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- use inner results for 
non-partition column UPDATE summary_table SET uniques = NULL WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- these should not update anything @@ -950,10 +950,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1 AND id = 4; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | 100 | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + 1 | 100 | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- update with NULL value @@ -962,10 +962,10 @@ UPDATE summary_table SET average_value = average_query.average FROM ( ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | 100 | | | - 2 | 400 | 450.0000000000000000 | | + 1 | 100 | | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- multi-shard updates with recursively planned subqueries @@ -994,20 +994,20 @@ WHERE summary_table.id = metrics.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | | 4 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test joins UPDATE summary_table SET count = count + 1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | | 5 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with prepared statements @@ -1022,10 +1022,10 @@ EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | | 65 | 2 - 2 | 400 | 450.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with reference tables @@ -1037,22 +1037,22 @@ CREATE TABLE reference_summary_table ( count int, uniques int); SELECT create_reference_table('reference_raw_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('reference_summary_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO reference_raw_table VALUES (1, 100); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO 
reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; - id | value + id | value --------------------------------------------------------------------- 1 | 300 2 | 400 @@ -1062,10 +1062,10 @@ INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; INSERT INTO reference_summary_table VALUES (1); INSERT INTO reference_summary_table VALUES (2); SELECT * FROM reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | | | - 2 | | | | + 1 | | | | + 2 | | | | (2 rows) UPDATE reference_summary_table SET average_value = average_query.average FROM ( @@ -1081,10 +1081,10 @@ UPDATE reference_summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM reference_raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | | + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | (2 rows) -- no need for partition column equalities on reference tables @@ -1092,10 +1092,10 @@ UPDATE reference_summary_table SET (count) = (SELECT id AS inner_id FROM reference_raw_table WHERE value = 500) WHERE min_value = 400; SELECT * FROM reference_summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 1 | | 200.0000000000000000 | | - 2 | 400 | 450.0000000000000000 | 2 | + 1 | | 200.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | 2 | (2 rows) -- can read from a reference table and update a distributed table @@ -1125,11 +1125,11 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 3; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | (3 rows) -- COPY on UPDATE part @@ -1143,12 +1143,12 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 4; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | (4 rows) -- COPY on both part @@ -1161,13 +1161,13 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 5; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 
150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | (5 rows) -- COPY on reference tables @@ -1180,19 +1180,19 @@ UPDATE summary_table SET average_value = average_query.average FROM ( WHERE id = 6; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- 1 | 100 | 200.0000000000000000 | 65 | 2 - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (6 rows) -- test DELETE queries SELECT * FROM raw_table ORDER BY id, value; - id | value + id | value --------------------------------------------------------------------- 1 | 100 1 | 200 @@ -1211,25 +1211,25 @@ SELECT * FROM raw_table ORDER BY id, value; DELETE FROM summary_table WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 2 | 400 | 450.0000000000000000 | | - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 2 | 400 | 450.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (5 rows) -- test with different syntax DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 2; SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- cannot read from a distributed table and delete from a reference table @@ -1237,12 +1237,12 @@ DELETE FROM reference_summary_table USING raw_table WHERE reference_summary_table.id = raw_table.id AND raw_table.id = 3; ERROR: cannot perform select on a distributed table and modify a reference table SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- test connection API via using COPY with DELETEs @@ -1254,12 +1254,12 @@ DELETE FROM summary_table USING reference_raw_table WHERE summary_table.id = reference_raw_table.id AND reference_raw_table.id = 2; COMMIT; SELECT * FROM summary_table ORDER BY id; - id | 
min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- - 3 | | 150.0000000000000000 | | - 4 | | 150.0000000000000000 | | - 5 | | 150.0000000000000000 | | - 6 | | 150.0000000000000000 | | + 3 | | 150.0000000000000000 | | + 4 | | 150.0000000000000000 | | + 5 | | 150.0000000000000000 | | + 6 | | 150.0000000000000000 | | (4 rows) -- test DELETEs with prepared statements @@ -1275,7 +1275,7 @@ EXECUTE prepared_delete_with_join(4); EXECUTE prepared_delete_with_join(5); EXECUTE prepared_delete_with_join(6); SELECT * FROM summary_table ORDER BY id; - id | min_value | average_value | count | uniques + id | min_value | average_value | count | uniques --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index ecd25bcd8..7af0de8d9 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -13,27 +13,27 @@ CREATE TABLE labs ( name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('researchers', 2, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) SELECT master_create_distributed_table('labs', 'id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('labs', 1, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- might be confusing to have two people in the same lab with the same name @@ -49,7 +49,7 @@ DELETE FROM researchers WHERE lab_id = 1 AND id = 2; INSERT INTO researchers VALUES (2, 1, 'John Backus'), (12, 1, 'Frances E. Allen'); COMMIT; SELECT name FROM researchers WHERE lab_id = 1 AND id % 10 = 2; - name + name --------------------------------------------------------------------- John Backus Frances E. 
Allen @@ -62,7 +62,7 @@ DELETE FROM researchers WHERE id = 14 AND lab_id = 2; ROLLBACK; -- should have rolled everything back SELECT * FROM researchers WHERE id = 15 AND lab_id = 2; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- (0 rows) @@ -71,7 +71,7 @@ BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers WHERE lab_id = 1 AND id = 1; - name + name --------------------------------------------------------------------- Donald Knuth (1 row) @@ -90,7 +90,7 @@ SAVEPOINT hire_thompson; INSERT INTO researchers VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers WHERE lab_id = 3 AND id = 6; - name + name --------------------------------------------------------------------- Ken Thompson (1 row) @@ -114,7 +114,7 @@ INSERT INTO researchers VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers WHERE lab_id = 4; - name + name --------------------------------------------------------------------- Jim Gray (1 row) @@ -137,7 +137,7 @@ INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id AND researchers.lab_id = 5; - id | lab_id | name | id | name + id | lab_id | name | id | name --------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -160,7 +160,7 @@ ABORT; BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -176,7 +176,7 @@ AND sp.nodeport = :worker_1_port AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -200,14 +200,14 @@ INSERT INTO labs VALUES (6, 'Bell Labs'); ABORT; -- but the DDL should correctly roll back SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- id | bigint | not null name | text | not null (2 rows) SELECT * FROM labs WHERE id = 6; - id | name + id | name --------------------------------------------------------------------- 6 | Bell Labs (1 row) @@ -243,7 +243,7 @@ ROLLBACK; BEGIN; \copy labs from stdin delimiter ',' SELECT name FROM labs WHERE id = 10; - name + name --------------------------------------------------------------------- Weyland-Yutani Weyland-Yutani @@ -257,7 +257,7 @@ BEGIN; \copy labs from stdin delimiter ',' COMMIT; SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; - name + name --------------------------------------------------------------------- Planet Express fsociety @@ -265,7 +265,7 @@ SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; -- 1pc failure test SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -279,13 +279,13 @@ DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. 
COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -293,7 +293,7 @@ SELECT count(*) FROM pg_dist_transaction; -- 2pc failure and success tests SET citus.multi_shard_commit_protocol TO '2pc'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -307,13 +307,13 @@ DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -324,7 +324,7 @@ BEGIN; COMMIT; -- verify success SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' @@ -333,7 +333,7 @@ SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -350,7 +350,7 @@ SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS END; $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION @@ -359,7 +359,7 @@ ORDER BY nodeport; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; - nodename | nodeport | shardid | success | result + nodename | nodeport | shardid | success | result --------------------------------------------------------------------- localhost | 57637 | 1200000 | t | CREATE TRIGGER localhost | 57637 | 1200001 | t | CREATE TRIGGER @@ -385,7 +385,7 @@ ERROR: could not commit transaction on any active node \unset VERBOSITY -- verify everything including delete is rolled back SELECT * FROM researchers WHERE lab_id = 6; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' @@ -395,7 +395,7 @@ SELECT * FROM researchers WHERE lab_id = 6; -- cleanup triggers and the function SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; - nodename | nodeport | shardid | success | result + nodename | nodeport | shardid | success | result --------------------------------------------------------------------- localhost | 57637 | 1200000 | t | DROP TRIGGER localhost | 57637 | 1200001 | t | DROP TRIGGER @@ -405,7 +405,7 @@ ORDER BY nodeport, shardid; SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; - 
nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION @@ -423,12 +423,12 @@ ABORT; -- can perform parallel DDL even if a connection is used for multiple shards BEGIN; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; - lab_id + lab_id --------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; - lab_id + lab_id --------------------------------------------------------------------- (0 rows) @@ -438,12 +438,12 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; - lab_id + lab_id --------------------------------------------------------------------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; - lab_id + lab_id --------------------------------------------------------------------- (0 rows) @@ -457,7 +457,7 @@ ALTER TABLE labs ADD COLUMN score float; ROLLBACK; -- should have rolled everything back SELECT * FROM labs WHERE id = 12; - id | name + id | name --------------------------------------------------------------------- 12 | fsociety (1 row) @@ -468,15 +468,15 @@ CREATE TABLE objects ( name text NOT NULL ); SELECT master_create_distributed_table('objects', 'id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('objects', 1, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- test primary key violations @@ -489,7 +489,7 @@ CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... 
SELECT * FROM objects WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -500,7 +500,7 @@ FROM pg_dist_shard_placement AS sp, WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND s.logicalrelid = 'objects'::regclass; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -531,12 +531,12 @@ ERROR: illegal value COMMIT; -- so the data should not be persisted SELECT * FROM objects WHERE id = 2; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 7; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -549,7 +549,7 @@ AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -583,12 +583,12 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -600,7 +600,7 @@ WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); - count + count --------------------------------------------------------------------- 3 (1 row) @@ -623,7 +623,7 @@ WARNING: illegal value WARNING: failed to commit transaction on localhost:xxxxx -- data should be persisted SELECT * FROM objects WHERE id = 2; - id | name + id | name --------------------------------------------------------------------- 2 | BAD (1 row) @@ -637,7 +637,7 @@ AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -671,12 +671,12 @@ WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -688,7 +688,7 @@ WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); - count + count --------------------------------------------------------------------- 3 (1 row) @@ -709,13 +709,13 @@ WARNING: could not commit transaction for shard xxxxx on any active node \set VERBOSITY default -- data to objects should be persisted, but labs should not... 
SELECT * FROM objects WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- 1 | apple (1 row) SELECT * FROM labs WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -728,7 +728,7 @@ AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count + logicalrelid | shardstate | count --------------------------------------------------------------------- labs | 1 | 1 objects | 1 | 1 @@ -738,9 +738,9 @@ ORDER BY s.logicalrelid, sp.shardstate; -- some append-partitioned tests for good measure CREATE TABLE append_researchers ( LIKE researchers ); SELECT master_create_distributed_table('append_researchers', 'id', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_replication_factor TO 1; @@ -758,7 +758,7 @@ BEGIN; INSERT INTO append_researchers VALUES (0, 0, 'John Backus'); COMMIT; SELECT * FROM append_researchers WHERE id = 0; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -768,7 +768,7 @@ BEGIN; DELETE FROM append_researchers WHERE id = 0; ROLLBACK; SELECT * FROM append_researchers WHERE id = 0; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -781,7 +781,7 @@ ERROR: cannot run INSERT command which targets multiple shards HINT: Make sure the value for partition column "id" falls into a single shard. 
ROLLBACK; SELECT * FROM append_researchers; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 0 | 0 | John Backus (1 row) @@ -790,15 +790,15 @@ SELECT * FROM append_researchers; -- let's add some tests for them CREATE TABLE reference_modifying_xacts (key int, value int); SELECT create_reference_table('reference_modifying_xacts'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- very basic test, ensure that INSERTs work INSERT INTO reference_modifying_xacts VALUES (1, 1); SELECT * FROM reference_modifying_xacts; - key | value + key | value --------------------------------------------------------------------- 1 | 1 (1 row) @@ -807,7 +807,7 @@ SELECT * FROM reference_modifying_xacts; BEGIN; INSERT INTO reference_modifying_xacts VALUES (2, 2); SELECT * FROM reference_modifying_xacts; - key | value + key | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -816,7 +816,7 @@ SELECT * FROM reference_modifying_xacts; COMMIT; -- we should be able to see the insert outside of the transaction as well SELECT * FROM reference_modifying_xacts; - key | value + key | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -826,7 +826,7 @@ SELECT * FROM reference_modifying_xacts; BEGIN; INSERT INTO reference_modifying_xacts VALUES (3, 3); SELECT * FROM reference_modifying_xacts; - key | value + key | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -836,7 +836,7 @@ SELECT * FROM reference_modifying_xacts; ROLLBACK; -- see that we've not inserted SELECT * FROM reference_modifying_xacts; - key | value + key | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -892,7 +892,7 @@ WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_modifying_xacts'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count + logicalrelid | shardstate | count --------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 (1 row) @@ -911,9 +911,9 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE hash_modifying_xacts (key int, value int); SELECT create_distributed_table('hash_modifying_xacts', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- let's try to expand the xact participants @@ -955,7 +955,7 @@ ERROR: illegal value COMMIT; -- ensure that the value didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 55; - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -978,7 +978,7 @@ COMMIT; ERROR: illegal value -- ensure that the values didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 12; - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -987,11 +987,11 @@ SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR +AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, 
sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count + logicalrelid | shardstate | count --------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 @@ -1015,17 +1015,17 @@ INSERT INTO reference_modifying_xacts VALUES (999, 3); ERROR: illegal value COMMIT; SELECT * FROM hash_modifying_xacts WHERE key = 80; - key | value + key | value --------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 66; - key | value + key | value --------------------------------------------------------------------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 999; - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -1034,11 +1034,11 @@ SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid -AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR +AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count + logicalrelid | shardstate | count --------------------------------------------------------------------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 @@ -1047,21 +1047,21 @@ ORDER BY s.logicalrelid, sp.shardstate; -- now show that all modifications to reference -- tables are done in 2PC SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) INSERT INTO reference_modifying_xacts VALUES (70, 70); SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -1070,7 +1070,7 @@ BEGIN; INSERT INTO reference_modifying_xacts VALUES (71, 71); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -1080,14 +1080,14 @@ SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE hash_modifying_xacts_second (key int, value int); SELECT create_distributed_table('hash_modifying_xacts_second', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -1097,35 +1097,35 @@ INSERT INTO hash_modifying_xacts_second VALUES (72, 1); INSERT INTO reference_modifying_xacts VALUES (72, 3); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) DELETE FROM 
reference_modifying_xacts; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) UPDATE reference_modifying_xacts SET key = 10; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -1146,30 +1146,30 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes SET citus.next_shard_id TO 1200015; CREATE TABLE reference_failure_test (key int, value int); SELECT create_reference_table('reference_failure_test'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- create a hash distributed table SET citus.shard_count TO 4; CREATE TABLE numbers_hash_failure_test(key int, value int); SELECT create_distributed_table('numbers_hash_failure_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- ensure that the shard is created for this user \c - test_user - :worker_1_port \dt reference_failure_test_1200015 List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | reference_failure_test_1200015 | table | test_user (1 row) --- now connect with the default user, +-- now connect with the default user, -- and rename the existing user \c - :default_user - :worker_1_port ALTER USER test_user RENAME TO test_user_new; @@ -1190,7 +1190,7 @@ COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; SELECT * FROM reference_failure_test; - key | value + key | value --------------------------------------------------------------------- (0 rows) @@ -1203,7 +1203,7 @@ WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_failure_test'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; - logicalrelid | shardstate | count + logicalrelid | shardstate | count --------------------------------------------------------------------- reference_failure_test | 1 | 2 (1 row) @@ -1217,7 +1217,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 @@ -1234,7 +1234,7 @@ ABORT; SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:xxxxx WARNING: connection error: localhost:xxxxx - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1244,7 +1244,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1200016 | 1 | localhost | 57637 
1200016 | 1 | localhost | 57638 @@ -1265,7 +1265,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 @@ -1283,7 +1283,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 @@ -1299,7 +1299,7 @@ ORDER BY shardid, nodeport; SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:xxxxx WARNING: connection error: localhost:xxxxx - count + count --------------------------------------------------------------------- 2 (1 row) @@ -1311,13 +1311,13 @@ ALTER USER test_user RENAME TO test_user_new; -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); ERROR: connection error: localhost:xxxxx --- connect back to the master with the proper user to continue the tests +-- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET citus.next_shard_id TO 1200020; SET citus.next_placement_id TO 1200033; -- unbreak both nodes by renaming the user back to the original name SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_user'); - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | ALTER ROLE localhost | 57638 | t | ALTER ROLE @@ -1326,7 +1326,7 @@ SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_us DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; SELECT * FROM run_command_on_workers('DROP USER test_user'); - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE @@ -1342,9 +1342,9 @@ CREATE TABLE usergroups ( name text ); SELECT create_reference_table('usergroups'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE itemgroups ( @@ -1352,9 +1352,9 @@ CREATE TABLE itemgroups ( name text ); SELECT create_reference_table('itemgroups'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE users ( @@ -1363,9 +1363,9 @@ CREATE TABLE users ( user_group int ); SELECT create_distributed_table('users', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE items ( @@ -1374,9 +1374,9 @@ CREATE TABLE items ( item_group int ); SELECT create_distributed_table('items', 'user_id'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) -- Table to find values that live in different shards on the same node @@ -1388,7 +1388,7 @@ JOIN USING (shardid) ORDER BY id; - id | shard_name | nodename | nodeport + id | shard_name | nodename | nodeport --------------------------------------------------------------------- 1 | users_1200022 | localhost | 57637 2 | users_1200025 | localhost | 57638 @@ -1410,7 +1410,7 @@ INSERT INTO items VALUES (1, 'item-1'); INSERT INTO items VALUES (6, 'item-6'); END; SELECT user_id FROM items ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 6 @@ -1425,13 +1425,13 @@ ROLLBACK; -- perform parallel DDL after a co-located table has been read over 1 connection BEGIN; SELECT id FROM users WHERE id = 1; - id + id --------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; - id + id --------------------------------------------------------------------- 6 (1 row) @@ -1442,13 +1442,13 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id FROM users WHERE id = 1; - id + id --------------------------------------------------------------------- 1 (1 row) SELECT id FROM users WHERE id = 6; - id + id --------------------------------------------------------------------- 6 (1 row) @@ -1459,13 +1459,13 @@ ROLLBACK; BEGIN; ALTER TABLE items ADD COLUMN last_update timestamptz; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 1; - id + id --------------------------------------------------------------------- 1 (1 row) SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 6; - id + id --------------------------------------------------------------------- 6 (1 row) @@ -1476,18 +1476,18 @@ BEGIN; \COPY users FROM STDIN WITH CSV -- now read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; - user_id + user_id --------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; - user_id + user_id --------------------------------------------------------------------- (0 rows) -- perform a DDL command on the reference table errors -- because the current implementation of COPY always opens one connection --- per placement SELECTs have to use those connections for correctness +-- per placement SELECTs have to use those connections for correctness ALTER TABLE itemgroups ADD COLUMN last_update timestamptz; ERROR: cannot perform DDL on placement xxxxx, which has been read over multiple connections END; @@ -1496,12 +1496,12 @@ BEGIN; \COPY users FROM STDIN WITH CSV -- read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; - user_id + user_id --------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; - user_id + user_id --------------------------------------------------------------------- (0 rows) @@ -1522,12 +1522,12 @@ END; BEGIN; DELETE FROM users; SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 1; - user_id + user_id --------------------------------------------------------------------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 6; - user_id + user_id 
--------------------------------------------------------------------- (0 rows) @@ -1539,14 +1539,14 @@ BEGIN; \COPY users FROM STDIN WITH CSV -- Uses first connection, which wrote the row with id = 2 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 2; - id | name | user_group | gid | name + id | name | user_group | gid | name --------------------------------------------------------------------- 2 | onder | 2 | 2 | group (1 row) -- Should use second connection, which wrote the row with id = 4 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 4; - id | name | user_group | gid | name + id | name | user_group | gid | name --------------------------------------------------------------------- 4 | murat | 2 | 2 | group (1 row) @@ -1565,7 +1565,7 @@ $BODY$ LANGUAGE plpgsql; SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; - name + name --------------------------------------------------------------------- (0 rows) @@ -1574,7 +1574,7 @@ SET citus.function_opens_transaction_block TO off; SELECT insert_abort(); ERROR: do not insert SELECT name FROM labs WHERE id = 1001; - name + name --------------------------------------------------------------------- Rollback Labs (1 row) diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 78f585eeb..bcb132278 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -7,24 +7,24 @@ SET citus.next_shard_id TO 1420000; SET citus.shard_replication_factor TO 1; CREATE TABLE test (id integer, val integer); SELECT create_distributed_table('test', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_coloc (id integer, val integer); SELECT create_distributed_table('test_coloc', 'id', colocate_with := 'test'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 1; CREATE TABLE singleshard (id integer, val integer); SELECT create_distributed_table('singleshard', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- turn off propagation to avoid Enterprise processing the following section @@ -123,34 +123,34 @@ ERROR: path must be in the pgsql_job_cache directory SET ROLE full_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; - count + count --------------------------------------------------------------------- 1 (1 row) INSERT INTO test VALUES (2); SELECT count(*) FROM test; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; - count | min + count | min --------------------------------------------------------------------- 2 | full_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -166,23 +166,23 @@ ERROR: operation is not allowed HINT: Run the command with a superuser. 
-- create a task that other users should not be able to inspect SELECT task_tracker_assign_task(1, 1, 'SELECT 1'); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) -- check read permission SET ROLE read_access; -- should be allowed to run commands, as the current user SELECT result FROM run_command_on_workers($$SELECT current_user$$); - result + result --------------------------------------------------------------------- read_access read_access (2 rows) SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); - result + result --------------------------------------------------------------------- read_access read_access @@ -191,7 +191,7 @@ SELECT result FROM run_command_on_placements('test', $$SELECT current_user$$); (4 rows) SELECT result FROM run_command_on_colocated_placements('test', 'test_coloc', $$SELECT current_user$$); - result + result --------------------------------------------------------------------- read_access read_access @@ -202,7 +202,7 @@ SELECT result FROM run_command_on_colocated_placements('test', 'test_coloc', $$S EXECUTE prepare_insert(1); ERROR: permission denied for table test EXECUTE prepare_select; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -210,27 +210,27 @@ EXECUTE prepare_select; INSERT INTO test VALUES (2); ERROR: permission denied for table test SELECT count(*) FROM test; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*), min(current_user) FROM test; - count | min + count | min --------------------------------------------------------------------- 2 | read_access (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -250,7 +250,7 @@ ERROR: must be owner of schema pg_merge_job_0001 -- should not be allowed to take aggressive locks on table BEGIN; SELECT lock_relation_if_exists('test', 'ACCESS SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) @@ -285,13 +285,13 @@ RESET citus.task_executor_type; -- should be able to use intermediate results as any user BEGIN; SELECT create_intermediate_result('topten', 'SELECT s FROM generate_series(1,10) s'); - create_intermediate_result + create_intermediate_result --------------------------------------------------------------------- 10 (1 row) SELECT * FROM read_intermediate_result('topten', 'binary'::citus_copy_format) AS res (s int) ORDER BY s; - s + s --------------------------------------------------------------------- 1 2 @@ -333,22 +333,22 @@ SET ROLE full_access; CREATE TABLE my_table (id integer, val integer); RESET ROLE; SELECT create_distributed_table('my_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT result FROM run_command_on_workers($$SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_table_%' LIMIT 1$$); - result + result --------------------------------------------------------------------- 
full_access full_access (2 rows) SELECT task_tracker_cleanup_job(1); - task_tracker_cleanup_job + task_tracker_cleanup_job --------------------------------------------------------------------- - + (1 row) -- table should be distributable by super user when it has data in there @@ -358,13 +358,13 @@ INSERT INTO my_table_with_data VALUES (1,2); RESET ROLE; SELECT create_distributed_table('my_table_with_data', 'id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM my_table_with_data; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -385,9 +385,9 @@ RESET ROLE; SET ROLE read_access; SELECT create_distributed_table('my_role_table_with_data', 'id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) RESET ROLE; @@ -395,7 +395,7 @@ RESET ROLE; SELECT result FROM run_command_on_workers($cmd$ SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1; $cmd$); - result + result --------------------------------------------------------------------- some_role some_role @@ -430,48 +430,48 @@ SELECT create_distributed_function('usage_access_func(usage_access_type,int[])') ERROR: must be owner of function usage_access_func SET ROLE usage_access; SELECT create_distributed_function('usage_access_func(usage_access_type,int[])'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'; - typowner + typowner --------------------------------------------------------------------- usage_access (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'; - proowner + proowner --------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT typowner::regrole FROM pg_type WHERE typname = 'usage_access_type'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) (2 rows) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) CREATE TABLE colocation_table(id text); SELECT create_distributed_table('colocation_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- now, make sure that the user can use the function @@ -480,13 +480,13 @@ BEGIN; CREATE FUNCTION usage_access_func_second(key int, variadic v int[]) RETURNS text LANGUAGE plpgsql AS 'begin return current_user; end;'; SELECT create_distributed_function('usage_access_func_second(int,int[])', '$1', colocate_with := 'colocation_table'); - create_distributed_function + create_distributed_function 
--------------------------------------------------------------------- - + (1 row) SELECT usage_access_func_second(1, 2,3,4,5) FROM full_access_user_schema.t1 LIMIT 1; - usage_access_func_second + usage_access_func_second --------------------------------------------------------------------- usage_access (1 row) @@ -498,26 +498,26 @@ CREATE FUNCTION usage_access_func_third(key int, variadic v int[]) RETURNS text \c - - - :master_port -- show that the current user is a super user SELECT usesuper FROM pg_user where usename IN (SELECT current_user); - usesuper + usesuper --------------------------------------------------------------------- t (1 row) -- superuser creates the distributed function that is owned by a regular user SELECT create_distributed_function('usage_access_func_third(int,int[])', '$1', colocate_with := 'colocation_table'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'; - proowner + proowner --------------------------------------------------------------------- usage_access (1 row) SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE proname = 'usage_access_func_third'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,usage_access) (localhost,57638,t,usage_access) @@ -526,24 +526,24 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron -- we don't want other tests to have metadata synced -- that might change the test outputs, so we're just trying to be careful SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) RESET ROLE; -- now we distribute the table as super user SELECT create_distributed_table('full_access_user_schema.t1', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify the owner of the shards for the distributed tables @@ -554,7 +554,7 @@ SELECT result FROM run_command_on_workers($cmd$ AND tablename LIKE 't1_%' LIMIT 1; $cmd$); - result + result --------------------------------------------------------------------- usage_access usage_access @@ -564,9 +564,9 @@ $cmd$); SET ROLE full_access; CREATE TABLE full_access_user_schema.t2(id int); SELECT create_distributed_table('full_access_user_schema.t2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) RESET ROLE; @@ -577,15 +577,15 @@ BEGIN; CREATE TABLE full_access_user_schema.r1(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT upgrade_to_reference_table('full_access_user_schema.r1'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) COMMIT; @@ -597,9 +597,9 @@ BEGIN; CREATE TABLE full_access_user_schema.r2(id int); SET LOCAL citus.shard_count TO 1; SELECT create_distributed_table('full_access_user_schema.r2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COMMIT; @@ -611,9 +611,9 @@ ERROR: must be owner of table r2 RESET ROLE; -- the super user should be able SELECT upgrade_to_reference_table('full_access_user_schema.r2'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- verify the owner of the shards for the reference table @@ -624,7 +624,7 @@ SELECT result FROM run_command_on_workers($cmd$ AND tablename LIKE 'r2_%' LIMIT 1; $cmd$); - result + result --------------------------------------------------------------------- full_access full_access @@ -632,9 +632,9 @@ $cmd$); -- super user should be the only one being able to call worker_cleanup_job_schema_cache SELECT worker_cleanup_job_schema_cache(); - worker_cleanup_job_schema_cache + worker_cleanup_job_schema_cache --------------------------------------------------------------------- - + (1 row) SET ROLE full_access; @@ -654,9 +654,9 @@ RESET ROLE; \c - - - :worker_1_port SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table + worker_hash_partition_table --------------------------------------------------------------------- - + (1 row) RESET ROLE; @@ -676,9 +676,9 @@ ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_0000 -- only the user who created the files should be able to fetch SET ROLE full_access; SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); - worker_fetch_partition_file + worker_fetch_partition_file --------------------------------------------------------------------- - + (1 row) RESET ROLE; @@ -695,9 +695,9 @@ SET ROLE full_access; -- user could call worker_merge_files_into_table and store the results in public, which is -- not what we want SELECT task_tracker_assign_task(42, 1, 'SELECT 1'); - task_tracker_assign_task + 
task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) RESET ROLE; @@ -710,13 +710,13 @@ RESET ROLE; -- although it does create the table SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); WARNING: Task file "task_000001.xxxx" does not have expected suffix ".10" - worker_merge_files_into_table + worker_merge_files_into_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -724,13 +724,13 @@ SELECT count(*) FROM pg_merge_job_0042.task_000001; DROP TABLE pg_merge_job_0042.task_000001; -- drop table so we can reuse the same files for more tests SET ROLE full_access; SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']); - worker_merge_files_into_table + worker_merge_files_into_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -752,19 +752,19 @@ SELECT worker_merge_files_and_run_query(42, 1, 'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge' ); WARNING: Task file "task_000001.xxxx" does not have expected suffix ".10" - worker_merge_files_and_run_query + worker_merge_files_and_run_query --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -776,9 +776,9 @@ SELECT worker_merge_files_and_run_query(42, 1, 'CREATE TABLE task_000001_merge(merge_column_0 int)', 'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge' ); - worker_merge_files_and_run_query + worker_merge_files_and_run_query --------------------------------------------------------------------- - + (1 row) -- test that owner of task cannot execute arbitrary sql @@ -795,13 +795,13 @@ SELECT worker_merge_files_and_run_query(42, 1, ERROR: permission denied to drop role CONTEXT: SQL statement "DROP USER usage_access" SELECT count(*) FROM pg_merge_job_0042.task_000001_merge; - count + count --------------------------------------------------------------------- 25 (1 row) SELECT count(*) FROM pg_merge_job_0042.task_000001; - count + count --------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 371db737b..68bec5415 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -8,7 +8,7 @@ SET citus.replication_model TO streaming; SET client_min_messages TO WARNING; CREATE USER reprefuser WITH LOGIN; SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -17,30 +17,30 @@ SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); SET citus.enable_alter_role_propagation 
TO ON; ALTER ROLE reprefuser WITH CREATEDB; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) -- test that coordinator pg_dist_node entry is synced to the workers SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata + verify_metadata | verify_metadata --------------------------------------------------------------------- t | t (1 row) CREATE TABLE ref(a int); SELECT create_reference_table('ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- alter role from mx worker isn't propagated @@ -48,14 +48,14 @@ SELECT create_reference_table('ref'); SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEROLE; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole + rolcreatedb | rolcreaterole --------------------------------------------------------------------- t | t (1 row) \c - - - :worker_2_port select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole + rolcreatedb | rolcreaterole --------------------------------------------------------------------- t | f (1 row) @@ -64,7 +64,7 @@ select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; SET search_path TO mx_add_coordinator,public; SET client_min_messages TO WARNING; select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; - rolcreatedb | rolcreaterole + rolcreatedb | rolcreaterole --------------------------------------------------------------------- t | f (1 row) @@ -78,7 +78,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref - count + count --------------------------------------------------------------------- 0 (1 row) @@ -88,7 +88,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable LOG: executing the command locally: SELECT count(*) AS count FROM mx_add_coordinator.ref_7000000 ref - count + count --------------------------------------------------------------------- 0 (1 row) @@ -99,7 +99,7 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 0 (1 row) @@ -108,7 +108,7 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 0 (1 row) @@ -117,7 +117,7 @@ SELECT count(*) FROM ref; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 0 (1 row) @@ -143,7 +143,7 @@ ERROR: relation local_table is not distributed \c - - - :master_port SET search_path TO mx_add_coordinator,public; SELECT * 
FROM ref ORDER BY a; - a + a --------------------------------------------------------------------- 2 3 @@ -152,33 +152,33 @@ SELECT * FROM ref ORDER BY a; -- Clear pg_dist_transaction before removing the node. This is to keep the output -- of multi_mx_transaction_recovery consistent. SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM run_command_on_workers('SELECT recover_prepared_transactions()'); - count + count --------------------------------------------------------------------- 2 (1 row) SELECT master_remove_node('localhost', :master_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- test that coordinator pg_dist_node entry was removed from the workers SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata + verify_metadata | verify_metadata --------------------------------------------------------------------- t | t (1 row) diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index 8745fd6da..7355e14ac 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -7,9 +7,9 @@ set citus.replication_model to 'statement'; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5); @@ -20,42 +20,42 @@ set citus.replication_model to 'streaming'; -- create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select create_reference_table('mx_call_dist_table_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_ref values (2,7),(1,8),(2,8),(1,8),(2,8); create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select 
create_distributed_table('mx_call_dist_table_enum', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_enum values (1,'S'),(2,'A'),(3,'D'),(4,'F'); @@ -85,47 +85,47 @@ BEGIN END;$$; -- Test that undistributed procedures have no issue executing call multi_mx_call.mx_call_proc(2, 0); - y + y --------------------------------------------------------------------- 29 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); - x | y + x | y --------------------------------------------------------------------- F | S (1 row) -- Same for unqualified names call mx_call_proc(2, 0); - y + y --------------------------------------------------------------------- 29 (1 row) call mx_call_proc_custom_types('S', 'A'); - x | y + x | y --------------------------------------------------------------------- F | S (1 row) -- Mark both procedures as distributed ... select create_distributed_function('mx_call_proc(int,int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('mx_call_proc_bigint(bigint,bigint)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('mx_call_proc_custom_types(mx_call_enum,mx_call_enum)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- We still don't route them to the workers, because they aren't @@ -139,68 +139,68 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) call mx_call_proc_bigint(4, 2); DEBUG: stored procedure does not have co-located tables - y + y --------------------------------------------------------------------- 8 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables - x | y + x | y --------------------------------------------------------------------- F | S (1 row) -- Mark them as colocated with a table. Now we should route them to workers. 
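The colocate_proc_with_table calls exercised below are a helper used by these tests: for an already-distributed routine they record which table's colocation group it belongs to and which argument index is the distribution argument (apparently zero-based, since the bounds checks further down reject -1 and 2 for a two-argument procedure). A minimal sketch of the pattern, using names from the test itself:

    select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1);
    -- with colocation metadata in place, the whole CALL is routed to the
    -- worker that owns the matching shard:
    call multi_mx_call.mx_call_proc(2, 0);
    -- DEBUG:  pushing down the procedure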
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select colocate_proc_with_table('mx_call_proc_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select colocate_proc_with_table('mx_call_proc_custom_types', 'mx_call_dist_table_enum'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); DEBUG: pushing down the procedure - y + y --------------------------------------------------------------------- 28 (1 row) call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure - x | y + x | y --------------------------------------------------------------------- S | S (1 row) call mx_call_proc(2, 0); DEBUG: pushing down the procedure - y + y --------------------------------------------------------------------- 28 (1 row) call mx_call_proc_custom_types('S', 'A'); DEBUG: pushing down the procedure - x | y + x | y --------------------------------------------------------------------- S | S (1 row) @@ -208,7 +208,7 @@ DEBUG: pushing down the procedure -- Test implicit cast of int to bigint call mx_call_proc_bigint(4, 2); DEBUG: pushing down the procedure - y + y --------------------------------------------------------------------- 8 (1 row) @@ -223,7 +223,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) @@ -236,7 +236,7 @@ drop table mx_call_dist_table_enum; SET client_min_messages TO DEBUG1; call multi_mx_call.mx_call_proc_custom_types('S', 'A'); DEBUG: stored procedure does not have co-located tables - x | y + x | y --------------------------------------------------------------------- F | S (1 row) @@ -244,9 +244,9 @@ DEBUG: stored procedure does not have co-located tables -- Make sure we do bounds checking on distributed argument index -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, -1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); @@ -257,15 +257,15 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function 
mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 2); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); @@ -276,16 +276,16 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_ref'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); @@ -296,16 +296,16 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_replica'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); @@ -316,7 +316,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) @@ -325,9 +325,9 @@ SET client_min_messages TO NOTICE; drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) -- Test that we handle transactional constructs correctly inside a procedure @@ -347,15 +347,15 @@ CALL multi_mx_call.mx_call_proc_tx(10); select create_distributed_function('mx_call_proc_tx(int)', '$1', 
'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) CALL multi_mx_call.mx_call_proc_tx(20); DEBUG: pushing down the procedure SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val; - id | val + id | val --------------------------------------------------------------------- 3 | 1 3 | 5 @@ -377,9 +377,9 @@ END;$$; select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) \set VERBOSITY terse @@ -390,15 +390,15 @@ ERROR: error \set VERBOSITY default -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) call multi_mx_call.mx_call_proc(2, 0); @@ -409,22 +409,22 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make @@ -441,9 +441,9 @@ CREATE FUNCTION mx_call_add(int, int) RETURNS int SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- non-const distribution parameters cannot be pushed down @@ -455,7 +455,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) @@ -463,7 +463,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment -- non-const parameter can be pushed down call multi_mx_call.mx_call_proc(multi_mx_call.mx_call_add(3, 4), 2); DEBUG: pushing down the procedure - y + y --------------------------------------------------------------------- 33 (1 row) @@ -477,7 +477,7 @@ PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_proc(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 27 (1 row) diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 2969d6779..e64a49439 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -3,15 +3,15 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- create schema to test schema support @@ -145,9 +145,9 @@ CREATE TABLE nation_hash( ); SET citus.shard_count TO 16; SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET search_path TO citus_mx_test_schema; @@ -159,9 +159,9 @@ CREATE TABLE citus_mx_test_schema.nation_hash( n_comment varchar(152) ); SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( @@ -171,9 +171,9 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( n_comment varchar(152)); SET citus.shard_count TO 4; SELECT 
create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( @@ -182,9 +182,9 @@ CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET search_path TO citus_mx_test_schema_join_2; @@ -194,9 +194,9 @@ CREATE TABLE nation_hash ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET search_path TO citus_mx_test_schema; @@ -207,9 +207,9 @@ CREATE TABLE nation_hash_collation_search_path( n_comment varchar(152) ); SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY nation_hash_collation_search_path FROM STDIN with delimiter '|'; @@ -221,9 +221,9 @@ CREATE TABLE citus_mx_test_schema.nation_hash_composite_types( test_col citus_mx_test_schema.new_composite_type ); SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- insert some data to verify composite type queries @@ -252,9 +252,9 @@ CREATE TABLE lineitem_mx ( PRIMARY KEY(l_orderkey, l_linenumber) ); SET citus.shard_count TO 16; SELECT create_distributed_table('lineitem_mx', 'l_orderkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate); @@ -270,9 +270,9 @@ CREATE TABLE orders_mx ( o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_mx', 'o_orderkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE customer_mx ( @@ -285,9 +285,9 @@ CREATE TABLE customer_mx ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_reference_table('customer_mx'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE nation_mx ( @@ -296,9 +296,9 @@ CREATE TABLE nation_mx ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation_mx'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE part_mx ( @@ -312,9 +312,9 @@ CREATE TABLE part_mx ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_reference_table('part_mx'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE supplier_mx @@ -328,9 +328,9 @@ CREATE TABLE supplier_mx s_comment varchar(101) not null ); 
SELECT create_reference_table('supplier_mx'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- Create test table for ddl @@ -340,9 +340,9 @@ CREATE TABLE mx_ddl_table ( ); SET citus.shard_count TO 4; SELECT create_distributed_table('mx_ddl_table', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Load some test data @@ -358,9 +358,9 @@ CREATE TABLE limit_orders_mx ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders_mx', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test table for modifications @@ -369,17 +369,17 @@ CREATE TABLE multiple_hash_mx ( data text NOT NULL ); SELECT create_distributed_table('multiple_hash_mx', 'category'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 4; CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events_mx', 'app_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE researchers_mx ( @@ -389,9 +389,9 @@ CREATE TABLE researchers_mx ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers_mx', 'lab_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE labs_mx ( @@ -400,9 +400,9 @@ CREATE TABLE labs_mx ( ); SET citus.shard_count TO 1; SELECT create_distributed_table('labs_mx', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- now, for some special failures... 
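The tables above are deliberately split between create_distributed_table and create_reference_table: large relations are hash-partitioned into shards, while the small, join-heavy ones (customer_mx, nation_mx, part_mx, supplier_mx) are replicated in full to every node so joins against them need no data movement. A minimal sketch of the two calls, with hypothetical table names:

    -- hash-distribute a large table on its key
    CREATE TABLE orders_example (o_orderkey bigint, o_custkey int);
    SELECT create_distributed_table('orders_example', 'o_orderkey');
    -- replicate a small lookup table in full to every node
    CREATE TABLE nation_example (n_nationkey int, n_name char(25));
    SELECT create_reference_table('nation_example');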
@@ -411,9 +411,9 @@ CREATE TABLE objects_mx ( name text NOT NULL ); SELECT create_distributed_table('objects_mx', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE articles_hash_mx ( @@ -426,24 +426,24 @@ CREATE TABLE articles_hash_mx ( CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx); SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash_mx', 'author_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 1; SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 4; CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int); SELECT create_distributed_table('company_employees_mx', 'company_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) WITH shard_counts AS ( @@ -452,7 +452,7 @@ WITH shard_counts AS ( SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; - logicalrelid | colocationid | shard_count | partmethod | repmodel + logicalrelid | colocationid | shard_count | partmethod | repmodel --------------------------------------------------------------------- citus_mx_test_schema_join_1.nation_hash | 1390002 | 4 | h | s citus_mx_test_schema_join_1.nation_hash_2 | 1390002 | 4 | h | s diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index d55fd1359..6deaa6dd0 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -1,6 +1,6 @@ -- Tests related to distributed DDL commands on mx cluster SELECT * FROM mx_ddl_table ORDER BY key; - key | value + key | value --------------------------------------------------------------------- 1 | 10 2 | 11 @@ -18,22 +18,22 @@ CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; -- SET DEFAULT -ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; +ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL; -- SET NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value @@ -43,32 +43,32 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE -- make sure we don't break the following tests 
by hiding the shard names SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220088'; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- ddl_test_index_1220088 | value | integer | value ddl_test_concurrent_index_1220088 | value | integer | value @@ -78,32 +78,32 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE -- make sure we don't break the following tests by hiding the shard names SET citus.override_table_visibility TO FALSE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index'; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- ddl_test_index | value | integer | value ddl_test_concurrent_index | value | integer | value (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | version | integer | not null default 1 (3 rows) SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'ddl_test%_index_1220089'; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- ddl_test_index_1220089 | value | integer | value ddl_test_concurrent_index_1220089 | value | integer | value @@ -118,7 +118,7 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision; INSERT INTO mx_ddl_table VALUES (78, 83, 2.1); \c - - - :worker_1_port SELECT * FROM mx_ddl_table ORDER BY key; - key | value | version + key | value | version --------------------------------------------------------------------- 1 | 10 | 0 2 | 11 | 0 @@ -146,69 +146,69 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL; ALTER TABLE mx_ddl_table DROP COLUMN version; -- See 
that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index_1220088 List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | integer | not null - value | integer | + value | integer | (2 rows) \di ddl_test*_index_1220089 List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) @@ -222,9 +222,9 @@ SET citus.shard_count TO 4; SET citus.replication_model TO streaming; CREATE TABLE mx_sequence(key INT, value BIGSERIAL); SELECT create_distributed_table('mx_sequence', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -235,7 +235,7 @@ SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset -- don't look at the actual values because they rely on the groupids of the nodes -- which can change depending on the tests which have run before this one SELECT :worker_1_lastval = :worker_2_lastval; - ?column? + ?column? 
--------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index 2659c46d6..fe2f78a29 100644 --- a/src/test/regress/expected/multi_mx_explain.out +++ b/src/test/regress/expected/multi_mx_explain.out @@ -225,40 +225,40 @@ t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: +- Plan: Node Type: "Sort" Parallel Aware: false - Sort Key: + Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false - Group Key: + Group Key: - "remote_scan.l_quantity" - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 16 Tasks Shown: "One of 16" - Tasks: + Tasks: - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false - Group Key: + Group Key: - "l_quantity" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -799,43 +799,43 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -- Plan: +- Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false - Distributed Query: - Job: + Distributed Query: + Job: Task Count: 16 Tasks Shown: "One of 16" - Tasks: + Tasks: - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: + Remote Plan: + - Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)" - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -844,7 +844,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -853,14 +853,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Hash Join" Parent Relationship: "Outer" Parallel Aware: false Join Type: "Inner" Inner Unique: false Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)" - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false @@ -869,7 +869,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Hash" Parent Relationship: "Inner" Parallel Aware: false - Plans: + Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out 
b/src/test/regress/expected/multi_mx_function_call_delegation.out index d8b44ff3e..c4d25b4c6 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -6,9 +6,9 @@ SET citus.replication_model TO 'statement'; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); select create_distributed_table('mx_call_dist_table_replica', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5); @@ -19,42 +19,42 @@ SET citus.replication_model TO 'streaming'; -- create table mx_call_dist_table_1(id int, val int); select create_distributed_table('mx_call_dist_table_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5); create table mx_call_dist_table_2(id int, val int); select create_distributed_table('mx_call_dist_table_2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_bigint(id bigint, val bigint); select create_distributed_table('mx_call_dist_table_bigint', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4); create table mx_call_dist_table_ref(id int, val int); select create_reference_table('mx_call_dist_table_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_ref values (2,7),(1,8),(2,8),(1,8),(2,8); create type mx_call_enum as enum ('A', 'S', 'D', 'F'); create table mx_call_dist_table_enum(id int, key mx_call_enum); select create_distributed_table('mx_call_dist_table_enum', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into mx_call_dist_table_enum values (1,'S'),(2,'A'),(3,'D'),(4,'F'); @@ -87,19 +87,19 @@ BEGIN END;$$; -- Test that undistributed functions have no issue executing select multi_mx_function_call_delegation.mx_call_func(2, 0); - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); - mx_call_func_custom_types + mx_call_func_custom_types --------------------------------------------------------------------- (F,S) (1 row) select squares(4); - squares + squares --------------------------------------------------------------------- (1,1) (2,4) @@ -109,34 +109,34 @@ select squares(4); -- Same for unqualified name select mx_call_func(2, 0); - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) -- Mark both functions as distributed ... 
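Note that create_distributed_function with a single argument only records the routine in the metadata; without a distribution argument and a colocated table, calls still execute on the coordinator, as the DEBUG lines in the hunks below show. A minimal sketch, using names from the test:

    select create_distributed_function('mx_call_func_bigint(bigint,bigint)');
    select mx_call_func_bigint(4, 2);
    -- DEBUG:  function does not have co-located tables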
select create_distributed_function('mx_call_func(int,int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('mx_call_func_bigint(bigint,bigint)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('mx_call_func_custom_types(mx_call_enum,mx_call_enum)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select create_distributed_function('squares(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- We still don't route them to the workers, because they aren't @@ -150,67 +150,67 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) select multi_mx_function_call_delegation.mx_call_func_bigint(4, 2); DEBUG: function does not have co-located tables - mx_call_func_bigint + mx_call_func_bigint --------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables - mx_call_func_custom_types + mx_call_func_custom_types --------------------------------------------------------------------- (F,S) (1 row) -- Mark them as colocated with a table. Now we should route them to workers. 
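The same colocation helper then enables delegation of plain function calls: once mx_call_func is tied to mx_call_dist_table_1 with argument index 1 as the distribution argument, a top-level SELECT of the function is shipped to the worker owning the matching shard. Sketch, with names from the test:

    select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1);
    select mx_call_func(2, 0);
    -- DEBUG:  pushing down the function call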
select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select mx_call_func(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) select mx_call_func_bigint(4, 2); DEBUG: pushing down the function call - mx_call_func_bigint + mx_call_func_bigint --------------------------------------------------------------------- 8 (1 row) select mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call - mx_call_func_custom_types + mx_call_func_custom_types --------------------------------------------------------------------- (S,S) (1 row) @@ -220,14 +220,14 @@ DEBUG: pushing down the function call ERROR: input of anonymous composite types is not implemented select multi_mx_function_call_delegation.mx_call_func(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); DEBUG: pushing down the function call - mx_call_func_custom_types + mx_call_func_custom_types --------------------------------------------------------------------- (S,S) (1 row) @@ -242,7 +242,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) @@ -255,7 +255,7 @@ drop table mx_call_dist_table_enum; SET client_min_messages TO DEBUG1; select mx_call_func_custom_types('S', 'A'); DEBUG: function does not have co-located tables - mx_call_func_custom_types + mx_call_func_custom_types --------------------------------------------------------------------- (F,S) (1 row) @@ -263,9 +263,9 @@ DEBUG: function does not have co-located tables -- Make sure we do bounds checking on distributed argument index -- This also tests that we have cache invalidation for pg_dist_object updates select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, -1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 
row) select mx_call_func(2, 0); @@ -276,15 +276,15 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 2); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select mx_call_func(2, 0); @@ -295,16 +295,16 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with reference tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_ref'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select mx_call_func(2, 0); @@ -315,16 +315,16 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) -- We don't currently support colocating with replicated tables select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_replica'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) select mx_call_func(2, 0); @@ -335,7 +335,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 
on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) @@ -344,9 +344,9 @@ SET client_min_messages TO NOTICE; drop table mx_call_dist_table_replica; SET client_min_messages TO DEBUG1; select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); - colocate_proc_with_table + colocate_proc_with_table --------------------------------------------------------------------- - + (1 row) -- Test table returning functions. @@ -365,7 +365,7 @@ BEGIN END;$$; -- before distribution ... select mx_call_func_tbl(10); - mx_call_func_tbl + mx_call_func_tbl --------------------------------------------------------------------- (10,-1) (11,4) @@ -375,14 +375,14 @@ select mx_call_func_tbl(10); select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select mx_call_func_tbl(20); DEBUG: pushing down the function call - mx_call_func_tbl + mx_call_func_tbl --------------------------------------------------------------------- (20,-1) (21,4) @@ -398,9 +398,9 @@ END;$$; select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) select mx_call_func_raise(2); @@ -414,9 +414,9 @@ PL/pgSQL function multi_mx_function_call_delegation.mx_call_func_raise(integer) SET client_min_messages TO ERROR; CREATE TABLE test (x int primary key); SELECT create_distributed_table('test','x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION delegated_function(a int) @@ -431,9 +431,9 @@ BEGIN END; $function$; SELECT create_distributed_function('delegated_function(int)', 'a'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO DEBUG1; @@ -448,7 +448,7 @@ SELECT * FROM test WHERE not exists( DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 31_1 for subquery SELECT multi_mx_function_call_delegation.delegated_function(4) AS delegated_function DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (NOT (EXISTS (SELECT intermediate_result.delegated_function FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)))) - x + x --------------------------------------------------------------------- (0 rows) @@ -459,7 +459,7 @@ DEBUG: generating subplan 34_1 for CTE r: SELECT multi_mx_function_call_delegat DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 34_2 for subquery SELECT 
(count(*) OPERATOR(pg_catalog.=) 0) FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT x FROM multi_mx_function_call_delegation.test WHERE (SELECT intermediate_result."?column?" FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" boolean)) - x + x --------------------------------------------------------------------- (0 rows) @@ -472,7 +472,7 @@ DEBUG: generating subplan 38_1 for CTE r: SELECT multi_mx_function_call_delegat DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 38_2 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) r DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT test.x, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.c FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) - x | c + x | c --------------------------------------------------------------------- (0 rows) @@ -488,21 +488,21 @@ DEBUG: generating subplan 42_2 for CTE s: SELECT multi_mx_function_call_delegat DEBUG: not pushing down function calls in CTEs or Subqueries DEBUG: generating subplan 42_3 for CTE t: SELECT count(*) AS c FROM (SELECT intermediate_result.delegated_function FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(delegated_function integer)) s DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, r.count, t.c FROM multi_mx_function_call_delegation.test, (SELECT intermediate_result.count FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) r, (SELECT intermediate_result.c FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(c bigint)) t WHERE (t.c OPERATOR(pg_catalog.=) 0) - x | count | c + x | count | c --------------------------------------------------------------------- (0 rows) -- Test that we don't propagate to non-metadata worker nodes select stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) select stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) select mx_call_func(2, 0); @@ -513,22 +513,22 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('47_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 29 (1 row) SET client_min_messages TO 
NOTICE; select start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) select start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make @@ -546,9 +546,9 @@ CREATE FUNCTION mx_call_add(int, int) RETURNS int SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- subquery parameters cannot be pushed down @@ -560,7 +560,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT (9 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 35 (1 row) @@ -574,7 +574,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- 27 (1 row) @@ -587,13 +587,13 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - y + y --------------------------------------------------------------------- 29 (1 row) select mx_call_func(2, 0) from mx_call_dist_table_1; - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 28 @@ -613,7 +613,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('8_1'::text, 
'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func + mx_call_func --------------------------------------------------------------------- (0 rows) @@ -630,7 +630,7 @@ PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line 8 at assignment - mx_call_func | mx_call_func + mx_call_func | mx_call_func --------------------------------------------------------------------- 29 | 27 (1 row) @@ -640,7 +640,7 @@ DEBUG: not pushing down function calls in a multi-statement transaction CONTEXT: SQL statement "SELECT mx_call_func_tbl(40)" PL/pgSQL function inline_code_block line 1 at PERFORM SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; - id | val + id | val --------------------------------------------------------------------- 40 | -1 41 | 4 @@ -650,42 +650,42 @@ SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; PREPARE call_plan (int, int) AS SELECT mx_call_func($1, $2); EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) EXECUTE call_plan(2, 0); DEBUG: pushing down the function call - mx_call_func + mx_call_func --------------------------------------------------------------------- 28 (1 row) diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 6cc614d54..cbc57ef7b 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -14,10 +14,10 @@ FROM WHERE proname LIKE '%table_is_visible%' ORDER BY 1; - proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl + proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl --------------------------------------------------------------------- - citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | - pg_table_is_visible | t | f | s | s | 1 
| 0 | 16 | 26 | + citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | + pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 | (2 rows) CREATE SCHEMA mx_hide_shard_names; @@ -26,33 +26,33 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- first show that the views does not show -- any shards on the coordinator as expected SELECT * FROM citus_shards_on_worker; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) @@ -61,14 +61,14 @@ SELECT * FROM citus_shard_indexes_on_worker; \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) @@ -84,7 +84,7 @@ SELECT NOT pg_table_is_visible("t1"."Name"::regclass) LIMIT 1)); - pg_table_is_visible + pg_table_is_visible --------------------------------------------------------------------- f (1 row) @@ -98,14 +98,14 @@ CREATE INDEX test_index ON mx_hide_shard_names.test_table(id); \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table_1130000 | table | postgres mx_hide_shard_names | test_table_1130002 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 @@ -114,21 +114,21 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; -- we should be able to select from the shards directly if we -- know the name of the tables SELECT count(*) FROM test_table_1130000; - count + count --------------------------------------------------------------------- 0 (1 row) -- disable the config so that table becomes visible SELECT pg_table_is_visible('test_table_1130000'::regclass); - 
pg_table_is_visible + pg_table_is_visible --------------------------------------------------------------------- f (1 row) SET citus.override_table_visibility TO FALSE; SELECT pg_table_is_visible('test_table_1130000'::regclass); - pg_table_is_visible + pg_table_is_visible --------------------------------------------------------------------- t (1 row) @@ -143,9 +143,9 @@ SET citus.replication_model TO 'streaming'; -- not existing shard ids appended to the distributed table name CREATE TABLE test_table_102008(id int, time date); SELECT create_distributed_table('test_table_102008', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -156,7 +156,7 @@ SET search_path TO 'mx_hide_shard_names'; -- name already exists :) CREATE TABLE test_table_2_1130000(id int, time date); SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres @@ -166,7 +166,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; \d List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres @@ -182,16 +182,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE test_table(id int, time date); SELECT create_distributed_table('test_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id); \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table_102008_1130004 | table | postgres mx_hide_shard_names | test_table_102008_1130006 | table | postgres @@ -200,7 +200,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; (4 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002 @@ -208,14 +208,14 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names_2'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names_2 | test_table_1130008 | table | postgres mx_hide_shard_names_2 | test_table_1130010 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008 mx_hide_shard_names_2 | test_index_1130010 | index | 
postgres | test_table_1130010 @@ -223,12 +223,12 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) @@ -244,15 +244,15 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port SET search_path TO 'mx_hide_shard_names_3'; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130014 | table | postgres @@ -260,7 +260,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2; \d List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres (1 row) @@ -276,22 +276,22 @@ CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id"); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port SET search_path TO "CiTuS.TeeN"; SELECT * FROM citus_shards_on_worker ORDER BY 2; - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres (2 rows) SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016 CiTuS.TeeN | MyTenantIndex_1130018 | index | postgres | TeeNTabLE.1!?!_1130018 @@ -299,14 +299,14 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; \d List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres (1 row) \di List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?! 
(1 row) @@ -318,7 +318,7 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2; SET search_path TO 'mx_hide_shard_names'; \d List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- mx_hide_shard_names | test_table | table | postgres mx_hide_shard_names | test_table_102008 | table | postgres @@ -326,7 +326,7 @@ SET search_path TO 'mx_hide_shard_names'; \di List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- mx_hide_shard_names | test_index | index | postgres | test_table (1 row) diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index 5af03cee6..3198db3c4 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -2,7 +2,7 @@ -- Temporarily disable automatic 2PC recovery ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -32,102 +32,102 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_mx_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Verify that we've logged commit records SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) -- Confirm that the metadata transactions have been committed SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) -- Verify that the commit records have been removed SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; - repmodel + repmodel --------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; - count + count --------------------------------------------------------------------- 4 (1 row) \c - - - :worker_2_port SELECT 
"Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_pkey'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- key | text | key (1 row) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'distributed_mx_table_value_idx'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- value | text | value (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; - repmodel + repmodel --------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -142,16 +142,16 @@ CREATE TABLE should_not_exist ( value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ABORT; -- Verify that the table does not exist on the worker \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -166,9 +166,9 @@ CREATE TABLE should_not_exist ( value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) PREPARE TRANSACTION 'this_should_fail'; @@ -184,9 +184,9 @@ CREATE TABLE objects_for_xacts ( name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COMMIT; @@ -194,14 +194,14 @@ COMMIT; \c - - - :worker_1_port SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; - repmodel + repmodel --------------------------------------------------------------------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -209,7 +209,7 @@ WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; --- now show that we can rollback on creating mx table, but shards remain.... +-- now show that we can rollback on creating mx table, but shards remain.... 
BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; NOTICE: schema "citus_mx_schema_for_xacts" already exists, skipping @@ -221,15 +221,15 @@ CREATE TABLE objects_for_xacts2 ( name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts2', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- show that the table not exists on the coordinator SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -237,14 +237,14 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schema \c - - - :worker_1_port -- the distributed table not exists on the worker node SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; - count + count --------------------------------------------------------------------- 0 (1 row) -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -255,7 +255,7 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -277,13 +277,13 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -291,13 +291,13 @@ SELECT count(*) FROM pg_dist_transaction; -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -307,47 +307,47 @@ CREATE USER no_access_mx; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
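-- CREATE ROLE is deliberately not propagated to the workers here, so the test
-- fans the statement out by hand in the next step. A hedged sketch
-- (illustrative only, not recorded output) of how one can see on which nodes
-- a role currently exists before and after that fan-out:
SELECT nodename, nodeport, result
FROM run_command_on_workers($$
    SELECT count(*) FROM pg_roles WHERE rolname = 'no_access_mx'
$$);
-- Before the CREATE USER below runs on the workers each count is 0; after it
-- runs, every node reports 1.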
SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) SET ROLE no_access_mx; -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_distributed_table_metadata_from_workers('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['distributed_mx_table_some_val_seq']); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE SELECT master_drop_sequences(ARRAY['non_existing_schema.distributed_mx_table_some_val_seq']); - master_drop_sequences + master_drop_sequences --------------------------------------------------------------------- - + (1 row) SELECT master_drop_sequences(ARRAY['']); @@ -355,9 +355,9 @@ ERROR: invalid name syntax SELECT master_drop_sequences(ARRAY['public.']); ERROR: invalid name syntax SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq_not_existing']); - master_drop_sequences + master_drop_sequences --------------------------------------------------------------------- - + (1 row) -- make sure that we can drop unrelated tables/sequences @@ -365,18 +365,18 @@ CREATE TABLE unrelated_table(key serial); DROP TABLE unrelated_table; -- doesn't error out but it has no effect, so no need to error out SELECT master_drop_sequences(NULL); - master_drop_sequences + master_drop_sequences --------------------------------------------------------------------- - + (1 row) \c - postgres - :master_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) @@ -391,22 +391,22 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function 
raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_remove_distributed_table_metadata_from_workers('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); $$); ERROR: must be owner of the object CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE -SELECT raise_failed_aclcheck($$ +SELECT raise_failed_aclcheck($$ SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']); $$); - raise_failed_aclcheck + raise_failed_aclcheck --------------------------------------------------------------------- - + (1 row) SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); @@ -421,10 +421,10 @@ DROP TABLE unrelated_table; \c - postgres - :worker_1_port -- finally make sure that the sequence remains SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- key | text | not null - value | jsonb | + value | jsonb | some_val | bigint | not null default nextval('distributed_mx_table_some_val_seq'::regclass) (3 rows) @@ -432,7 +432,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx \c - postgres - :master_port ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications.out b/src/test/regress/expected/multi_mx_modifications.out index b1604c8d9..86132f12f 100644 --- a/src/test/regress/expected/multi_mx_modifications.out +++ b/src/test/regress/expected/multi_mx_modifications.out @@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000; INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -16,7 +16,7 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -27,14 +27,14 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745; - count + count --------------------------------------------------------------------- 1 (1 row) -- and see all the inserted rows SELECT * FROM limit_orders_mx ORDER BY 1; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 @@ -43,7 +43,7 @@ SELECT * FROM limit_orders_mx ORDER BY 1; -- basic single-row INSERT with RETURNING INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price 
--------------------------------------------------------------------- 32746 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) @@ -52,7 +52,7 @@ INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT * FROM limit_orders_mx WHERE id = 12756; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 12756 | MSFT | 10959 | Wed May 08 07:29:23 2013 | sell | 0.00 (1 row) @@ -61,7 +61,7 @@ SELECT * FROM limit_orders_mx WHERE id = 12756; INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT * FROM limit_orders_mx WHERE id = 430; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) @@ -111,27 +111,27 @@ INSERT INTO limit_orders_mx SELECT * FROM deleted_orders; -- test simple DELETE INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count + count --------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = 246; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count + count --------------------------------------------------------------------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -139,14 +139,14 @@ SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; -- DELETE with expression in WHERE clause INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count + count --------------------------------------------------------------------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -169,14 +169,14 @@ INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'se -- simple UPDATE UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders_mx WHERE id = 246; - symbol + symbol --------------------------------------------------------------------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -184,14 +184,14 @@ UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; -- expression UPDATE UPDATE 
limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders_mx WHERE id = 246; - bidder_id + bidder_id --------------------------------------------------------------------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) @@ -199,14 +199,14 @@ UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; -- multi-column UPDATE UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246; - kind | limit_price + kind | limit_price --------------------------------------------------------------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; - id | symbol | bidder_id | placed_at | kind | limit_price + id | symbol | bidder_id | placed_at | kind | limit_price --------------------------------------------------------------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) @@ -230,7 +230,7 @@ ERROR: relation bidders is not distributed WITH deleted_orders AS (INSERT INTO limit_orders_mx VALUES (399, 'PDR', 14, '2017-07-02 16:32:15', 'sell', 43)) UPDATE limit_orders_mx SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; - symbol | bidder_id + symbol | bidder_id --------------------------------------------------------------------- GM | 30 (1 row) @@ -242,14 +242,14 @@ UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; - symbol | bidder_id + symbol | bidder_id --------------------------------------------------------------------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; - id | lower | symbol + id | lower | symbol --------------------------------------------------------------------- 246 | gm | GM (1 row) @@ -276,7 +276,7 @@ UPDATE limit_orders_mx SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders_mx WHERE id = 246; - array_of_values + array_of_values --------------------------------------------------------------------- {1,2} (1 row) @@ -287,7 +287,7 @@ CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint SELECT array_of_values FROM limit_orders_mx WHERE id = 246; - array_of_values + array_of_values --------------------------------------------------------------------- {1,2} (1 row) @@ -311,7 +311,7 @@ INSERT INTO multiple_hash_mx VALUES ('0', '4'); INSERT INTO multiple_hash_mx VALUES ('0', '5'); INSERT INTO multiple_hash_mx VALUES ('0', '6'); UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; - category | data + category | data 
--------------------------------------------------------------------- 0 | 1-1 0 | 2-1 @@ -322,7 +322,7 @@ UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; (6 rows) DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *; - category | data + category | data --------------------------------------------------------------------- 0 | 1-1 0 | 2-1 @@ -347,7 +347,7 @@ INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *; - category | data + category | data --------------------------------------------------------------------- 2 | 3 (1 row) @@ -362,7 +362,7 @@ UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category; - category + category --------------------------------------------------------------------- 1 1 @@ -372,7 +372,7 @@ UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING ca UPDATE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- 1 | 1-1-2-2 1 | 2-2-2 @@ -388,7 +388,7 @@ DELETE FROM multiple_hash_mx WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; - category + category --------------------------------------------------------------------- 1 1 @@ -398,12 +398,12 @@ DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; DELETE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- (0 rows) SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data; - category | data + category | data --------------------------------------------------------------------- (0 rows) @@ -430,32 +430,32 @@ SELECT minimum_value::bigint AS min_value, SELECT last_value FROM app_analytics_events_mx_id_seq \gset ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE; SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184); - setval + setval --------------------------------------------------------------------- 3940649673949184 (1 row) INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; - id + id --------------------------------------------------------------------- 3940649673949185 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id; - id + id --------------------------------------------------------------------- 3940649673949186 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *; - id | app_id | name + id | app_id | name --------------------------------------------------------------------- 3940649673949187 | 103 | Mynt (1 row) -- clean up SELECT setval('app_analytics_events_mx_id_seq'::regclass, :last_value); - setval + setval --------------------------------------------------------------------- 4503599627370497 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out index 7b6cd15bf..fb2961934 100644 --- a/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out +++ 
b/src/test/regress/expected/multi_mx_modifications_to_reference_tables.out @@ -8,36 +8,36 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) CREATE TABlE ref_table(id int, value_1 int); SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABlE ref_table_2(id int, value_1 int); SELECT create_reference_table('ref_table_2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_table_1(id int, value_1 int); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1 VALUES(5,5),(6,6); @@ -46,28 +46,28 @@ SET search_path TO 'mx_modify_reference_table'; -- Simple DML operations from the first worker node INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 3 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 2 (1 row) DELETE FROM ref_table; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- - + (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 3 (1 row) @@ -75,7 +75,7 @@ SELECT SUM(value_1) FROM ref_table; -- Select For Update also follows the same logic with modification. -- It has been started to be supported on MX nodes with DML operations. SELECT * FROM ref_table FOR UPDATE; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -85,14 +85,14 @@ SELECT * FROM ref_table FOR UPDATE; -- queries are also supported on MX nodes. INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 14 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; - sum + sum --------------------------------------------------------------------- 14 (1 row) @@ -101,51 +101,51 @@ SELECT SUM(value_1) FROM ref_table_2; \c - - - :worker_2_port SET search_path TO 'mx_modify_reference_table'; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 14 (1 row) SELECT SUM(value_1) FROM ref_table_2; - sum + sum --------------------------------------------------------------------- 14 (1 row) --- Run basic queries from second worker node. These tests have been added +-- Run basic queries from second worker node. 
These tests have been added -- since locking logic is slightly different between running these commands -- from first worker node and the second one INSERT INTO ref_table VALUES(1,1),(2,2); SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 17 (1 row) UPDATE ref_table SET value_1 = 1 WHERE id = 2; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 15 (1 row) COPY ref_table FROM STDIN DELIMITER ','; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 18 (1 row) INSERT INTO ref_table SELECT * FROM test_table_1; SELECT SUM(value_1) FROM ref_table; - sum + sum --------------------------------------------------------------------- 29 (1 row) INSERT INTO ref_table_2 SELECT * FROM ref_table; SELECT SUM(value_1) FROM ref_table_2; - sum + sum --------------------------------------------------------------------- 43 (1 row) diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index fcffb25b1..0466c847e 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -13,7 +13,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name + name --------------------------------------------------------------------- John Backus (1 row) @@ -25,7 +25,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name + name --------------------------------------------------------------------- John Backus Worker 1 (1 row) @@ -37,7 +37,7 @@ DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; - name + name --------------------------------------------------------------------- John Backus Worker 2 (1 row) @@ -48,7 +48,7 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name + name --------------------------------------------------------------------- Donald Knuth (1 row) @@ -59,7 +59,7 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name + name --------------------------------------------------------------------- Donald Knuth (1 row) @@ -70,7 +70,7 @@ BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; - name + name --------------------------------------------------------------------- Donald Knuth (1 row) @@ -84,7 +84,7 @@ SAVEPOINT hire_thompson; INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6; - name + name --------------------------------------------------------------------- Ken Thompson (1 row) @@ -108,7 +108,7 @@ INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 4; - name + name --------------------------------------------------------------------- Jim Gray (1 
row) @@ -130,7 +130,7 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;; - id | lab_id | name | id | name + id | lab_id | name | id | name --------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) @@ -149,7 +149,7 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5; - id | lab_id | name | id | name + id | lab_id | name | id | name --------------------------------------------------------------------- 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos @@ -171,7 +171,7 @@ BEGIN; SET LOCAL citus.enable_local_execution TO off; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); SELECT count(*) FROM researchers_mx WHERE lab_id = 6; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -187,7 +187,7 @@ COMMIT; BEGIN; \copy labs_mx from stdin delimiter ',' SELECT name FROM labs_mx WHERE id = 10; - name + name --------------------------------------------------------------------- Weyland-Yutani-1 Weyland-Yutani-2 @@ -205,7 +205,7 @@ DETAIL: Key (id)=(X) already exists. COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -221,7 +221,7 @@ CONTEXT: while executing command on localhost:xxxxx COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -249,12 +249,12 @@ ERROR: illegal value COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -268,12 +268,12 @@ ERROR: illegal value COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -296,12 +296,12 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -318,12 +318,12 @@ ERROR: current transaction is aborted, commands ignored until end of transactio COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -348,12 +348,12 @@ WARNING: could not commit transaction for 
shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 7; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -376,12 +376,12 @@ WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -401,12 +401,12 @@ WARNING: could not commit transaction for shard xxxxx on any active node ERROR: could not commit transaction on any active node -- no data should persist SELECT * FROM objects_mx WHERE id = 1; - id | name + id | name --------------------------------------------------------------------- (0 rows) SELECT * FROM labs_mx WHERE id = 8; - id | name + id | name --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 0f05bc090..b44d69a84 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -22,7 +22,7 @@ $$; -- add a node to the cluster SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57637 | f | f (1 row) @@ -30,41 +30,41 @@ SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node -- create a couple of tables CREATE TABLE ref_table(a int primary key); SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE dist_table_1(a int primary key, b int references ref_table(a)); SELECT create_distributed_table('dist_table_1', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- update the node SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node), 'localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57638 | f | f (1 row) -- start syncing metadata to the node SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); - ?column? + ?column?
--------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) @@ -77,19 +77,19 @@ SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node -- if the maintenance daemon does the metadata sync too fast. BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57638 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57637 | t | f (1 row) @@ -98,19 +98,19 @@ END; -- wait until maintenance daemon does the next metadata sync, and then -- check if metadata is synced again SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | t (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) @@ -119,19 +119,19 @@ SELECT verify_metadata('localhost', :worker_1_port); -- an unwriteable node. BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 57637 | t | t (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | nodename | nodeport | hasmetadata | metadatasynced + nodeid | nodename | nodeport | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | localhost | 12345 | t | f (1 row) @@ -139,32 +139,32 @@ SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node END; -- maintenance daemon metadata sync should fail, because node is still unwriteable.
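For context, wait_until_metadata_sync() blocks until the maintenance daemon has finished, or given up on, its next metadata sync round. Its definition is not shown in this diff; purely as an illustrative sketch, a comparable wait could be approximated by polling the same pg_dist_node flags the surrounding queries inspect:

DO $$
BEGIN
    -- Poll for up to ~15 seconds, 100ms per attempt.  bool_and() over
    -- zero rows yields NULL, so with no metadata-bearing nodes the loop
    -- simply times out instead of exiting early.
    FOR i IN 1..150 LOOP
        EXIT WHEN (SELECT bool_and(metadatasynced)
                   FROM pg_dist_node
                   WHERE hasmetadata);
        PERFORM pg_sleep(0.1);
    END LOOP;
END;
$$;

The suite's helper evidently does more than this sketch: in the unwriteable-node case here it returns even though metadatasynced stays f, i.e. it also unblocks once the daemon abandons the attempt.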
SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | f (1 row) -- update it back to :worker_1_port, now metadata should be synced SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | t (1 row) @@ -175,7 +175,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node; SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -183,14 +183,14 @@ SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port); -- Create a table with shards on both nodes CREATE TABLE dist_table_2(a int); SELECT create_distributed_table('dist_table_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO dist_table_2 SELECT i FROM generate_series(1, 100) i; SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); - mark_node_readonly + mark_node_readonly --------------------------------------------------------------------- t (1 row) @@ -198,13 +198,13 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); -- Now updating the other node will mark worker 2 as not synced. BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | f 3 | t | f @@ -214,13 +214,13 @@ COMMIT; -- worker_2 is out of sync, so further updates aren't sent to it and -- we shouldn't see the warnings. SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | f 3 | t | f @@ -228,20 +228,20 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; -- Make the node writeable. 
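mark_node_readonly() is likewise test plumbing whose definition is outside this diff. Conceptually, toggling a node's writability comes down to flipping default_transaction_read_only on that worker and reloading its configuration; the statements below, run directly on the target node, sketch the idea and are not necessarily the helper's exact implementation:

-- refuse writes on this node
ALTER SYSTEM SET default_transaction_read_only TO on;
SELECT pg_reload_conf();
-- accept writes again
ALTER SYSTEM SET default_transaction_read_only TO off;
SELECT pg_reload_conf();

While a node rejects writes, the coordinator cannot propagate metadata changes to it, which is why metadatasynced flips to f for that node in the surrounding outputs.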
SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); - mark_node_readonly + mark_node_readonly --------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) -- Mark the node readonly again, so the following master_update_node warns SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); - mark_node_readonly + mark_node_readonly --------------------------------------------------------------------- t (1 row) @@ -249,46 +249,46 @@ SELECT mark_node_readonly('localhost', :worker_2_port, TRUE); -- Revert the nodeport of worker 1. BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM dist_table_2; - count + count --------------------------------------------------------------------- 100 (1 row) END; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) -- Make the node writeable. SELECT mark_node_readonly('localhost', :worker_2_port, FALSE); - mark_node_readonly + mark_node_readonly --------------------------------------------------------------------- t (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata + verify_metadata | verify_metadata --------------------------------------------------------------------- t | t (1 row) @@ -298,7 +298,7 @@ SELECT verify_metadata('localhost', :worker_1_port), --------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -306,7 +306,7 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); ROLLBACK; SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata + verify_metadata | verify_metadata --------------------------------------------------------------------- t | t (1 row) @@ -316,7 +316,7 @@ SELECT verify_metadata('localhost', :worker_1_port), --------------------------------------------------------------------- BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) @@ -324,13 +324,13 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345); PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | f 3 | t | t @@ -338,7 +338,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; BEGIN; SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -346,13 +346,13 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port); PREPARE TRANSACTION 'tx01'; COMMIT PREPARED 'tx01'; SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; - nodeid | hasmetadata | metadatasynced + nodeid | hasmetadata | metadatasynced --------------------------------------------------------------------- 2 | t | t 3 | t | t @@ -360,7 +360,7 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid; SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); - verify_metadata | verify_metadata + verify_metadata | verify_metadata --------------------------------------------------------------------- t | t (1 row) @@ -371,26 +371,26 @@ SELECT verify_metadata('localhost', :worker_1_port), -- Don't drop the reference table so it has shards on the nodes being disabled DROP TABLE dist_table_1, dist_table_2; SELECT 1 FROM master_disable_node('localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) @@ -399,15 +399,15 @@ SELECT verify_metadata('localhost', :worker_1_port); -- Test master_disable_node() when the node that is being disabled is actually down --------------------------------------------------------------------- SELECT master_update_node(:nodeid_2, 'localhost', 1); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) -- set metadatasynced so we try propagating metadata changes @@ -419,44 +419,44 @@ DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_disable_node('localhost', 1); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) SELECT master_update_node(:nodeid_2, 'localhost', :worker_2_port); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) @@ -466,15 +466,15 @@ SELECT verify_metadata('localhost', :worker_1_port); --------------------------------------------------------------------- -- node 1 is down. SELECT master_update_node(:nodeid_1, 'localhost', 1); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) -- set metadatasynced so we try propagating metadata changes @@ -486,39 +486,39 @@ DETAIL: connection error: localhost:xxxxx HINT: If you are using MX, try stop_metadata_sync_to_node(hostname, port) for nodes that are down before disabling them. -- try again after stopping metadata sync SELECT stop_metadata_sync_to_node('localhost', 1); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_disable_node('localhost', :worker_2_port); - ?column? + ?column?
--------------------------------------------------------------------- 1 (1 row) -- bring up node 1 SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port); - master_update_node + master_update_node --------------------------------------------------------------------- - + (1 row) SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port); - verify_metadata + verify_metadata --------------------------------------------------------------------- t (1 row) @@ -527,7 +527,7 @@ SELECT verify_metadata('localhost', :worker_1_port); DROP TABLE ref_table; TRUNCATE pg_dist_colocation; SELECT count(*) FROM (SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node) t; - count + count --------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_mx_partitioning.out b/src/test/regress/expected/multi_mx_partitioning.out index 8d7a22f7c..b985bc146 100644 --- a/src/test/regress/expected/multi_mx_partitioning.out +++ b/src/test/regress/expected/multi_mx_partitioning.out @@ -7,9 +7,9 @@ SET citus.shard_replication_factor TO 1; -- make sure we can create partitioning tables in MX SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- 1-) Distributing partitioned table @@ -27,15 +27,15 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table...
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- see from MX node, the data is loaded to shards \c - - - :worker_1_port SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -51,7 +51,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2009 @@ -66,7 +66,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 @@ -75,7 +75,7 @@ ORDER BY -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid + inhrelid --------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 @@ -95,7 +95,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2011 @@ -109,7 +109,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 @@ -117,7 +117,7 @@ ORDER BY -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid + inhrelid --------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 @@ -143,7 +143,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2012 @@ -157,7 +157,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 @@ -165,7 +165,7 @@ ORDER BY -- see from MX node, see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -177,7 +177,7 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid + inhrelid --------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 @@ -191,9 +191,9 @@ SET citus.shard_replication_factor TO 1; -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- load some data @@ -203,7 +203,7 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES -- 
see from MX node, see the data is loaded to shards \c - - - :worker_1_port SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -217,7 +217,7 @@ SELECT * FROM partitioning_test ORDER BY 1; -- see from MX node, partitioning hierarchy is built SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid + inhrelid --------------------------------------------------------------------- partitioning_test_2009 partitioning_test_2010 @@ -232,7 +232,7 @@ ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see from MX node, partitioning hierarchy is built \c - - - :worker_1_port SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1; - inhrelid + inhrelid --------------------------------------------------------------------- partitioning_test_2010 partitioning_test_2011 @@ -247,21 +247,21 @@ HINT: Connect to the coordinator and run it again. \c - - - :master_port -- make sure we can repeatedly call start_metadata_sync_to_node SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- make sure we can drop partitions @@ -278,9 +278,9 @@ CREATE SCHEMA partition_test; SET SEARCH_PATH TO partition_test; CREATE TABLE partition_parent_table(a int, b int, c int) PARTITION BY RANGE (b); SELECT create_distributed_table('partition_parent_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE partition_0 PARTITION OF partition_parent_table FOR VALUES FROM (1) TO (10); diff --git a/src/test/regress/expected/multi_mx_reference_table.out b/src/test/regress/expected/multi_mx_reference_table.out index bb7c1abbf..a84ea15e5 100644 --- a/src/test/regress/expected/multi_mx_reference_table.out +++ b/src/test/regress/expected/multi_mx_reference_table.out @@ -2,9 +2,9 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; \c - - - :master_port CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); @@ -14,14 +14,14 @@ INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); -- SELECT .. 
FOR UPDATE should work on coordinator (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) @@ -30,14 +30,14 @@ END; \c - - - :worker_1_port -- SELECT .. FOR UPDATE should work on first worker (takes lock on self) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) @@ -48,7 +48,7 @@ SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -63,7 +63,7 @@ FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -75,7 +75,7 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 3; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -90,7 +90,7 @@ WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; - value_1 | value_3 + value_1 | value_3 --------------------------------------------------------------------- 4 | 4 5 | 5 @@ -103,7 +103,7 @@ FROM ORDER BY 2 ASC LIMIT 2; - value_1 | ?column? + value_1 | ?column? --------------------------------------------------------------------- 1 | 15 2 | 30 @@ -115,7 +115,7 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; - value_1 | ?column? + value_1 | ?column? 
--------------------------------------------------------------------- 3 | 45 4 | 60 @@ -127,7 +127,7 @@ FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 @@ -139,7 +139,7 @@ FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- (0 rows) @@ -149,7 +149,7 @@ FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 @@ -164,7 +164,7 @@ WHERE value_3 = '2' OR value_1 = 3 ) AND FALSE; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- (0 rows) @@ -181,7 +181,7 @@ WHERE reference_table_test ) AND value_1 < 3; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -196,7 +196,7 @@ WHERE ( '1', '2' ); - value_4 + value_4 --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 @@ -211,7 +211,7 @@ WHERE ( '5', '2' ); - date_part + date_part --------------------------------------------------------------------- 2 5 @@ -223,7 +223,7 @@ FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; - value_4 + value_4 --------------------------------------------------------------------- (0 rows) @@ -233,7 +233,7 @@ FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; - value_4 + value_4 --------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 @@ -245,7 +245,7 @@ FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); - value_4 + value_4 --------------------------------------------------------------------- (0 rows) @@ -255,7 +255,7 @@ FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -268,7 +268,7 @@ FROM reference_table_test WHERE FALSE; - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -278,7 +278,7 @@ FROM reference_table_test WHERE int4eq(1, 2); - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -287,7 +287,7 @@ SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; - id | age + id | age --------------------------------------------------------------------- 1 | 15 2 | 30 @@ -302,7 +302,7 @@ SELECT * FROM some_data; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 @@ -312,7 +312,7 @@ FROM -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -324,7 +324,7 
@@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 | position + value_1 | value_2 | value_3 | value_4 | position --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -335,7 +335,7 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; - value_1 | value_2 | value_3 | value_4 | position + value_1 | value_2 | value_3 | value_4 | position --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 @@ -348,7 +348,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -360,7 +360,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -371,7 +371,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) @@ -392,7 +392,7 @@ HAVING SUM(value_2) > 3 ORDER BY 1; - value_4 | sum + value_4 | sum --------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 @@ -409,7 +409,7 @@ FROM GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; - value_4 | value_3 | sum + value_4 | value_3 | sum --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 @@ -430,7 +430,7 @@ FROM reference_table_test ORDER BY 1; - value_4 + value_4 --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 @@ -444,7 +444,7 @@ SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; - value_4 | rank + value_4 | rank --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 @@ -461,7 +461,7 @@ SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; - value_4 | avg + value_4 | avg --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 @@ -484,7 +484,7 @@ SELECT END) as c FROM reference_table_test; - c + c --------------------------------------------------------------------- 3 (1 row) @@ -505,7 +505,7 @@ SELECT value_1 ORDER BY 1; - value_1 | c + value_1 | c --------------------------------------------------------------------- 1 | 0 2 | 0 @@ -517,7 +517,7 @@ SELECT -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ 
-530,7 +530,7 @@ SELECT * FROM reference_table_test; (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 @@ -545,13 +545,13 @@ DECLARE test_cursor CURSOR FOR WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -559,12 +559,12 @@ FETCH ALL test_cursor; (3 rows) FETCH test_cursor; -- fetch one row after the last - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -580,16 +580,16 @@ CREATE TEMP TABLE temp_reference_test as -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- ingest some data to both tables @@ -601,14 +601,14 @@ INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_2_port -- SELECT .. 
FOR UPDATE should work on second worker (takes lock on first worker) SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) BEGIN; SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 (1 row) @@ -623,7 +623,7 @@ WHERE t1.value_2 = t2.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -638,7 +638,7 @@ WHERE t1.value_2 = t3.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 4 5 @@ -652,7 +652,7 @@ WHERE t2.value_2 = t3.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -665,7 +665,7 @@ WHERE t1.value_2 = t2.value_1 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -680,7 +680,7 @@ WHERE t1.value_2 = t2.value_3::int ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -695,7 +695,7 @@ WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -714,7 +714,7 @@ WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -728,7 +728,7 @@ WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -741,7 +741,7 @@ FROM JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -754,7 +754,7 @@ FROM LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -770,10 +770,10 @@ FROM RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 - + (2 rows) \c - - - :master_port @@ -782,16 +782,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DELETE FROM reference_table_test; @@ -812,7 +812,7 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -826,7 +826,7 @@ WHERE 
colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -840,7 +840,7 @@ WHERE reference_table_test.value_1 = colocated_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -855,7 +855,7 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 1 @@ -872,7 +872,7 @@ WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -887,7 +887,7 @@ WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -901,7 +901,7 @@ WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1 ORDER BY 1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 diff --git a/src/test/regress/expected/multi_mx_repartition_join_w1.out b/src/test/regress/expected/multi_mx_repartition_join_w1.out index f0057e5fc..c3072c5f3 100644 --- a/src/test/regress/expected/multi_mx_repartition_join_w1.out +++ b/src/test/regress/expected/multi_mx_repartition_join_w1.out @@ -1,5 +1,5 @@ -- Test two concurrent repartition joins from two different workers --- This test runs the below query from the :worker_1_port and the +-- This test runs the below query from the :worker_1_port and the -- concurrent test runs the same query on :worker_2_port. Note that both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_1_port diff --git a/src/test/regress/expected/multi_mx_repartition_join_w2.out b/src/test/regress/expected/multi_mx_repartition_join_w2.out index 4913108fa..a643cd90d 100644 --- a/src/test/regress/expected/multi_mx_repartition_join_w2.out +++ b/src/test/regress/expected/multi_mx_repartition_join_w2.out @@ -1,5 +1,5 @@ -- Test two concurrent repartition joins from two different workers --- This test runs the below query from the :worker_2_port and the +-- This test runs the below query from the :worker_2_port and the -- concurrent test runs the same query on :worker_1_port. Note that both -- tests use the same sequence ids but the queries should not fail.
\c - - - :worker_2_port diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out index ba6a2e269..143f0a94d 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out @@ -127,16 +127,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 3; SELECT create_distributed_table('repartition_udt', 'pk'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); @@ -157,7 +157,7 @@ SET citus.task_executor_type = 'task-tracker'; -- join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- (0 rows) @@ -167,7 +167,7 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 @@ -185,7 +185,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w1.out b/src/test/regress/expected/multi_mx_repartition_udt_w1.out index ba8450bc9..2095866e8 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_w1.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w1.out @@ -11,7 +11,7 @@ SET citus.log_multi_join_order = true; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- (0 rows) @@ -20,7 +20,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w2.out b/src/test/regress/expected/multi_mx_repartition_udt_w2.out index 22ec5ddec..547c62c5b 100644 
--- a/src/test/regress/expected/multi_mx_repartition_udt_w2.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_w2.out @@ -11,7 +11,7 @@ SET citus.log_multi_join_order = true; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- (0 rows) @@ -20,7 +20,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 12c238777..96767009c 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -73,7 +73,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) @@ -83,7 +83,7 @@ SELECT title FROM articles_hash_mx WHERE author_id = 10; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title + title --------------------------------------------------------------------- aggrandize absentness @@ -99,7 +99,7 @@ SELECT title, word_count FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - title | word_count + title | word_count --------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 @@ -116,7 +116,7 @@ SELECT title, id FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - title | id + title | id --------------------------------------------------------------------- aruru | 5 adversa | 15 @@ -129,7 +129,7 @@ SELECT title, author_id FROM articles_hash_mx ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id + title | author_id --------------------------------------------------------------------- aseptic | 7 auriga | 7 @@ -148,7 +148,7 @@ SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8; DEBUG: Creating router plan DEBUG: Plan is router executable - title | author_id + title | author_id --------------------------------------------------------------------- aseptic | 7 agatized | 8 @@ -171,7 +171,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 10 | 59955 8 | 55410 @@ -188,7 +188,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx DEBUG: Creating router plan DEBUG: Plan 
is router executable DETAIL: distribution column value: 1 - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 1 | 35894 (1 row) @@ -197,7 +197,7 @@ DETAIL: distribution column value: 1 -- not router-plannable due to <= and IN SELECT * FROM articles_hash_mx WHERE author_id <= 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -209,7 +209,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 @@ -229,7 +229,7 @@ SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 @@ -244,7 +244,7 @@ SELECT title FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - title + title --------------------------------------------------------------------- arsenous alamo @@ -260,7 +260,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | id | title + id | author_id | id | title --------------------------------------------------------------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo @@ -274,7 +274,7 @@ id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title + id | author_id | id | title --------------------------------------------------------------------- (0 rows) @@ -294,7 +294,7 @@ DETAIL: distribution column value: 2 DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('66_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id) DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title + id | author_id | id | title --------------------------------------------------------------------- (0 rows) @@ -346,7 +346,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level + company_id | employee_id | manager_id | level --------------------------------------------------------------------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 @@ -392,7 +392,7 @@ SELECT ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable - id | subtitle | 
count + id | subtitle | count --------------------------------------------------------------------- 1 | | 1 3 | | 1 @@ -429,7 +429,7 @@ SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count | position + id | author_id | title | word_count | position --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 @@ -441,7 +441,7 @@ DETAIL: distribution column value: 1 SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | position + id | author_id | title | word_count | position --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 @@ -459,7 +459,7 @@ DEBUG: Plan is router executable SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | author_id | title | word_count | position + id | author_id | title | word_count | position --------------------------------------------------------------------- 12 | 2 | archiblast | 18185 | 3 42 | 2 | ausable | 15885 | 3 @@ -478,7 +478,7 @@ DEBUG: generating subplan 85_1 for subquery SELECT id, word_count FROM public.a DEBUG: Plan 85 query after replacing subqueries and CTEs: SELECT articles_hash_mx.id, test.word_count FROM public.articles_hash_mx, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('85_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash_mx.id) ORDER BY test.word_count DESC, articles_hash_mx.id LIMIT 5 DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 5 - id | word_count + id | word_count --------------------------------------------------------------------- 50 | 19519 14 | 19094 @@ -498,7 +498,7 @@ DEBUG: Plan 87 query after replacing subqueries and CTEs: SELECT articles_hash_ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count + id | word_count --------------------------------------------------------------------- 1 | 9572 11 | 1347 @@ -521,7 +521,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -536,7 +536,7 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -551,7 +551,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 
| alamo | 1347 @@ -567,7 +567,7 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value + article_id | random_value --------------------------------------------------------------------- 1 | 9572 11 | 14817 @@ -584,7 +584,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 17277 10 | 1820 @@ -600,7 +600,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 19519 10 | 19519 @@ -624,7 +624,7 @@ DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT a.author_id AS DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- (0 rows) @@ -636,7 +636,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -652,7 +652,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 @@ -668,7 +668,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 @@ -683,7 +683,7 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 @@ -700,7 +700,7 @@ SELECT distinct id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 @@ -716,7 +716,7 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg + avg --------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -729,7 +729,7 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt + max | min | sum | cnt --------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -742,7 +742,7 @@ SELECT max(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - max + max 
--------------------------------------------------------------------- 11814 (1 row) @@ -756,7 +756,7 @@ SELECT * FROM ( ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 @@ -775,7 +775,7 @@ UNION (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left + left --------------------------------------------------------------------- a (1 row) @@ -785,7 +785,7 @@ INTERSECT (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable - left + left --------------------------------------------------------------------- a (1 row) @@ -798,7 +798,7 @@ SELECT * FROM ( ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable - left + left --------------------------------------------------------------------- al ar @@ -815,7 +815,7 @@ ORDER BY 1,2; DEBUG: generating subplan 110_1 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 1) DEBUG: generating subplan 110_2 for subquery SELECT id, author_id, title, word_count FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2) DEBUG: Plan 110 query after replacing subqueries and CTEs: SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) UNION SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('110_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer) ORDER BY 1, 2 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 @@ -836,7 +836,7 @@ SELECT * FROM ( ORDER BY 1, 2 LIMIT 5; DEBUG: push down of limit count: 5 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 @@ -857,7 +857,7 @@ SELECT * FROM articles_hash_mx WHERE author_id >= 1 AND author_id <= 3 ORDER BY 1,2,3,4; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 2 | 2 | abducing | 13642 @@ -887,7 +887,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -901,7 +901,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -917,7 
+917,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 @@ -930,7 +930,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -939,7 +939,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = (random()::int * 0 + 1); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -953,7 +953,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -969,7 +969,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -983,7 +983,7 @@ SELECT * FROM articles_hash_mx WHERE 1 = abs(author_id); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -997,7 +997,7 @@ SELECT * FROM articles_hash_mx WHERE author_id = abs(author_id - 2); DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1013,7 +1013,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1023,7 +1023,7 @@ SELECT * FROM articles_hash_mx WHERE (author_id = 1) is true; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1039,7 +1039,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1055,7 +1055,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | 
alamo | 1347 @@ -1068,7 +1068,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 @@ -1081,7 +1081,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 @@ -1094,7 +1094,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1110,7 +1110,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1124,7 +1124,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count + prev | title | word_count --------------------------------------------------------------------- | afrasia | 864 afrasia | adversa | 3164 @@ -1140,7 +1140,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - prev | title | word_count + prev | title | word_count --------------------------------------------------------------------- aminate | aruru | 11389 antehall | aminate | 9089 @@ -1155,7 +1155,7 @@ SELECT id, MIN(id) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | min + id | min --------------------------------------------------------------------- 11 | 11 21 | 11 @@ -1170,7 +1170,7 @@ SELECT id, word_count, AVG(word_count) over (order by word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | word_count | avg + id | word_count | avg --------------------------------------------------------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 @@ -1185,7 +1185,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - word_count | rank + word_count | rank --------------------------------------------------------------------- 1347 | 1 5890 | 2 @@ -1224,7 +1224,7 @@ SELECT DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 5 - c + c --------------------------------------------------------------------- 5 (1 row) @@ -1245,7 +1245,7 @@ SELECT author_id ORDER BY c; DEBUG: Router planner cannot handle multi-shard select queries - c + c --------------------------------------------------------------------- 4 5 @@ -1268,7 +1268,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | 
author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1289,19 +1289,19 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 FETCH test_cursor; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) FETCH test_cursor; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 11 | 1 | alamo | 1347 (1 row) FETCH BACKWARD test_cursor; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 (1 row) @@ -1337,7 +1337,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - count | count + count | count --------------------------------------------------------------------- 5 | 1 (1 row) @@ -1347,7 +1347,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; DEBUG: Router planner cannot handle multi-shard select queries - count | count + count | count --------------------------------------------------------------------- 10 | 2 (1 row) @@ -1361,7 +1361,7 @@ EXECUTE author_1_articles; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1379,7 +1379,7 @@ EXECUTE author_articles(1); DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1409,7 +1409,7 @@ DETAIL: distribution column value: 1 CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement - author_articles_max_id + author_articles_max_id --------------------------------------------------------------------- 41 (1 row) @@ -1437,7 +1437,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY - id | word_count + id | word_count --------------------------------------------------------------------- 1 | 9572 11 | 1347 @@ -1453,7 +1453,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 SELECT * FROM mv_articles_hash_mx; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1487,7 +1487,7 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index 5a9427f1c..30fbffc5a 
100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -5,43 +5,43 @@ \c - - - :worker_1_port -- test very basic queries SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) -- test cursors SET search_path TO public; BEGIN; -DECLARE test_cursor CURSOR FOR +DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -50,23 +50,23 @@ END; -- test with search_path is set SET search_path TO citus_mx_test_schema; BEGIN; -DECLARE test_cursor CURSOR FOR +DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -77,9 +77,9 @@ SET search_path TO public; INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3); -- verify insertion SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- - 100 | TURKEY | 3 | + 100 | TURKEY | 3 | (1 row) -- test with search_path is set @@ -87,9 +87,9 @@ SET search_path TO citus_mx_test_schema; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 101; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- - 101 | GERMANY | 3 | + 101 | GERMANY | 3 | (1 row) -- TODO: add UPDATE/DELETE/UPSERT @@ -97,7 +97,7 @@ SELECT * FROM nation_hash WHERE n_nationkey = 101; SET search_path TO public; -- UDF in public, table in a schema other than public, search_path is not set SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction + simpletestfunction --------------------------------------------------------------------- 152 151 @@ -109,7 +109,7 @@ SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_has -- UDF in public, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction + simpletestfunction --------------------------------------------------------------------- 152 151 @@ -121,7 +121,7 @@ SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nat -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction2 + simpletestfunction2 --------------------------------------------------------------------- 152 151 @@ -133,7 +133,7 @@ SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_ -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - simpletestfunction2 + simpletestfunction2 --------------------------------------------------------------------- 152 151 @@ -146,7 +146,7 @@ SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER SET search_path TO public; -- test with search_path is not set SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1; - n_nationkey | n_name | 
n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -154,58 +154,58 @@ SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus -- test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY 1; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 0 | ven packages wake quickly. regu (6 rows) SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english; - n_comment + n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d - y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold - 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; - n_comment + n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d - y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special + y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC; - n_nationkey | n_name | n_regionkey | n_comment | test_col + n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -213,7 +213,7 @@ SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = --test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC; - n_nationkey | n_name | n_regionkey | n_comment | test_col + n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -222,13 +222,13 @@ SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composit -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; -SELECT +SELECT count (*) FROM - citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 + citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -237,13 +237,13 @@ WHERE -- join of two tables which are in different schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; -SELECT +SELECT count (*) FROM - nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 + nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -252,13 +252,13 @@ WHERE -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; -SELECT +SELECT count (*) FROM - citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 + citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -267,13 +267,13 @@ WHERE -- join of two tables which are in same schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; -SELECT +SELECT count (*) FROM - nation_hash n1, nation_hash_2 n2 + nation_hash n1, nation_hash_2 n2 WHERE 
n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -290,7 +290,7 @@ FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -305,7 +305,7 @@ FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -320,12 +320,12 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 25 (1 row) --- hash repartition joins +-- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column @@ -336,7 +336,7 @@ FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 125 (1 row) @@ -351,7 +351,7 @@ FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 125 (1 row) @@ -366,7 +366,7 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 125 (1 row) @@ -388,9 +388,9 @@ SET citus.replication_model TO 'streaming'; SET search_path TO mx_ddl_schema_1; CREATE TABLE table_1 (key int PRIMARY KEY, value text); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX i1 ON table_1(value); @@ -399,19 +399,19 @@ CREATE INDEX CONCURRENTLY i2 ON table_1(value); SET search_path TO mx_ddl_schema_1, mx_ddl_schema_2; CREATE TABLE mx_ddl_schema_2.table_2 (key int PRIMARY KEY, value text); SELECT create_distributed_table('mx_ddl_schema_2.table_2', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE table_2 ADD CONSTRAINT test_constraint FOREIGN KEY (key) REFERENCES table_1(key); --- we can also handle schema/table names with quotation +-- we can also handle schema/table names with quotation SET search_path TO "CiTuS.TeAeN"; CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); SELECT create_distributed_table('"TeeNTabLE.1!?!"', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE INDEX "MyTenantIndex" ON "CiTuS.TeAeN"."TeeNTabLE.1!?!"("TeNANt_Id"); @@ -430,7 +430,7 @@ ALTER TABLE "TeeNTabLE.1!?!" ADD COLUMN new_col INT; -- test with a public schema is in the search path SET search_path TO public, "CiTuS.TeAeN"; ALTER TABLE "TeeNTabLE.1!?!" DROP COLUMN new_col; --- make sure that we handle transaction blocks properly +-- make sure that we handle transaction blocks properly BEGIN; SET search_path TO public, "CiTuS.TeAeN"; ALTER TABLE "TeeNTabLE.1!?!" 
ADD COLUMN new_col INT; diff --git a/src/test/regress/expected/multi_mx_tpch_query1.out b/src/test/regress/expected/multi_mx_tpch_query1.out index 9247272ab..567586303 100644 --- a/src/test/regress/expected/multi_mx_tpch_query1.out +++ b/src/test/regress/expected/multi_mx_tpch_query1.out @@ -25,7 +25,7 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 @@ -57,7 +57,7 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 @@ -89,7 +89,7 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 diff --git a/src/test/regress/expected/multi_mx_tpch_query10.out b/src/test/regress/expected/multi_mx_tpch_query10.out index 40eb3f63c..0ef4049aa 100644 --- a/src/test/regress/expected/multi_mx_tpch_query10.out +++ b/src/test/regress/expected/multi_mx_tpch_query10.out @@ -1,7 +1,7 @@ -- -- MULTI_MX_TPCH_QUERY10 -- --- Query #10 from the TPC-H decision support benchmark. +-- Query #10 from the TPC-H decision support benchmark. -- connect to master \c - - - :master_port SELECT @@ -36,18 +36,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment --------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. 
pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote @@ -94,18 +94,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment --------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. 
blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote @@ -152,18 +152,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment --------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. 
even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote diff --git a/src/test/regress/expected/multi_mx_tpch_query12.out b/src/test/regress/expected/multi_mx_tpch_query12.out index 0c0a8c0e2..efe3361d4 100644 --- a/src/test/regress/expected/multi_mx_tpch_query12.out +++ b/src/test/regress/expected/multi_mx_tpch_query12.out @@ -32,7 +32,7 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count + l_shipmode | high_line_count | low_line_count --------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 @@ -69,7 +69,7 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count + l_shipmode | high_line_count | low_line_count --------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 @@ -106,7 +106,7 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count + l_shipmode | high_line_count | low_line_count --------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 diff --git a/src/test/regress/expected/multi_mx_tpch_query14.out b/src/test/regress/expected/multi_mx_tpch_query14.out index f39cd6296..5cf6e71d2 100644 --- a/src/test/regress/expected/multi_mx_tpch_query14.out +++ b/src/test/regress/expected/multi_mx_tpch_query14.out @@ -17,7 +17,7 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue + promo_revenue --------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -38,7 +38,7 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue + promo_revenue --------------------------------------------------------------------- 32.1126387112005225 (1 row) @@ -59,7 +59,7 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue + promo_revenue --------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query19.out b/src/test/regress/expected/multi_mx_tpch_query19.out index 789cd86af..cb0625f1b 100644 --- a/src/test/regress/expected/multi_mx_tpch_query19.out +++ b/src/test/regress/expected/multi_mx_tpch_query19.out @@ -34,7 +34,7 @@ WHERE AND 
l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue + revenue --------------------------------------------------------------------- 144747.0857 (1 row) @@ -72,7 +72,7 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue + revenue --------------------------------------------------------------------- 144747.0857 (1 row) @@ -110,7 +110,7 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue + revenue --------------------------------------------------------------------- 144747.0857 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query3.out b/src/test/regress/expected/multi_mx_tpch_query3.out index d7e3a78b8..ab151aff0 100644 --- a/src/test/regress/expected/multi_mx_tpch_query3.out +++ b/src/test/regress/expected/multi_mx_tpch_query3.out @@ -1,7 +1,7 @@ -- -- MULTI_MX_TPCH_QUERY3 -- --- Query #3 from the TPC-H decision support benchmark. +-- Query #3 from the TPC-H decision support benchmark. -- connect to the coordinator \c - - - :master_port SELECT @@ -26,7 +26,7 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority + l_orderkey | revenue | o_orderdate | o_shippriority --------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 @@ -70,7 +70,7 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority + l_orderkey | revenue | o_orderdate | o_shippriority --------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 @@ -114,7 +114,7 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority + l_orderkey | revenue | o_orderdate | o_shippriority --------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 diff --git a/src/test/regress/expected/multi_mx_tpch_query6.out b/src/test/regress/expected/multi_mx_tpch_query6.out index 9899ace97..68e2f40e4 100644 --- a/src/test/regress/expected/multi_mx_tpch_query6.out +++ b/src/test/regress/expected/multi_mx_tpch_query6.out @@ -13,7 +13,7 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue + revenue --------------------------------------------------------------------- 243277.7858 (1 row) @@ -30,7 +30,7 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue + revenue --------------------------------------------------------------------- 243277.7858 (1 row) @@ -47,7 +47,7 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue + revenue --------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7.out b/src/test/regress/expected/multi_mx_tpch_query7.out index 4bc98ca05..263680d4a 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7.out +++ b/src/test/regress/expected/multi_mx_tpch_query7.out @@ -43,7 +43,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue 
--------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -90,7 +90,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -137,7 +137,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_tpch_query7_nested.out b/src/test/regress/expected/multi_mx_tpch_query7_nested.out index c8ebfce7b..a95f89f26 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_mx_tpch_query7_nested.out @@ -22,18 +22,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -52,7 +52,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -78,18 +78,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -108,7 +108,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) @@ -134,18 +134,18 @@ FROM orders_mx, customer_mx, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation_mx n1, nation_mx n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -164,7 +164,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_mx_transaction_recovery.out b/src/test/regress/expected/multi_mx_transaction_recovery.out index 47b6aeaad..6ef2ba445 100644 --- a/src/test/regress/expected/multi_mx_transaction_recovery.out +++ b/src/test/regress/expected/multi_mx_transaction_recovery.out @@ -4,16 +4,16 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); - create_distributed_table + 
create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port -- Disable auto-recovery for the initial tests ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -21,13 +21,13 @@ SELECT pg_reload_conf(); SET citus.multi_shard_commit_protocol TO '2pc'; -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -36,7 +36,7 @@ SELECT count(*) FROM pg_dist_transaction; -- different result and the prepared statement names should be adapted -- accordingly. SELECT * FROM pg_dist_local_group; - groupid + groupid --------------------------------------------------------------------- 14 (1 row) @@ -60,33 +60,33 @@ INSERT INTO pg_dist_transaction VALUES (14, 'citus_14_should_commit'); INSERT INTO pg_dist_transaction VALUES (14, 'citus_14_should_be_forgotten'); INSERT INTO pg_dist_transaction VALUES (122, 'citus_122_should_do_nothing'); SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 3 (1 row) -- delete the citus_122_should_do_nothing transaction DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING *; - groupid | gid + groupid | gid --------------------------------------------------------------------- 122 | citus_122_should_do_nothing (1 row) ROLLBACK PREPARED 'citus_122_should_do_nothing'; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_abort'; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -94,7 +94,7 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -106,13 +106,13 @@ INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -126,13 +126,13 @@ INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -140,13 +140,13 
@@ SELECT recover_prepared_transactions(); -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -154,7 +154,7 @@ SELECT recover_prepared_transactions(); -- Committed COPY should write 3 transaction records (2 fall into the same shard) COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -162,27 +162,27 @@ SELECT count(*) FROM pg_dist_transaction; -- Test whether auto-recovery runs ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_mx_truncate_from_worker.out b/src/test/regress/expected/multi_mx_truncate_from_worker.out index 00f96a56f..dc255f01c 100644 --- a/src/test/regress/expected/multi_mx_truncate_from_worker.out +++ b/src/test/regress/expected/multi_mx_truncate_from_worker.out @@ -7,16 +7,16 @@ SET citus.shard_count TO 6; SET citus.replication_model TO streaming; CREATE TABLE "refer'ence_table"(id int PRIMARY KEY); SELECT create_reference_table('refer''ence_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int); SELECT create_distributed_table('on_update_fkey_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES "refer'ence_table"(id) ON UPDATE CASCADE; @@ -25,7 +25,7 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 -- first, make sure that truncate from the coordinator workers as expected TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -36,13 +36,13 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -55,7 +55,7 @@ BEGIN; ALTER TABLE on_update_fkey_table ADD 
COLUMN x INT; TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -66,7 +66,7 @@ SET search_path TO 'truncate_from_workers'; -- make sure that TRUNCATE workes expected from the worker node TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -77,13 +77,13 @@ INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000 TRUNCATE "refer'ence_table" CASCADE; NOTICE: truncate cascades to table "on_update_fkey_table" SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM "refer'ence_table"; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -114,14 +114,14 @@ SET search_path TO 'truncate_from_workers'; BEGIN; INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000) i; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 1001 (1 row) TRUNCATE on_update_fkey_table; SELECT count(*) FROM on_update_fkey_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -137,7 +137,7 @@ ERROR: lock_relation_if_exists can only be used in transaction blocks BEGIN; -- should fail since the schema is not provided SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- f (1 row) @@ -147,7 +147,7 @@ BEGIN; -- should work since the schema is in the search path SET search_path TO 'truncate_from_workers'; SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) @@ -156,7 +156,7 @@ ROLLBACK; BEGIN; -- should return false since there is no such table SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_tableXXX', 'ACCESS SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- f (1 row) @@ -170,62 +170,62 @@ ROLLBACK; BEGIN; -- test all lock levels SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW SHARE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ROW EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE UPDATE EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE'); - lock_relation_if_exists + lock_relation_if_exists 
--------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'SHARE ROW EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS EXCLUSIVE'); - lock_relation_if_exists + lock_relation_if_exists --------------------------------------------------------------------- t (1 row) -- see them all SELECT relation::regclass, mode FROM pg_locks WHERE pid = pg_backend_pid() AND relation = 'truncate_from_workers.on_update_fkey_table'::regclass ORDER BY 2 DESC; - relation | mode + relation | mode --------------------------------------------------------------------- truncate_from_workers.on_update_fkey_table | ShareUpdateExclusiveLock truncate_from_workers.on_update_fkey_table | ShareRowExclusiveLock diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 3350dfac8..74d74f1de 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -9,21 +9,21 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port \dt too_long_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres @@ -34,23 +34,23 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; -- Verify that the UDF works and rejects bad arguments. 
SELECT shard_name(NULL, 666666); - shard_name + shard_name --------------------------------------------------------------------- - + (1 row) SELECT shard_name(0, 666666); ERROR: object_name does not reference a valid relation SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, 666666); - shard_name + shard_name --------------------------------------------------------------------- too_long_12345678901234567890123456789012345678_e0119164_666666 (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, NULL); - shard_name + shard_name --------------------------------------------------------------------- - + (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, -21); @@ -63,9 +63,9 @@ CREATE TABLE name_lengths ( constraint constraint_a UNIQUE (col1) ); SELECT create_distributed_table('name_lengths', 'col1', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Verify that we CAN add columns with "too-long names", because @@ -82,11 +82,11 @@ ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345 ERROR: cannot create constraint without a name on a distributed table \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- int_col_12345678901234567890123456789012345678901234567890 | integer | default 1 - float_col_12345678901234567890123456789012345678901234567890 | double precision | - date_col_12345678901234567890123456789012345678901234567890 | date | + float_col_12345678901234567890123456789012345678901234567890 | double precision | + date_col_12345678901234567890123456789012345678901234567890 | date | col2 | integer | not null col1 | integer | not null (5 rows) @@ -102,7 +102,7 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date); \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '01-01-2014'::date) (1 row) @@ -120,7 +120,7 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_ \c - - - :worker_1_port SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 @@ -134,7 +134,7 @@ NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901 \c - - - :worker_1_port SELECT "relname", "Column", "Type", "Definition" FROM 
index_attrs WHERE relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - relname | Column | Type | Definition + relname | Column | Type | Definition --------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 tmp_idx_123456789012345678901234567890123456789_5e470afa_225002 | col2 | integer | col2 @@ -145,7 +145,7 @@ SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE \c - - - :master_port SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; --- Verify that distributed tables with too-long names +-- Verify that distributed tables with too-long names -- for CHECK constraints are no trouble. CREATE TABLE sneaky_name_lengths ( col1 integer not null, @@ -154,9 +154,9 @@ CREATE TABLE sneaky_name_lengths ( CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100) ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE sneaky_name_lengths CASCADE; @@ -167,39 +167,39 @@ CREATE TABLE sneaky_name_lengths ( ); \di public.sneaky_name_lengths* List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port \di public.sneaky*225006 List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006 (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC; - Constraint | Definition + Constraint | Definition --------------------------------------------------------------------- checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) @@ -216,15 +216,15 @@ CREATE TABLE sneaky_name_lengths ( constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1) ); SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - 
- - :worker_1_port \di unique*225008 List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008 (1 row) @@ -239,15 +239,15 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port \dt *225000000000* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | postgres public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres @@ -262,16 +262,16 @@ CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E col1 integer not null PRIMARY KEY, col2 integer not null); SELECT create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Verify that quoting is used in shard_name SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; - shard_name + shard_name --------------------------------------------------------------------- "elephant_слонслонслонсло_c8b737c2_2250000000002" (1 row) @@ -279,7 +279,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 \c - - - :worker_1_port \dt public.elephant_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | postgres public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres @@ -287,7 +287,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 \di public.elephant_* List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- public | elephant_слонслонслонсло_14d34928_2250000000002 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000002 public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003 @@ -302,15 +302,15 @@ CREATE TABLE multi_name_lengths.too_long_123456789012345678901234567890123456789 col1 integer not null, col2 integer not null); SELECT create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = 'multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass; - shard_name + shard_name --------------------------------------------------------------------- multi_name_lengths.too_long_1234567890123456789012345678901_e0119164_2250000000004 (1 row) diff --git a/src/test/regress/expected/multi_name_resolution.out b/src/test/regress/expected/multi_name_resolution.out index 71b91e08d..890c336bf 100644 --- a/src/test/regress/expected/multi_name_resolution.out +++ b/src/test/regress/expected/multi_name_resolution.out @@ -8,15 +8,15 @@ SET search_path TO multi_name_resolution; create table namenest1 (id integer primary key, user_id integer); create table namenest2 (id integer primary key, value_2 integer); select * from create_distributed_table('namenest1', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) select * from create_reference_table('namenest2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT r @@ -31,7 +31,7 @@ FROM ( JOIN namenest2 ON (namenest1.user_id = namenest2.value_2) ) AS join_alias(id_deep) WHERE bar.id_deep = join_alias.id_deep; - r + r --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index c114acebf..ea5b81844 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -12,13 +12,13 @@ RESET citus.task_executor_type; SET citus.log_multi_join_order to true; SET citus.enable_repartition_joins to ON; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; - shardminvalue | shardmaxvalue + shardminvalue | shardmaxvalue --------------------------------------------------------------------- 1 | 5986 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; - shardminvalue | shardmaxvalue + shardminvalue | shardmaxvalue --------------------------------------------------------------------- 8997 | 14947 (1 row) @@ -33,7 +33,7 @@ DEBUG: Router planner does not support append-partitioned tables. CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 @@ -46,7 +46,7 @@ DEBUG: Router planner does not support append-partitioned tables. LOG: join order: [ "lineitem" ][ local partition join "orders" ] DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -82,7 +82,7 @@ DEBUG: Router planner does not support append-partitioned tables. 
CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 @@ -121,7 +121,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -147,7 +147,7 @@ DEBUG: Router planner does not support append-partitioned tables. CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 2 @@ -186,7 +186,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -214,7 +214,7 @@ LOG: join order: [ "lineitem" ] CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement DEBUG: Plan is router executable CONTEXT: PL/pgSQL function coordinator_plan(text) line 3 at FOR over EXECUTE statement - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -253,7 +253,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) diff --git a/src/test/regress/expected/multi_orderby_limit_pushdown.out b/src/test/regress/expected/multi_orderby_limit_pushdown.out index b79b799f8..860f8501a 100644 --- a/src/test/regress/expected/multi_orderby_limit_pushdown.out +++ b/src/test/regress/expected/multi_orderby_limit_pushdown.out @@ -9,7 +9,7 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 5; - user_id | avg + user_id | avg --------------------------------------------------------------------- 1 | 3.2857142857142857 4 | 2.7391304347826087 @@ -23,7 +23,7 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; - user_id | avg + user_id | avg --------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -34,7 +34,7 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) DESC LIMIT 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit (cost=0.00..0.00 rows=0 width=0) -> Sort (cost=0.00..0.00 rows=0 width=0) @@ -57,7 +57,7 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; - user_id | ?column? + user_id | ?column? 
--------------------------------------------------------------------- 1 | 4.2857142857142857 (1 row) @@ -67,7 +67,7 @@ FROM users_table GROUP BY user_id ORDER BY avg(value_1) + 1 DESC LIMIT 1; - user_id | avg + user_id | avg --------------------------------------------------------------------- 1 | 3.2857142857142857 (1 row) @@ -77,7 +77,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 5 | 65.6538461538461538 (1 row) @@ -86,7 +86,7 @@ SELECT user_id, avg(value_1) + count(value_2) FROM users_table GROUP BY user_id ORDER BY 2 DESC; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 5 | 28.6538461538461538 4 | 25.7391304347826087 @@ -101,7 +101,7 @@ SELECT user_id, avg(value_1) + count(value_2) FROM users_table GROUP BY user_id ORDER BY 2 DESC; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: remote_scan."?column?" DESC @@ -120,7 +120,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 5 | 28.6538461538461538 (1 row) @@ -130,7 +130,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 1; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 5 | 132 (1 row) @@ -140,7 +140,7 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_2) DESC LIMIT 1; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 5 | 132 (1 row) @@ -150,7 +150,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC, 1 DESC LIMIT 2; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 2 | 25 6 | 20 @@ -161,7 +161,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC, 1 LIMIT 2; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 2 | 100 3 | 100 @@ -172,7 +172,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 5 | 132 4 | 113 @@ -183,7 +183,7 @@ FROM users_table GROUP BY user_id ORDER BY 2 DESC LIMIT 2; - user_id | ?column? + user_id | ?column? --------------------------------------------------------------------- 6 | 238 1 | 232 @@ -194,7 +194,7 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 6 | 42 1 | 43 @@ -205,7 +205,7 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - user_id + user_id --------------------------------------------------------------------- 6 1 @@ -217,7 +217,7 @@ FROM users_table GROUP BY user_id ORDER BY (10000 / (sum(value_1 + value_2))) DESC LIMIT 2; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -239,7 +239,7 @@ SELECT 10000 / (sum(value_1 + value_2)) FROM users_table ORDER BY 1 DESC LIMIT 2; - ?column? + ?column? 
--------------------------------------------------------------------- 19 (1 row) @@ -249,7 +249,7 @@ FROM users_table GROUP BY user_id ORDER BY user_id * avg(value_1) DESC LIMIT 2; - user_id | avg + user_id | avg --------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 @@ -260,7 +260,7 @@ FROM users_table GROUP BY user_id ORDER BY user_id * avg(value_1 + value_2) DESC LIMIT 2; - user_id | avg + user_id | avg --------------------------------------------------------------------- 5 | 2.6538461538461538 6 | 2.1000000000000000 @@ -271,7 +271,7 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; - user_id + user_id --------------------------------------------------------------------- 5 4 @@ -283,7 +283,7 @@ FROM users_table GROUP BY user_id ORDER BY sum(value_1) DESC LIMIT 2; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -307,7 +307,7 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; - user_id | avg + user_id | avg --------------------------------------------------------------------- 6 | 2.1000000000000000 2 | 2.7777777777777778 @@ -323,7 +323,7 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY MAX(et.time), AVG(ut.value_1) LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -352,7 +352,7 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY avg(ut.value_2) DESC, AVG(et.value_2) LIMIT 5; - user_id | avg + user_id | avg --------------------------------------------------------------------- 3 | 1.8947368421052632 1 | 2.4615384615384615 @@ -367,7 +367,7 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 2; - user_id | count + user_id | count --------------------------------------------------------------------- 1 | 4 6 | 5 @@ -380,7 +380,7 @@ WHERE ut.user_id = et.user_id and et.value_2 < 5 GROUP BY ut.user_id ORDER BY 2, AVG(ut.value_1), 1 DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index 6b0e9d3d8..1d65721b7 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -6,7 +6,7 @@ SET citus.next_shard_id TO 770000; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1 ORDER BY 1,2; - l_orderkey | l_linenumber | l_shipdate + l_orderkey | l_linenumber | l_shipdate --------------------------------------------------------------------- 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 @@ -28,23 +28,23 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903 -- trigger the creation of toasted tables and indexes. This in turn prints -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. 
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; - sum | avg + sum | avg --------------------------------------------------------------------- 17999 | 3.0189533713518953 (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); - sum | avg + sum | avg --------------------------------------------------------------------- 30184 | 3.0159872102318145 (1 row) -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; - sum | avg + sum | avg --------------------------------------------------------------------- - | + | (1 row) -- The tests below verify that we can prune shards partitioned over different @@ -58,9 +58,9 @@ CREATE TABLE varchar_partitioned_table varchar_column varchar(100) ); SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Create logical shards and shard placements with shardid 100,101 @@ -86,9 +86,9 @@ CREATE TABLE array_partitioned_table array_column text[] ); SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO DEBUG2; @@ -122,9 +122,9 @@ CREATE TABLE composite_partitioned_table composite_column composite_type ); SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO DEBUG2; @@ -150,7 +150,7 @@ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, SET client_min_messages TO ERROR; EXPLAIN (COSTS OFF) SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -163,7 +163,7 @@ SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; EXPLAIN (COSTS OFF) SELECT count(*) FROM array_partitioned_table WHERE array_column > '{BA1000U2AMO4ZGX, BZZXSP27F21T6}'; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -176,7 +176,7 @@ SELECT count(*) FROM array_partitioned_table EXPLAIN (COSTS OFF) SELECT count(*) FROM composite_partitioned_table WHERE composite_column < '(b,5,c)'::composite_type; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index ed33aa394..d7e1fb757 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -29,22 +29,22 @@ INSERT INTO partitioning_hash_test VALUES (4, 4); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('partitioning_hash_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -53,7 +53,7 @@ SELECT * FROM partitioning_test ORDER BY 1; (4 rows) SELECT * FROM partitioning_hash_test ORDER BY 1; - id | subid + id | subid --------------------------------------------------------------------- 1 | 2 2 | 13 @@ -69,7 +69,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2009 @@ -84,7 +84,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2009 | 4 @@ -98,7 +98,7 @@ FROM WHERE logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_hash_test partitioning_hash_test_0 @@ -113,7 +113,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_hash_test | 4 partitioning_hash_test_0 | 4 @@ -130,7 +130,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2011 @@ -144,7 +144,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2011 | 4 @@ -165,7 +165,7 @@ FROM WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; - logicalrelid + logicalrelid --------------------------------------------------------------------- partitioning_test partitioning_test_2012 @@ -179,7 +179,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | count + logicalrelid | count --------------------------------------------------------------------- partitioning_test | 4 partitioning_test_2012 | 4 @@ -202,7 +202,7 @@ NOTICE: Copying data from local table... 
INSERT INTO partitioning_hash_test VALUES (9, 12); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -213,7 +213,7 @@ SELECT * FROM partitioning_test ORDER BY 1; (6 rows) SELECT * FROM partitioning_hash_test ORDER BY 1; - id | subid + id | subid --------------------------------------------------------------------- 1 | 2 2 | 13 @@ -226,9 +226,9 @@ SELECT * FROM partitioning_hash_test ORDER BY 1; -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- load some data @@ -237,7 +237,7 @@ INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-06-2009 2 | 07-07-2010 @@ -269,9 +269,9 @@ SET citus.shard_replication_factor TO 1; DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); @@ -286,9 +286,9 @@ DETAIL: Relation "partitioning_test_failure_2009" is partitioned table itself a -- multi-level partitioning is not allowed in different order DROP TABLE partitioning_test_failure_2009; SELECT create_distributed_table('partitioning_test_failure', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); @@ -304,7 +304,7 @@ COPY partitioning_test FROM STDIN WITH CSV; COPY partitioning_test_2009 FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 9 | 01-01-2009 10 | 01-01-2010 @@ -325,7 +325,7 @@ INSERT INTO partitioning_test VALUES(19, '2009-02-02'); INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 15 | 02-01-2009 16 | 02-01-2010 @@ -342,7 +342,7 @@ INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 5 | 06-06-2012 5 | 
06-06-2012 @@ -365,7 +365,7 @@ UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 7 | 07-07-2013 8 | 08-08-2013 @@ -392,7 +392,7 @@ WHERE id IN (SELECT id FROM partitioning_test WHERE id = 2); -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-07-2009 2 | 07-07-2010 @@ -405,7 +405,7 @@ DELETE FROM partitioning_test WHERE id = 9; DELETE FROM partitioning_test_2010 WHERE id = 10; -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- (0 rows) @@ -413,10 +413,10 @@ SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT; \d+ partitioning_test Partitioned table "public.partitioning_test" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------------------------------------------------------------------- - id | integer | | | | plain | | - time | date | | | | plain | | + id | integer | | | | plain | | + time | date | | | | plain | | Partition key: RANGE ("time") Partitions: partitioning_test_2009 FOR VALUES FROM ('01-01-2009') TO ('01-01-2010'), partitioning_test_2010 FOR VALUES FROM ('01-01-2010') TO ('01-01-2011'), @@ -429,14 +429,14 @@ INSERT INTO partitioning_test VALUES(21, '2014-02-02'); INSERT INTO partitioning_test VALUES(22, '2015-04-02'); -- see they are inserted into default partition SELECT * FROM partitioning_test WHERE id > 20 ORDER BY 1, 2; - id | time + id | time --------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; - id | time + id | time --------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 @@ -455,14 +455,14 @@ ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_default DEFAULT END; -- see data is in the table, but some moved out from default partition SELECT * FROM partitioning_test WHERE id > 20 ORDER BY 1, 2; - id | time + id | time --------------------------------------------------------------------- 21 | 02-02-2014 22 | 04-02-2015 (2 rows) SELECT * FROM partitioning_test_default ORDER BY 1, 2; - id | time + id | time --------------------------------------------------------------------- 22 | 04-02-2015 (1 row) @@ -471,7 +471,7 @@ SELECT * FROM partitioning_test_default ORDER BY 1, 2; UPDATE partitioning_test SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-08-2009 2 | 07-08-2010 @@ -505,7 +505,7 @@ SELECT * FROM partitioning_test ORDER BY 1; UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 day'; -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; - id | time + id | time --------------------------------------------------------------------- 1 | 06-09-2009 3 | 09-11-2009 
@@ -533,7 +533,7 @@ CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id);
 CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id);
 -- see index is created
 SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname;
- tablename | indexname 
+ tablename | indexname
 ---------------------------------------------------------------------
  partitioning_test_2010 | partitioned_2010_index
  partitioning_test_2009 | partitioning_2009_index
@@ -564,7 +564,7 @@ FOR VALUES FROM (0) TO (10);
 CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a);
 -- see index is created
 SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname;
- tablename | indexname 
+ tablename | indexname
 ---------------------------------------------------------------------
  non_distributed_partitioned_table_1 | non_distributed_partitioned_table_1_a_idx
 (1 row)
 
@@ -572,7 +572,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distribute
 -- drop the index and see it is dropped
 DROP INDEX non_distributed_partitioned_table_index;
 SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname;
- tablename | indexname 
+ tablename | indexname
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -584,7 +584,7 @@ ALTER TABLE partitioning_test_2010 ADD new_column_2 int;
 ERROR: cannot add column to a partition
 -- see additional column is created
 SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1;
- name | type 
+ name | type
 ---------------------------------------------------------------------
  id | integer
  new_column | integer
@@ -592,7 +592,7 @@ SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass O
 (3 rows)
 
 SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1;
- name | type 
+ name | type
 ---------------------------------------------------------------------
  id | integer
  new_column | integer
@@ -616,7 +616,7 @@ FROM
 WHERE
 table_name = 'partitioning_test_2009' AND
 constraint_name = 'partitioning_2009_primary';
- table_name | constraint_name | constraint_type 
+ table_name | constraint_name | constraint_type
 ---------------------------------------------------------------------
  partitioning_test_2009 | partitioning_2009_primary | PRIMARY KEY
 (1 row)
 
@@ -634,7 +634,7 @@ WHERE
 table_name LIKE 'partitioning_hash_test%' AND
 constraint_type = 'PRIMARY KEY'
 ORDER BY 1;
- table_name | constraint_name | constraint_type 
+ table_name | constraint_name | constraint_type
 ---------------------------------------------------------------------
  partitioning_hash_test | partitioning_hash_primary | PRIMARY KEY
  partitioning_hash_test_0 | partitioning_hash_test_0_pkey | PRIMARY KEY
@@ -654,7 +654,7 @@ INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01');
 ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE;
 -- see FOREIGN KEY is created
 SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1;
- Constraint 
+ Constraint
 ---------------------------------------------------------------------
  partitioning_2012_foreign
 (1 row)
 
@@ -663,12 +663,12 @@ SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::reg
 DELETE FROM
 partitioning_test_2009 WHERE id = 5;
 -- see that element is deleted from both partitions
 SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -676,14 +676,14 @@ SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1;
 ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009;
 -- see DETACHed partitions content is not accessible from partitioning_test;
 SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
 -- delete from default partition
 DELETE FROM partitioning_test WHERE time >= '2015-01-01';
 SELECT * FROM partitioning_test_default;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -692,17 +692,17 @@ CREATE TABLE partitioning_test_reference(id int PRIMARY KEY, subid int);
 INSERT INTO partitioning_test_reference SELECT a, a FROM generate_series(1, 50) a;
 SELECT create_reference_table('partitioning_test_reference');
 NOTICE: Copying data from local table...
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey FOREIGN KEY (id) REFERENCES partitioning_test_reference(id) ON DELETE CASCADE;
 CREATE TABLE partitioning_test_foreign_key(id int PRIMARY KEY, value int);
 SELECT create_distributed_table('partitioning_test_foreign_key', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO partitioning_test_foreign_key SELECT * FROM partitioning_test_reference;
@@ -715,7 +715,7 @@ WHERE
 constraint_type = 'FOREIGN KEY'
 ORDER BY
 1,2;
- table_name | constraint_name | constraint_type 
+ table_name | constraint_name | constraint_type
 ---------------------------------------------------------------------
  partitioning_hash_test | partitioning_reference_fk_test | FOREIGN KEY
  partitioning_hash_test_0 | partitioning_reference_fk_test | FOREIGN KEY
@@ -744,7 +744,7 @@ SELECT right(table_name, 7)::int as shardid, * FROM (
 ) q
 $$)
 ) w ORDER BY 1, 2, 3, 4;
- shardid | table_name | constraint_name | constraint_type 
+ shardid | table_name | constraint_name | constraint_type
 ---------------------------------------------------------------------
  1660012 | partitioning_hash_test_1660012 | partitioning_reference_fk_test_1660012 | FOREIGN KEY
  1660013 | partitioning_hash_test_1660013 | partitioning_reference_fk_test_1660013 | FOREIGN KEY
@@ -769,17 +769,17 @@ DROP TYPE foreign_key_details;
 -- after connection re-establishment
 SET citus.shard_replication_factor TO 1;
 SELECT * FROM partitioning_test WHERE id = 11 or id = 12;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
- 11 | 01-02-2011 | 
- 11 | 01-02-2011 | 
- 12 | 01-02-2012 | 
- 12 | 01-02-2012 | 
+ 11 | 01-02-2011 |
+ 11 | 01-02-2011 |
+ 12 | 01-02-2012 |
+ 12 | 01-02-2012 |
 (4 rows)
 
 DELETE FROM partitioning_test_reference WHERE id = 11 or id = 12;
 SELECT * FROM
 partitioning_hash_test ORDER BY 1, 2;
- id | subid 
+ id | subid
 ---------------------------------------------------------------------
  1 | 2
  2 | 13
@@ -792,12 +792,12 @@ SELECT * FROM partitioning_hash_test ORDER BY 1, 2;
 DELETE FROM partitioning_test_foreign_key WHERE id = 2 OR id = 9;
 -- see data is deleted from referencing table
 SELECT * FROM partitioning_test WHERE id = 11 or id = 12;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT * FROM partitioning_hash_test ORDER BY 1, 2;
- id | subid 
+ id | subid
 ---------------------------------------------------------------------
  1 | 2
  3 | 7
@@ -813,7 +813,7 @@ BEGIN;
 ALTER TABLE partitioning_test ADD newer_column int;
 -- see additional column is created
 SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1;
- name | type 
+ name | type
 ---------------------------------------------------------------------
  id | integer
  new_column | integer
@@ -824,7 +824,7 @@ SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass O
 ROLLBACK;
 -- see rollback is successful
 SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1;
- name | type 
+ name | type
 ---------------------------------------------------------------------
  id | integer
  new_column | integer
@@ -836,19 +836,19 @@ BEGIN;
 COPY partitioning_test FROM STDIN WITH CSV;
 -- see the data is loaded to shards
 SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
  22 | 01-01-2010 | 22
 (1 row)
 
 SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
  23 | 01-01-2011 | 23
 (1 row)
 
 SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
  24 | 01-01-2013 | 24
 (1 row)
 
@@ -856,7 +856,7 @@ SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1;
 ROLLBACK;
 -- see rollback is successful
 SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -866,29 +866,29 @@ BEGIN;
 INSERT INTO partitioning_test VALUES(25, '2010-02-02');
 -- see the data is loaded to shards
 SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
- 25 | 02-02-2010 | 
+ 25 | 02-02-2010 |
 (1 row)
 
 -- INSERT/SELECT in transaction
 INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25;
 -- see the data is loaded to shards
 SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
- 25 | 02-02-2010 | 
- 25 | 02-02-2010 | 
+ 25 | 02-02-2010 |
+ 25 | 02-02-2010 |
 (2 rows)
 
 -- UPDATE in transaction
 UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25;
 -- see the data is updated
 SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
- 25 | 10-10-2010 | 
- 25 | 10-10-2010 | 
+ 25 | 10-10-2010 |
+ 25 | 10-10-2010 |
 (2 rows)
 
 -- perform operations on partition and partitioned tables together
@@ -898,7 +898,7 @@ COPY partitioning_test FROM STDIN WITH CSV;
 COPY partitioning_test_2010 FROM STDIN WITH CSV;
 -- see the data is loaded to shards (we should see 4 rows with same content)
 SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
  26 | 02-02-2010 | 26
  26 | 02-02-2010 | 26
@@ -909,7 +909,7 @@ SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1;
 ROLLBACK;
 -- see rollback is successful
 SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -920,7 +920,7 @@ DROP TABLE partitioning_test_2011;
 COMMIT;
 -- see DROPped partitions content is not accessible
 SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -932,7 +932,7 @@ SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-0
 TRUNCATE partitioning_test_2012;
 -- see partition is TRUNCATEd
 SELECT * FROM partitioning_test_2012 ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -940,7 +940,7 @@ SELECT * FROM partitioning_test_2012 ORDER BY 1;
 TRUNCATE partitioning_test;
 -- see partitioned table is TRUNCATEd
 SELECT * FROM partitioning_test ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -950,7 +950,7 @@ INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01');
 DROP TABLE partitioning_test_2010;
 -- see DROPped partitions content is not accessible from partitioning_test;
 SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1;
- id | time | new_column 
+ id | time | new_column
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -959,7 +959,7 @@ DROP TABLE partitioning_test;
 DROP TABLE partitioning_test_reference;
 -- dropping the parent should CASCADE to the children as well
 SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1;
- table_name 
+ table_name
 ---------------------------------------------------------------------
  partitioning_test_2009
  partitioning_test_failure
@@ -970,15 +970,15 @@ SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitio
 CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time);
 CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time);
 SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- INSERT/SELECT from regular table to partitioned table
@@ -1031,7 +1031,7 @@ FROM
 ) AS final_query
 GROUP BY types
 ORDER BY types;
- types | sumofeventtype 
+ types | sumofeventtype
 ---------------------------------------------------------------------
  0 | 43
  1 | 44
@@ -1110,7 +1110,7 @@ GROUP BY types
 ORDER BY types;
- types | sumofeventtype 
+ types | sumofeventtype
 ---------------------------------------------------------------------
  0 | 367
  2 | 360
@@ -1124,9 +1124,9 @@ CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_parti
 CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2017-12-01', '2017-12-02', '2017-12-03', '2017-12-04', '2017-12-05');
 -- test distributing partitioned table colocated with another partitioned table
 SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- INSERT/SELECT from partitioned table to partitioned table
@@ -1181,7 +1181,7 @@ count(*) AS cnt, "generated_group_field"
 ORDER BY
 cnt DESC, generated_group_field ASC
 LIMIT 10;
- cnt | generated_group_field 
+ cnt | generated_group_field
 ---------------------------------------------------------------------
  1851 | 1
  1077 | 4
@@ -1198,9 +1198,9 @@ count(*) AS cnt, "generated_group_field"
 CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2);
 CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0);
 SELECT create_distributed_table('multi_column_partitioning', 'c1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- test INSERT to multi-column partitioned table
@@ -1223,7 +1223,7 @@ DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20).
 CONTEXT: while executing command on localhost:xxxxx
 -- see data is loaded to multi-column partitioned table
 SELECT * FROM multi_column_partitioning ORDER BY 1, 2;
- c1 | c2 
+ c1 | c2
 ---------------------------------------------------------------------
  1 | 1
  5 | -5
@@ -1240,20 +1240,20 @@ CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES
 CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
 -- distribute partitioned table
 SELECT create_distributed_table('partitioning_locks', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- test locks on router SELECT
 BEGIN;
 SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2;
- id | ref_id | time 
+ id | ref_id | time
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks_2009 | relation | AccessShareLock
@@ -1264,12 +1264,12 @@ COMMIT;
 -- test locks on real-time SELECT
 BEGIN;
 SELECT * FROM partitioning_locks ORDER BY 1, 2;
- id | ref_id | time 
+ id | ref_id | time
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks_2009 | relation | AccessShareLock
@@ -1281,12 +1281,12 @@ COMMIT;
 SET citus.task_executor_type TO 'task-tracker';
 BEGIN;
 SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2;
- id | ref_id | time | id | ref_id | time 
+ id | ref_id | time | id | ref_id | time
 ---------------------------------------------------------------------
 (0 rows)
 
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks_2009 | relation | AccessShareLock
@@ -1299,7 +1299,7 @@ RESET citus.task_executor_type;
 BEGIN;
 INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01');
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1314,7 +1314,7 @@ COMMIT;
 BEGIN;
 UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1329,7 +1329,7 @@ COMMIT;
 BEGIN;
 DELETE FROM partitioning_locks WHERE id = 1;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1343,15 +1343,15 @@ COMMIT;
 -- test locks on INSERT/SELECT
 CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date);
 SELECT create_distributed_table('partitioning_locks_for_select', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 BEGIN;
 INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1367,7 +1367,7 @@ COMMIT;
 BEGIN;
 INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1381,7 +1381,7 @@ COMMIT;
 BEGIN;
 UPDATE partitioning_locks SET time = '2009-03-01';
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessShareLock
  partitioning_locks | relation | RowExclusiveLock
@@ -1396,7 +1396,7 @@ COMMIT;
 BEGIN;
 ALTER TABLE partitioning_locks ADD COLUMN new_column int;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessExclusiveLock
  partitioning_locks | relation | AccessShareLock
@@ -1411,7 +1411,7 @@ COMMIT;
 BEGIN;
 TRUNCATE partitioning_locks;
 SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3;
- relation | locktype | mode 
+ relation | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | relation | AccessExclusiveLock
  partitioning_locks | relation | AccessShareLock
@@ -1441,7 +1441,7 @@ WHERE
 pid = pg_backend_pid()
 ORDER BY
 1, 2, 3;
- logicalrelid | locktype | mode 
+ logicalrelid | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | advisory | ShareUpdateExclusiveLock
  partitioning_locks | advisory | ShareUpdateExclusiveLock
@@ -1475,7 +1475,7 @@ WHERE
 pid = pg_backend_pid()
 ORDER BY
 1, 2, 3;
- logicalrelid | locktype | mode 
+ logicalrelid | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks_2009 | advisory | ShareLock
  partitioning_locks_2009 | advisory | ShareLock
@@ -1501,7 +1501,7 @@ WHERE
 pid = pg_backend_pid()
 ORDER BY
 1, 2, 3;
- logicalrelid | locktype | mode 
+ logicalrelid | locktype | mode
 ---------------------------------------------------------------------
  partitioning_locks | advisory | ShareUpdateExclusiveLock
  partitioning_locks | advisory | ShareUpdateExclusiveLock
@@ -1524,48 +1524,48 @@ CREATE TABLE partitioning_hash_join_test_0 PARTITION OF partitioning_hash_join_t
 CREATE TABLE partitioning_hash_join_test_1 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 1);
 CREATE TABLE partitioning_hash_join_test_2 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 2);
 SELECT create_distributed_table('partitioning_hash_join_test', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT success FROM run_command_on_workers('alter system set enable_mergejoin to off');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system set enable_indexscan to off');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system set enable_indexonlyscan to off');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to off');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('select pg_reload_conf()');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
@@ -1573,7 +1573,7 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()');
 EXPLAIN (COSTS OFF)
 SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid);
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
  Custom Scan (Citus Adaptive)
  Task Count: 4
@@ -1597,21 +1597,21 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -- set partition-wise join on and parallel to off
 SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 0');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to on');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('select pg_reload_conf()');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
@@ -1621,7 +1621,7 @@ SET enable_partitionwise_join TO on;
 ANALYZE partitioning_hash_test, partitioning_hash_join_test;
 EXPLAIN (COSTS OFF)
 SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid);
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
  Custom Scan (Citus Adaptive)
  Task Count: 4
@@ -1651,7 +1651,7 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -- partitions
 EXPLAIN (COSTS OFF)
 SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id);
- QUERY PLAN 
+ QUERY PLAN
 ---------------------------------------------------------------------
  Custom Scan (Citus Adaptive)
  Task Count: 4
@@ -1673,49 +1673,49 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
 -- reset partition-wise join
 SELECT success FROM run_command_on_workers('alter system reset enable_partitionwise_join');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system reset enable_mergejoin');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system reset enable_nestloop');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system reset enable_indexscan');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system reset enable_indexonlyscan');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('alter system reset max_parallel_workers_per_gather');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
 (2 rows)
 
 SELECT success FROM run_command_on_workers('select pg_reload_conf()');
- success 
+ success
 ---------------------------------------------------------------------
  t
  t
@@ -1736,9 +1736,9 @@ SET citus.replication_model TO 'streaming';
 CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
 CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
 SELECT create_distributed_table('partitioning_test', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 DROP TABLE partitioning_test;
@@ -1746,9 +1746,9 @@ DROP TABLE partitioning_test;
 CREATE SCHEMA partitioning_schema;
 CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time);
 SELECT create_distributed_table('partitioning_schema."schema-test"', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE partitioning_schema."schema-test_2009"(id int, time date);
@@ -1761,7 +1761,7 @@ FROM
 WHERE
 logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
 ORDER BY 1;
- logicalrelid 
+ logicalrelid
 ---------------------------------------------------------------------
  partitioning_schema."schema-test"
  partitioning_schema."schema-test_2009"
@@ -1777,7 +1777,7 @@ GROUP
 BY
 logicalrelid
 ORDER BY
 1,2;
- logicalrelid | count 
+ logicalrelid | count
 ---------------------------------------------------------------------
  partitioning_schema."schema-test" | 4
  partitioning_schema."schema-test_2009" | 4
@@ -1787,9 +1787,9 @@ DROP TABLE partitioning_schema."schema-test";
 -- make sure we can create partition of a distributed table in a schema
 CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time);
 SELECT create_distributed_table('partitioning_schema."schema-test"', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF partitioning_schema."schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
@@ -1801,7 +1801,7 @@ FROM
 WHERE
 logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
 ORDER BY 1;
- logicalrelid 
+ logicalrelid
 ---------------------------------------------------------------------
  partitioning_schema."schema-test"
  partitioning_schema."schema-test_2009"
@@ -1817,7 +1817,7 @@ GROUP
 BY
 logicalrelid
 ORDER BY
 1,2;
- logicalrelid | count 
+ logicalrelid | count
 ---------------------------------------------------------------------
  partitioning_schema."schema-test" | 4
  partitioning_schema."schema-test_2009" | 4
@@ -1828,9 +1828,9 @@ DROP TABLE partitioning_schema."schema-test";
 CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time);
 SET search_path = partitioning_schema;
 SELECT create_distributed_table('"schema-test"', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF "schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
@@ -1842,7 +1842,7 @@ FROM
 WHERE
 logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
 ORDER BY 1;
- logicalrelid 
+ logicalrelid
 ---------------------------------------------------------------------
  "schema-test"
  "schema-test_2009"
@@ -1858,7 +1858,7 @@ GROUP
 BY
 logicalrelid
 ORDER BY
 1,2;
- logicalrelid | count 
+ logicalrelid | count
 ---------------------------------------------------------------------
  "schema-test" | 4
  "schema-test_2009" | 4
@@ -1868,16 +1868,16 @@ ORDER BY
 -- tables with foreign keys
 CREATE TABLE reference_table(id int PRIMARY KEY);
 SELECT create_reference_table('reference_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE reference_table_2(id int PRIMARY KEY);
 SELECT create_reference_table('reference_table_2');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
@@ -1887,27 +1887,27 @@ CREATE TABLE partitioning_test_2010 (LIKE partitioning_test);
 CREATE TABLE partitioning_test_2011 (LIKE partitioning_test);
 -- distributing partitioning_test will also distribute partitioning_test_2008
 SELECT create_distributed_table('partitioning_test', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('partitioning_test_2009', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('partitioning_test_2010', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('partitioning_test_2011', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey
diff --git a/src/test/regress/expected/multi_partitioning_utils.out b/src/test/regress/expected/multi_partitioning_utils.out
index 7588140e8..68fa39791 100644
--- a/src/test/regress/expected/multi_partitioning_utils.out
+++ b/src/test/regress/expected/multi_partitioning_utils.out
@@ -74,23 +74,23 @@ $function$;
 CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time);
 -- we should be able to get the partitioning information even if there are no partitions
 SELECT generate_partition_information('date_partitioned_table');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  RANGE ("time")
 (1 row)
 
 -- we should be able to drop and re-create the partitioned table using the command that Citus generates
 SELECT drop_and_recreate_partitioned_table('date_partitioned_table');
- drop_and_recreate_partitioned_table 
+ drop_and_recreate_partitioned_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- we should also be able to see the PARTITION BY ... for the parent table
 SELECT master_get_table_ddl_events('date_partitioned_table');
- master_get_table_ddl_events 
+ master_get_table_ddl_events
 ---------------------------------------------------------------------
- CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") 
+ CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time")
 ALTER TABLE public.date_partitioned_table OWNER TO postgres
 (2 rows)
 
@@ -99,20 +99,20 @@ CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES
 CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01');
 -- we should be able to get the partitioning information after the partitions are created
 SELECT generate_partition_information('date_partitioned_table');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  RANGE ("time")
 (1 row)
 
 -- let's get the attach partition commands
 SELECT generate_alter_table_attach_partition_command('date_partition_2006');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007');
 (1 row)
 
 SELECT generate_alter_table_attach_partition_command('date_partition_2007');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008');
 (1 row)
 
@@ -120,27 +120,27 @@ SELECT generate_alter_table_attach_partition_command('date_partition_2007');
 -- detach and attach the partition by the command generated by us
 \d+ date_partitioned_table
 Table "public.date_partitioned_table"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | | | plain | | 
- time | date | | | | plain | | 
+ id | integer | | | | plain | |
+ time | date | | | | plain | |
 Partition key: RANGE ("time")
 Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'),
 date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008')
 
 SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table');
- detach_and_attach_partition 
+ detach_and_attach_partition
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- check that both partitions are visible
 \d+ date_partitioned_table
 Table "public.date_partitioned_table"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | | | plain | | 
- time | date | | | | plain | | 
+ id | integer | | | | plain | |
+ time | date | | | | plain | |
 Partition key: RANGE ("time")
 Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'),
 date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008')
 
@@ -150,52 +150,52 @@ Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007')
 CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time);
 CREATE TABLE date_partition_2007_100 (id int, time date );
 -- now create the partitioning hierarchy
-SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', 
- referenced_shard:=100, referenced_schema_name:='public', 
+SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
+ referenced_shard:=100, referenced_schema_name:='public',
 command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' );
- worker_apply_inter_shard_ddl_command 
+ worker_apply_inter_shard_ddl_command
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- the hierarchy is successfully created
 \d+ date_partitioned_table_100
 Table "public.date_partitioned_table_100"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | | | plain | | 
- time | date | | | | plain | | 
+ id | integer | | | | plain | |
+ time | date | | | | plain | |
 Partition key: RANGE ("time")
 Partitions: date_partition_2007_100 FOR VALUES FROM ('01-01-2007') TO ('01-02-2008')
 
 -- Citus can also get the DDL events for the partitions as regular tables
 SELECT master_get_table_ddl_events('date_partition_2007_100');
- master_get_table_ddl_events 
+ master_get_table_ddl_events
 ---------------------------------------------------------------------
 CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
 ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
 (2 rows)
 
 -- now break the partitioning hierarchy
-SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', 
- referenced_shard:=100, referenced_schema_name:='public', 
+SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
+ referenced_shard:=100, referenced_schema_name:='public',
 command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' );
- worker_apply_inter_shard_ddl_command 
+ worker_apply_inter_shard_ddl_command
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- the hierarchy is successfully broken
 \d+ date_partitioned_table_100
 Table "public.date_partitioned_table_100"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | | | plain | | 
- time | date | | | | plain | | 
+ id | integer | | | | plain | |
+ time | date | | | | plain | |
 Partition key: RANGE ("time")
 Number of partitions: 0
--- now let's have some more complex partitioning hierarchies with 
+-- now let's have some more complex partitioning hierarchies with
 -- tables on different schemas and constraints on the tables
 CREATE SCHEMA partition_parent_schema;
 CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time);
@@ -205,37 +205,37 @@ CREATE SCHEMA partition_child_2_schema;
 CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date );
 -- we should be able to get the partitioning information even if there are no partitions
 SELECT generate_partition_information('partition_parent_schema.parent_table');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  RANGE ("time")
 (1 row)
 
 -- we should be able to drop and re-create the partitioned table using the command that Citus generates
 SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table');
- drop_and_recreate_partitioned_table 
+ drop_and_recreate_partitioned_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02');
 SET search_path = 'partition_parent_schema';
 ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
 SELECT public.generate_partition_information('parent_table');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  RANGE ("time")
 (1 row)
 
 -- let's get the attach partition commands
 SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010');
 (1 row)
 
 SET search_path = 'partition_child_2_schema';
 SELECT public.generate_alter_table_attach_partition_command('child_2');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007');
 (1 row)
 
@@ -244,27 +244,27 @@ SET search_path = 'partition_parent_schema';
 -- detach and attach the partition by the command generated by us
 \d+ parent_table
 Table "partition_parent_schema.parent_table"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | not null | | plain | | 
- time | date | | | now() | plain | | 
+ id | integer | | not null | | plain | |
+ time | date | | | now() | plain | |
 Partition key: RANGE ("time")
 Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'),
 partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007')
 
 SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table');
- detach_and_attach_partition 
+ detach_and_attach_partition
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- check that both partitions are visible
 \d+ parent_table
 Table "partition_parent_schema.parent_table"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
- id | integer | | not null | | plain | | 
- time | date | | | now() | plain | | 
+ id | integer | | not null | | plain | |
+ time | date | | | now() | plain | |
 Partition key: RANGE ("time")
 Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'),
 partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007')
 
@@ -278,7 +278,7 @@ SELECT public.print_partitions('partition_child_1_schema.child_1');
 ERROR: "child_1" is not a parent table
 -- now print the partitions
 SELECT public.print_partitions('parent_table');
- print_partitions 
+ print_partitions
 ---------------------------------------------------------------------
  child_1,child_2
 (1 row)
 
@@ -309,41 +309,41 @@ CREATE TABLE multi_column_partition_2(
 );
 -- partitioning information
 SELECT generate_partition_information('multi_column_partitioned');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  RANGE (a, (((a + b) + 1)), some_function(upper(c)))
 (1 row)
 
 SELECT master_get_table_ddl_events('multi_column_partitioned');
- master_get_table_ddl_events 
+ master_get_table_ddl_events
 ---------------------------------------------------------------------
- CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) 
+ CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c)))
 ALTER TABLE public.multi_column_partitioned OWNER TO postgres
 (2 rows)
 
 SELECT drop_and_recreate_partitioned_table('multi_column_partitioned');
- drop_and_recreate_partitioned_table 
+ drop_and_recreate_partitioned_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- partitions and their ranges
 ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250');
 SELECT generate_alter_table_attach_partition_command('multi_column_partition_1');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250');
 (1 row)
 
 ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE);
 SELECT generate_alter_table_attach_partition_command('multi_column_partition_2');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE);
 (1 row)
 
 SELECT generate_alter_table_detach_partition_command('multi_column_partition_2');
- generate_alter_table_detach_partition_command 
+ generate_alter_table_detach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE IF EXISTS public.multi_column_partitioned DETACH PARTITION public.multi_column_partition_2;
 (1 row)
 
@@ -351,27 +351,27 @@ SELECT generate_alter_table_detach_partition_command('multi_column_partition_2')
 -- finally a test with LIST partitioning
 CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ;
 SELECT generate_partition_information('list_partitioned');
- generate_partition_information 
+ generate_partition_information
 ---------------------------------------------------------------------
  LIST (col1)
 (1 row)
 
 SELECT master_get_table_ddl_events('list_partitioned');
- master_get_table_ddl_events 
+ master_get_table_ddl_events
 ---------------------------------------------------------------------
- CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) 
+ CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1)
 ALTER TABLE public.list_partitioned OWNER TO postgres
 (2 rows)
 
 SELECT drop_and_recreate_partitioned_table('list_partitioned');
- drop_and_recreate_partitioned_table 
+ drop_and_recreate_partitioned_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104);
 SELECT generate_alter_table_attach_partition_command('list_partitioned_1');
- generate_alter_table_attach_partition_command 
+ generate_alter_table_attach_partition_command
 ---------------------------------------------------------------------
  ALTER TABLE public.list_partitioned ATTACH PARTITION public.list_partitioned_1 FOR VALUES IN ('100', '101', '102', '103', '104');
 (1 row)
 
@@ -387,30 +387,30 @@ CREATE TABLE capitals (
 ) INHERITS (cities);
 -- returns true since capitals inherits from cities
 SELECT table_inherits('capitals');
- table_inherits 
+ table_inherits
 ---------------------------------------------------------------------
  t
 (1 row)
 
--- although date_partition_2006 inherits from its parent 
+-- although date_partition_2006 inherits from its parent
 -- returns false since the hierarchy is formed via partitioning
 SELECT table_inherits('date_partition_2006');
- table_inherits 
+ table_inherits
 ---------------------------------------------------------------------
  f
 (1 row)
 
 -- returns true since cities is inherited by capitals
 SELECT table_inherited('cities');
- table_inherited 
+ table_inherited
 ---------------------------------------------------------------------
  t
 (1 row)
 
--- although date_partitioned_table is inherited by its partitions 
+-- although date_partitioned_table is inherited by its partitions
 -- returns false since the hierarchy is formed via partitioning
 SELECT table_inherited('date_partitioned_table');
- table_inherited 
+ table_inherited
 ---------------------------------------------------------------------
  f
 (1 row)
diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out
index d2f9c99c2..b8e8608dc 100644
--- a/src/test/regress/expected/multi_prepare_plsql.out
+++ b/src/test/regress/expected/multi_prepare_plsql.out
@@ -5,8 +5,8 @@
 -- and converted into both plain SQL and PL/pgsql functions, which
 -- use prepared statements internally.
 -- many of the tests in this file are intended for testing the non-fast-path
--- router planner, so we're explicitly disabling it in this file. 
--- We have a bunch of other tests that trigger the fast-path-router 
+-- router planner, so we're explicitly disabling it in this file.
+-- We have a bunch of other tests that trigger the fast-path-router
 SET citus.enable_fast_path_router_planner TO false;
 CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$
 DECLARE
@@ -141,111 +141,111 @@ SET citus.task_executor_type TO 'task-tracker';
 SET client_min_messages TO INFO;
 -- now, run PL/pgsql functions
 SELECT plpgsql_test_1();
- plpgsql_test_1 
+ plpgsql_test_1
 ---------------------------------------------------------------------
  2985
 (1 row)
 
 SELECT plpgsql_test_2();
- plpgsql_test_2 
+ plpgsql_test_2
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 SELECT plpgsql_test_3();
- plpgsql_test_3 
+ plpgsql_test_3
 ---------------------------------------------------------------------
  1956
 (1 row)
 
 SELECT plpgsql_test_4();
- plpgsql_test_4 
+ plpgsql_test_4
 ---------------------------------------------------------------------
  7806
 (1 row)
 
 SELECT plpgsql_test_5();
- plpgsql_test_5 
+ plpgsql_test_5
 ---------------------------------------------------------------------
  39
 (1 row)
 
 -- run PL/pgsql functions with different parameters
 SELECT plpgsql_test_6(155);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  11813
 (1 row)
 
 SELECT plpgsql_test_6(1555);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  10185
 (1 row)
 
 SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA');
- plpgsql_test_7 
+ plpgsql_test_7
 ---------------------------------------------------------------------
  ("UNITED KINGDOM",CHINA,1996,18560.22)
 (1 row)
 
 SELECT plpgsql_test_7('FRANCE', 'GERMANY');
- plpgsql_test_7 
+ plpgsql_test_7
 ---------------------------------------------------------------------
  (GERMANY,FRANCE,1995,2399.2948)
 (1 row)
 
 -- now, PL/pgsql functions with random order
 SELECT plpgsql_test_6(155);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  11813
 (1 row)
 
 SELECT plpgsql_test_3();
- plpgsql_test_3 
+ plpgsql_test_3
 ---------------------------------------------------------------------
  1956
 (1 row)
 
 SELECT plpgsql_test_7('FRANCE', 'GERMANY');
- plpgsql_test_7 
+ plpgsql_test_7
 ---------------------------------------------------------------------
  (GERMANY,FRANCE,1995,2399.2948)
 (1 row)
 
 SELECT plpgsql_test_5();
- plpgsql_test_5 
+ plpgsql_test_5
 ---------------------------------------------------------------------
  39
 (1 row)
 
 SELECT plpgsql_test_1();
- plpgsql_test_1 
+ plpgsql_test_1
 ---------------------------------------------------------------------
  2985
 (1 row)
 
 SELECT plpgsql_test_6(1555);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  10185
 (1 row)
 
 SELECT plpgsql_test_4();
- plpgsql_test_4 
+ plpgsql_test_4
 ---------------------------------------------------------------------
  7806
 (1 row)
 
 SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA');
- plpgsql_test_7 
+ plpgsql_test_7
 ---------------------------------------------------------------------
  ("UNITED KINGDOM",CHINA,1996,18560.22)
 (1 row)
 
 SELECT plpgsql_test_2();
- plpgsql_test_2 
+ plpgsql_test_2
 ---------------------------------------------------------------------
  12000
 (1 row)
 
@@ -255,26 +255,26 @@ SELECT plpgsql_test_2();
 RESET citus.task_executor_type;
 -- now, run PL/pgsql functions
 SELECT plpgsql_test_1();
- plpgsql_test_1 
+ plpgsql_test_1
 ---------------------------------------------------------------------
  2985
 (1 row)
 
 SELECT plpgsql_test_2();
- plpgsql_test_2 
+ plpgsql_test_2
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 -- run PL/pgsql functions with different parameters
 SELECT plpgsql_test_6(155);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  11813
 (1 row)
 
 SELECT plpgsql_test_6(1555);
- plpgsql_test_6 
+ plpgsql_test_6
 ---------------------------------------------------------------------
  10185
 (1 row)
 
@@ -286,9 +286,9 @@ CREATE TABLE plpgsql_table (
 );
 SET citus.shard_replication_factor TO 1;
 SELECT create_distributed_table('plpgsql_table','key','hash');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE FUNCTION no_parameter_insert() RETURNS void as $$
@@ -298,39 +298,39 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT no_parameter_insert();
- no_parameter_insert 
+ no_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE FUNCTION single_parameter_insert(key_arg int)
@@ -341,39 +341,39 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT single_parameter_insert(1);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT single_parameter_insert(2);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT single_parameter_insert(3);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT single_parameter_insert(4);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT single_parameter_insert(5);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT single_parameter_insert(6);
- single_parameter_insert 
+ single_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE FUNCTION double_parameter_insert(key_arg int, value_arg int)
@@ -384,39 +384,39 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT double_parameter_insert(1, 10);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT double_parameter_insert(2, 20);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT double_parameter_insert(3, 30);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT double_parameter_insert(4, 40);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT double_parameter_insert(5, 50);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT double_parameter_insert(6, 60);
- double_parameter_insert 
+ double_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 CREATE FUNCTION non_partition_parameter_insert(value_arg int)
@@ -427,44 +427,44 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT non_partition_parameter_insert(10);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT non_partition_parameter_insert(20);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT non_partition_parameter_insert(30);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT non_partition_parameter_insert(40);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT non_partition_parameter_insert(50);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT non_partition_parameter_insert(60);
- non_partition_parameter_insert 
+ non_partition_parameter_insert
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- check inserted values
 SELECT * FROM plpgsql_table ORDER BY key, value;
- key | value 
+ key | value
 ---------------------------------------------------------------------
  0 | 10
  0 | 20
@@ -472,24 +472,24 @@ SELECT * FROM plpgsql_table ORDER BY key, value;
  0 | 40
  0 | 50
  0 | 60
- 0 | 
- 0 | 
- 0 | 
- 0 | 
- 0 | 
- 0 | 
+ 0 |
+ 0 |
+ 0 |
+ 0 |
+ 0 |
+ 0 |
  1 | 10
- 1 | 
+ 1 |
  2 | 20
- 2 | 
+ 2 |
  3 | 30
- 3 | 
+ 3 |
  4 | 40
- 4 | 
+ 4 |
  5 | 50
- 5 | 
+ 5 |
  6 | 60
- 6 | 
+ 6 |
 (24 rows)
 
 -- check router executor select
@@ -512,42 +512,42 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT router_partition_column_select(1);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (1,10)
  (1,)
 (2 rows)
 
 SELECT router_partition_column_select(2);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (2,20)
  (2,)
 (2 rows)
 
 SELECT router_partition_column_select(3);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (3,30)
  (3,)
 (2 rows)
 
 SELECT router_partition_column_select(4);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (4,40)
  (4,)
 (2 rows)
 
 SELECT router_partition_column_select(5);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (5,50)
  (5,)
 (2 rows)
 
 SELECT router_partition_column_select(6);
- router_partition_column_select 
+ router_partition_column_select
 ---------------------------------------------------------------------
  (6,60)
  (6,)
@@ -573,37 +573,37 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT router_non_partition_column_select(10);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
 (1 row)
 
 SELECT router_non_partition_column_select(20);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,20)
 (1 row)
 
 SELECT router_non_partition_column_select(30);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,30)
 (1 row)
 
 SELECT router_non_partition_column_select(40);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,40)
 (1 row)
 
 SELECT router_non_partition_column_select(50);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,50)
 (1 row)
 
 SELECT router_non_partition_column_select(60);
- router_non_partition_column_select 
+ router_non_partition_column_select
 ---------------------------------------------------------------------
  (0,60)
 (1 row)
 
@@ -628,42 +628,42 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT real_time_non_partition_column_select(10);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
 (2 rows)
 
 SELECT real_time_non_partition_column_select(20);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,20)
  (2,20)
 (2 rows)
 
 SELECT real_time_non_partition_column_select(30);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,30)
  (3,30)
 (2 rows)
 
 SELECT real_time_non_partition_column_select(40);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,40)
  (4,40)
 (2 rows)
 
 SELECT real_time_non_partition_column_select(50);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,50)
  (5,50)
 (2 rows)
 
 SELECT real_time_non_partition_column_select(60);
- real_time_non_partition_column_select 
+ real_time_non_partition_column_select
 ---------------------------------------------------------------------
  (0,60)
  (6,60)
@@ -689,7 +689,7 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT real_time_partition_column_select(1);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -697,7 +697,7 @@ SELECT real_time_partition_column_select(1);
 (3 rows)
 
 SELECT real_time_partition_column_select(2);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -706,7 +706,7 @@ SELECT real_time_partition_column_select(2);
 (4 rows)
 
 SELECT real_time_partition_column_select(3);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -715,7 +715,7 @@ SELECT real_time_partition_column_select(3);
 (4 rows)
 
 SELECT real_time_partition_column_select(4);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -724,7 +724,7 @@ SELECT real_time_partition_column_select(4);
 (4 rows)
 
 SELECT real_time_partition_column_select(5);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -733,7 +733,7 @@ SELECT real_time_partition_column_select(5);
 (4 rows)
 
 SELECT real_time_partition_column_select(6);
- real_time_partition_column_select 
+ real_time_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
@@ -762,42 +762,42 @@ END;
 $$ LANGUAGE plpgsql;
 -- execute 6 times to trigger prepared statement usage
 SELECT task_tracker_non_partition_column_select(10);
- task_tracker_non_partition_column_select 
+ task_tracker_non_partition_column_select
 ---------------------------------------------------------------------
  (0,10)
  (1,10)
(2 rows)
 
 SELECT task_tracker_non_partition_column_select(20);
- task_tracker_non_partition_column_select 
+ task_tracker_non_partition_column_select
 ---------------------------------------------------------------------
  (0,20)
  (2,20)
 (2 rows)
 
 SELECT task_tracker_non_partition_column_select(30);
- task_tracker_non_partition_column_select 
+ task_tracker_non_partition_column_select
 ---------------------------------------------------------------------
  (0,30)
  (3,30)
 (2 rows)
 
 SELECT task_tracker_non_partition_column_select(40);
- task_tracker_non_partition_column_select 
+ task_tracker_non_partition_column_select
 ---------------------------------------------------------------------
  (0,40)
  (4,40)
 (2 rows)
 
 SELECT task_tracker_non_partition_column_select(50);
- 
task_tracker_non_partition_column_select + task_tracker_non_partition_column_select --------------------------------------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); - real_time_non_partition_column_select + real_time_non_partition_column_select --------------------------------------------------------------------- (0,60) (6,60) @@ -823,7 +823,7 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_partition_column_select(1); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -831,7 +831,7 @@ SELECT task_tracker_partition_column_select(1); (3 rows) SELECT task_tracker_partition_column_select(2); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -840,7 +840,7 @@ SELECT task_tracker_partition_column_select(2); (4 rows) SELECT task_tracker_partition_column_select(3); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -849,7 +849,7 @@ SELECT task_tracker_partition_column_select(3); (4 rows) SELECT task_tracker_partition_column_select(4); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -858,7 +858,7 @@ SELECT task_tracker_partition_column_select(4); (4 rows) SELECT task_tracker_partition_column_select(5); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -867,7 +867,7 @@ SELECT task_tracker_partition_column_select(5); (4 rows) SELECT task_tracker_partition_column_select(6); - task_tracker_partition_column_select + task_tracker_partition_column_select --------------------------------------------------------------------- (0,10) (1,10) @@ -884,39 +884,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_update(1, 11); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_update(2, 21); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_update(3, 31); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_update(4, 41); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_update(5, 51); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_update(6, 61); - partition_parameter_update + partition_parameter_update --------------------------------------------------------------------- - + (1 row) CREATE FUNCTION non_partition_parameter_update(int, int) RETURNS void as $$ @@ -926,44 +926,44 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_update(10, 
12); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update(20, 22); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update(30, 32); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update(40, 42); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update(50, 52); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update(60, 62); - non_partition_parameter_update + non_partition_parameter_update --------------------------------------------------------------------- - + (1 row) -- check table after updates SELECT * FROM plpgsql_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- 0 | 12 0 | 22 @@ -971,12 +971,12 @@ SELECT * FROM plpgsql_table ORDER BY key, value; 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 11 1 | 11 2 | 21 @@ -999,39 +999,39 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_delete(1, 11); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_delete(2, 21); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_delete(3, 31); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_delete(4, 41); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_delete(5, 51); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT partition_parameter_delete(6, 61); - partition_parameter_delete + partition_parameter_delete --------------------------------------------------------------------- - + (1 row) CREATE FUNCTION non_partition_parameter_delete(int) RETURNS void as $$ @@ -1041,59 +1041,59 @@ END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete(12); - non_partition_parameter_delete + non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete(22); - non_partition_parameter_delete + non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete(32); - non_partition_parameter_delete + non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete(42); - non_partition_parameter_delete + 
non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete(52); - non_partition_parameter_delete + non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete(62); - non_partition_parameter_delete + non_partition_parameter_delete --------------------------------------------------------------------- - + (1 row) -- check table after deletes SELECT * FROM plpgsql_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- check whether we can handle execute parameters CREATE TABLE execute_parameter_test (key int, val date); SELECT create_distributed_table('execute_parameter_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DO $$ @@ -1112,9 +1112,9 @@ CREATE TABLE func_parameter_test ( PRIMARY KEY (key, seq) ); SELECT create_distributed_table('func_parameter_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION insert_with_max(pkey text) RETURNS VOID AS @@ -1135,43 +1135,43 @@ $BODY$ $BODY$ LANGUAGE plpgsql; SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT insert_with_max('key'); - insert_with_max + insert_with_max --------------------------------------------------------------------- - + (1 row) SELECT key, seq FROM func_parameter_test ORDER BY seq; - key | seq + key | seq --------------------------------------------------------------------- key | 1 key | 2 @@ -1187,9 +1187,9 @@ DROP TABLE func_parameter_test; SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION ddl_in_plpgsql() @@ -1201,15 +1201,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT ddl_in_plpgsql(); - ddl_in_plpgsql + ddl_in_plpgsql --------------------------------------------------------------------- - + (1 row) SELECT ddl_in_plpgsql(); - ddl_in_plpgsql + ddl_in_plpgsql --------------------------------------------------------------------- - + (1 row) -- test prepared ddl with multi search path to make sure the schema name doesn't leak on @@ -1224,9 +1224,9 @@ $BODY$ LANGUAGE plpgsql; CREATE SCHEMA otherschema; SET search_path TO otherschema, public; SELECT ddl_in_plpgsql(); - ddl_in_plpgsql + ddl_in_plpgsql 
--------------------------------------------------------------------- - + (1 row) DROP INDEX prepared_index; @@ -1234,20 +1234,20 @@ DROP INDEX prepared_index; -- created on this table, but instead on the table in the public schema CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT ddl_in_plpgsql(); - ddl_in_plpgsql + ddl_in_plpgsql --------------------------------------------------------------------- - + (1 row) -- verify the index is created in the correct schema SELECT schemaname, indexrelname FROM pg_stat_all_indexes WHERE indexrelname = 'prepared_index'; - schemaname | indexrelname + schemaname | indexrelname --------------------------------------------------------------------- otherschema | prepared_index (1 row) @@ -1264,15 +1264,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT copy_in_plpgsql(); - copy_in_plpgsql + copy_in_plpgsql --------------------------------------------------------------------- - + (1 row) SELECT copy_in_plpgsql(); - copy_in_plpgsql + copy_in_plpgsql --------------------------------------------------------------------- - + (1 row) -- test prepared COPY on a non-distributed table @@ -1285,15 +1285,15 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SELECT local_copy_in_plpgsql(); - local_copy_in_plpgsql + local_copy_in_plpgsql --------------------------------------------------------------------- - + (1 row) SELECT local_copy_in_plpgsql(); - local_copy_in_plpgsql + local_copy_in_plpgsql --------------------------------------------------------------------- - + (1 row) -- type statements should not crash nor leak schema specifications onto cached statements @@ -1309,22 +1309,22 @@ BEGIN END; $function$; SELECT type_ddl_plpgsql(); - type_ddl_plpgsql + type_ddl_plpgsql --------------------------------------------------------------------- - + (1 row) -- create same type in new schema, owner of this new type should change CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); - type_ddl_plpgsql + type_ddl_plpgsql --------------------------------------------------------------------- - + (1 row) -- find all renamed types to verify that the schema name didn't leak and that no crash happened SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; - nspname | typname + nspname | typname --------------------------------------------------------------------- public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index c5e3adcae..8338ee3f9 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -2,8 +2,8 @@ -- MULTI_PREPARE_SQL -- -- many of the tests in this file are intended for testing the non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We have a bunch of other tests that trigger the fast-path router +-- router planner, so we're explicitly disabling it in this file. +-- We have a bunch of other tests that trigger the fast-path router SET citus.enable_fast_path_router_planner TO false; -- Tests covering PREPARE statements.
Many of the queries are -- taken from other regression test files and converted into @@ -103,111 +103,111 @@ SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- execute prepared statements EXECUTE prepared_test_1; - count + count --------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; - count + count --------------------------------------------------------------------- 12000 (1 row) EXECUTE prepared_test_3; - count + count --------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_4; - count + count --------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_5; - count + count --------------------------------------------------------------------- 39 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); - count + count --------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); - count + count --------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- now, execute prepared statements with random order EXECUTE prepared_test_6(155); - count + count --------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_3; - count + count --------------------------------------------------------------------- 1956 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) EXECUTE prepared_test_5; - count + count --------------------------------------------------------------------- 39 (1 row) EXECUTE prepared_test_1; - count + count --------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_6(1555); - count + count --------------------------------------------------------------------- 10185 (1 row) EXECUTE prepared_test_4; - count + count --------------------------------------------------------------------- 7806 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_2; - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -215,7 +215,7 @@ EXECUTE prepared_test_2; -- CREATE TABLE ... 
AS EXECUTE prepared_statement tests CREATE TEMP TABLE prepared_sql_test_7 AS EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); SELECT * from prepared_sql_test_7; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) @@ -224,26 +224,26 @@ SELECT * from prepared_sql_test_7; RESET citus.task_executor_type; -- execute prepared statements EXECUTE prepared_test_1; - count + count --------------------------------------------------------------------- 2985 (1 row) EXECUTE prepared_test_2; - count + count --------------------------------------------------------------------- 12000 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); - count + count --------------------------------------------------------------------- 11813 (1 row) EXECUTE prepared_test_6(1555); - count + count --------------------------------------------------------------------- 10185 (1 row) @@ -261,9 +261,9 @@ CREATE TABLE router_executor_table ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('router_executor_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test parameterized inserts @@ -276,7 +276,7 @@ EXECUTE prepared_insert('comment-4', '(4, 40)'); EXECUTE prepared_insert('comment-5', '(5, 50)'); EXECUTE prepared_insert('comment-6', '(6, 60)'); SELECT * FROM router_executor_table ORDER BY comment; - id | comment | stats + id | comment | stats --------------------------------------------------------------------- 1 | comment-1 | (1,10) 1 | comment-2 | (2,20) @@ -291,37 +291,37 @@ PREPARE prepared_select(integer, integer) AS SELECT count(*) FROM router_executor_table WHERE id = 1 AND stats = ROW($1, $2)::test_composite_type; EXECUTE prepared_select(1, 10); - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(2, 20); - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(3, 30); - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(4, 40); - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(5, 50); - count + count --------------------------------------------------------------------- 1 (1 row) EXECUTE prepared_select(6, 60); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -348,9 +348,9 @@ CREATE TABLE prepare_table ( SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('prepare_table','key','hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) PREPARE prepared_no_parameter_insert AS @@ -400,7 +400,7 @@ EXECUTE prepared_non_partition_parameter_insert(50); EXECUTE prepared_non_partition_parameter_insert(60); -- check inserted values SELECT * FROM prepare_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- 0 | 10 0 | 20 @@ -408,24 +408,24 @@ SELECT * FROM prepare_table ORDER BY key, value; 0 | 40 0 | 50 0 | 60 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 10 - 
1 | + 1 | 2 | 20 - 2 | + 2 | 3 | 30 - 3 | + 3 | 4 | 40 - 4 | + 4 | 5 | 50 - 5 | + 5 | 6 | 60 - 6 | + 6 | 7 | 70 8 | 80 9 | 90 @@ -454,45 +454,45 @@ PREPARE prepared_router_partition_column_select(int) AS key, value; EXECUTE prepared_router_partition_column_select(1); - key | value + key | value --------------------------------------------------------------------- 1 | 10 - 1 | + 1 | (2 rows) EXECUTE prepared_router_partition_column_select(2); - key | value + key | value --------------------------------------------------------------------- 2 | 20 - 2 | + 2 | (2 rows) EXECUTE prepared_router_partition_column_select(3); - key | value + key | value --------------------------------------------------------------------- 3 | 30 - 3 | + 3 | (2 rows) EXECUTE prepared_router_partition_column_select(4); - key | value + key | value --------------------------------------------------------------------- 4 | 40 - 4 | + 4 | (2 rows) EXECUTE prepared_router_partition_column_select(5); - key | value + key | value --------------------------------------------------------------------- 5 | 50 - 5 | + 5 | (2 rows) EXECUTE prepared_router_partition_column_select(6); - key | value + key | value --------------------------------------------------------------------- 6 | 60 - 6 | + 6 | (2 rows) PREPARE prepared_router_non_partition_column_select(int) AS @@ -508,37 +508,37 @@ PREPARE prepared_router_non_partition_column_select(int) AS key, value; EXECUTE prepared_router_non_partition_column_select(10); - key | value + key | value --------------------------------------------------------------------- 0 | 10 (1 row) EXECUTE prepared_router_non_partition_column_select(20); - key | value + key | value --------------------------------------------------------------------- 0 | 20 (1 row) EXECUTE prepared_router_non_partition_column_select(30); - key | value + key | value --------------------------------------------------------------------- 0 | 30 (1 row) EXECUTE prepared_router_non_partition_column_select(40); - key | value + key | value --------------------------------------------------------------------- 0 | 40 (1 row) EXECUTE prepared_router_non_partition_column_select(50); - key | value + key | value --------------------------------------------------------------------- 0 | 50 (1 row) EXECUTE prepared_router_non_partition_column_select(60); - key | value + key | value --------------------------------------------------------------------- 0 | 60 (1 row) @@ -556,42 +556,42 @@ PREPARE prepared_real_time_non_partition_column_select(int) AS key, value; EXECUTE prepared_real_time_non_partition_column_select(10); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(20); - key | value + key | value --------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(30); - key | value + key | value --------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(40); - key | value + key | value --------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(50); - key | value + key | value --------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(60); - key | value + key | value 
--------------------------------------------------------------------- 0 | 60 6 | 60 @@ -610,56 +610,56 @@ PREPARE prepared_real_time_partition_column_select(int) AS key, value; EXECUTE prepared_real_time_partition_column_select(1); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 - 1 | + 1 | (3 rows) EXECUTE prepared_real_time_partition_column_select(2); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 - 2 | + 2 | (4 rows) EXECUTE prepared_real_time_partition_column_select(3); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 - 3 | + 3 | (4 rows) EXECUTE prepared_real_time_partition_column_select(4); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 - 4 | + 4 | (4 rows) EXECUTE prepared_real_time_partition_column_select(5); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 - 5 | + 5 | (4 rows) EXECUTE prepared_real_time_partition_column_select(6); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 - 6 | + 6 | (4 rows) -- check task-tracker executor @@ -676,42 +676,42 @@ PREPARE prepared_task_tracker_non_partition_column_select(int) AS key, value; EXECUTE prepared_task_tracker_non_partition_column_select(10); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(20); - key | value + key | value --------------------------------------------------------------------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(30); - key | value + key | value --------------------------------------------------------------------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(40); - key | value + key | value --------------------------------------------------------------------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(50); - key | value + key | value --------------------------------------------------------------------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(60); - key | value + key | value --------------------------------------------------------------------- 0 | 60 6 | 60 @@ -730,56 +730,56 @@ PREPARE prepared_task_tracker_partition_column_select(int) AS key, value; EXECUTE prepared_task_tracker_partition_column_select(1); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 - 1 | + 1 | (3 rows) EXECUTE prepared_task_tracker_partition_column_select(2); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 2 | 20 - 2 | + 2 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(3); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 3 | 30 - 3 | + 3 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(4); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 4 | 40 - 4 | + 4 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(5); - key | value + key | value 
--------------------------------------------------------------------- 0 | 10 1 | 10 5 | 50 - 5 | + 5 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(6); - key | value + key | value --------------------------------------------------------------------- 0 | 10 1 | 10 6 | 60 - 6 | + 6 | (4 rows) RESET citus.task_executor_type; @@ -804,7 +804,7 @@ EXECUTE prepared_non_partition_parameter_update(50, 52); EXECUTE prepared_non_partition_parameter_update(60, 62); -- check after updates SELECT * FROM prepare_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- 0 | 12 0 | 22 @@ -812,12 +812,12 @@ SELECT * FROM prepare_table ORDER BY key, value; 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | 1 | 11 1 | 11 2 | 21 @@ -852,14 +852,14 @@ EXECUTE prepared_non_partition_parameter_delete(52); EXECUTE prepared_non_partition_parameter_delete(62); -- check after deletes SELECT * FROM prepare_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- Testing parameters + function evaluation @@ -870,9 +870,9 @@ CREATE TABLE prepare_func_table ( value3 timestamptz DEFAULT now() ); SELECT create_distributed_table('prepare_func_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test function evaluation with parameters in an expression @@ -886,7 +886,7 @@ EXECUTE prepared_function_evaluation_insert(4); EXECUTE prepared_function_evaluation_insert(5); EXECUTE prepared_function_evaluation_insert(6); SELECT key, value1 FROM prepare_func_table ORDER BY key; - key | value1 + key | value1 --------------------------------------------------------------------- 2 | 0 3 | 0 @@ -907,7 +907,7 @@ EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); SELECT key, value2 FROM prepare_func_table; - key | value2 + key | value2 --------------------------------------------------------------------- key | value key | value @@ -924,9 +924,9 @@ CREATE TABLE text_partition_column_table ( value int ); SELECT create_distributed_table('text_partition_column_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) PREPARE prepared_relabel_insert(varchar) AS @@ -938,7 +938,7 @@ EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); SELECT key, value FROM text_partition_column_table ORDER BY key; - key | value + key | value --------------------------------------------------------------------- test | 1 test | 1 @@ -954,7 +954,7 @@ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$'); SELECT run_command_on_workers($$ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$') $$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE DOMAIN") (localhost,57638,t,"CREATE DOMAIN") @@ -965,9 +965,9 @@ CREATE TABLE domain_partition_column_table ( value int ); SELECT create_distributed_table('domain_partition_column_table', 'key'); - create_distributed_table + create_distributed_table 
--------------------------------------------------------------------- - + (1 row) PREPARE prepared_coercion_to_domain_insert(text) AS @@ -979,7 +979,7 @@ EXECUTE prepared_coercion_to_domain_insert('test-4'); EXECUTE prepared_coercion_to_domain_insert('test-5'); EXECUTE prepared_coercion_to_domain_insert('test-6'); SELECT key, value FROM domain_partition_column_table ORDER BY key; - key | value + key | value --------------------------------------------------------------------- test-1 | 1 test-2 | 1 @@ -1001,9 +1001,9 @@ CREATE TABLE http_request ( response_time_msec INT ); SELECT create_distributed_table('http_request', 'site_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) PREPARE FOO AS INSERT INTO http_request ( @@ -1020,7 +1020,7 @@ EXECUTE foo; EXECUTE foo; EXECUTE foo; SELECT count(distinct ingest_time) FROM http_request WHERE site_id = 1; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -1044,9 +1044,9 @@ CREATE TABLE test_table (test_id integer NOT NULL, data text); SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_table', 'test_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- avoid 9.6+ only context messages @@ -1054,7 +1054,7 @@ SELECT create_distributed_table('test_table', 'test_id', 'hash'); --plain statement, needs planning SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); NOTICE: replanning - count + count --------------------------------------------------------------------- (0 rows) @@ -1062,12 +1062,12 @@ NOTICE: replanning PREPARE countsome AS SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); EXECUTE countsome; -- should indicate planning NOTICE: replanning - count + count --------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count + count --------------------------------------------------------------------- (0 rows) @@ -1078,12 +1078,12 @@ WHERE shardid IN ( AND nodeport = :worker_1_port; EXECUTE countsome; -- should indicate replanning NOTICE: replanning - count + count --------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count + count --------------------------------------------------------------------- (0 rows) @@ -1093,20 +1093,20 @@ FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - - + + (2 rows) EXECUTE countsome; -- should indicate replanning NOTICE: replanning - count + count --------------------------------------------------------------------- (0 rows) EXECUTE countsome; -- no replanning - count + count --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out index 20ebd7cfe..02d1a72e9 100644 --- a/src/test/regress/expected/multi_prune_shard_list.out +++ b/src/test/regress/expected/multi_prune_shard_list.out @@ -35,63 +35,63 @@ CREATE FUNCTION print_sorted_shard_intervals(regclass) CREATE TABLE pruning ( 
species text, last_pruned date, plant_id integer ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('pruning', 'species', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- with no values, expect all shards SELECT prune_using_no_values('pruning'); - prune_using_no_values + prune_using_no_values --------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); - prune_using_single_value + prune_using_single_value --------------------------------------------------------------------- {800002} (1 row) -- null values should result in no pruning SELECT prune_using_single_value('pruning', NULL); - prune_using_single_value + prune_using_single_value --------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) -- build an OR clause and expect more than one shard SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); - prune_using_either_value + prune_using_either_value --------------------------------------------------------------------- {800002,800001} (1 row) -- an AND clause with values on different shards returns no shards SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); - prune_using_both_values + prune_using_both_values --------------------------------------------------------------------- {} (1 row) -- even if both values are on the same shard, a value can't be equal to two others SELECT prune_using_both_values('pruning', 'tomato', 'rose'); - prune_using_both_values + prune_using_both_values --------------------------------------------------------------------- {} (1 row) -- unit test of the equality expression generation code SELECT debug_equality_expression('pruning'); - debug_equality_expression + debug_equality_expression --------------------------------------------------------------------- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1} (1 row) -- print the initial ordering of shard intervals SELECT print_sorted_shard_intervals('pruning'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800000,800001,800002,800003} (1 row) @@ -105,32 +105,32 @@ UPDATE pg_dist_shard set shardminvalue = -1073741824 WHERE shardid = 800001; -- create range distributed table and observe shard pruning CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); SELECT create_distributed_table('pruning_range', 'species', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- create worker shards SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 800004 (1 row) SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 800005 (1 row) SELECT
master_create_empty_shard('pruning_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 800006 (1 row) SELECT master_create_empty_shard('pruning_range'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 800007 (1 row) @@ -142,7 +142,7 @@ UPDATE pg_dist_shard SET shardminvalue = 'e', shardmaxvalue = 'f' WHERE shardid UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid = 800007; -- print the ordering of shard intervals with range partitioning as well SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) @@ -150,7 +150,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL WHERE shardid = 800005; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800004,800006,800007,800005} (1 row) @@ -158,7 +158,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800006; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800004,800007,800005,800006} (1 row) @@ -166,7 +166,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800004; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800007,800004,800005,800006} (1 row) @@ -174,7 +174,7 @@ SELECT print_sorted_shard_intervals('pruning_range'); -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 800007; SELECT print_sorted_shard_intervals('pruning_range'); - print_sorted_shard_intervals + print_sorted_shard_intervals --------------------------------------------------------------------- {800004,800005,800006,800007} (1 row) @@ -187,23 +187,23 @@ CREATE TABLE coerce_hash ( value text NOT NULL ); SELECT create_distributed_table('coerce_hash', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO coerce_hash VALUES (1, 'test value'); -- All three of the following should return the same results... -- SELECT with same type as partition column SELECT * FROM coerce_hash WHERE id = 1::bigint; - id | value + id | value --------------------------------------------------------------------- 1 | test value (1 row) -- SELECT with similar type to partition column SELECT * FROM coerce_hash WHERE id = 1; - id | value + id | value --------------------------------------------------------------------- 1 | test value (1 row) @@ -216,13 +216,13 @@ SELECT * FROM coerce_hash WHERE id = 1; -- before the underlying issue was addressed.
It looks like a boring -- test now, but if the old behavior is restored, it should crash again. SELECT * FROM coerce_hash WHERE id = 1.0; - id | value + id | value --------------------------------------------------------------------- 1 | test value (1 row) SELECT * FROM coerce_hash WHERE id = 1.0::numeric; - id | value + id | value --------------------------------------------------------------------- 1 | test value (1 row) diff --git a/src/test/regress/expected/multi_query_directory_cleanup.out b/src/test/regress/expected/multi_query_directory_cleanup.out index 70259e929..5ce27e966 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup.out +++ b/src/test/regress/expected/multi_query_directory_cleanup.out @@ -17,7 +17,7 @@ with silence as ( ) select count(*) * 0 zero from silence; - zero + zero --------------------------------------------------------------------- 0 (1 row) @@ -27,61 +27,61 @@ BEGIN; -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) @@ -91,146 +91,146 @@ SELECT pg_ls_dir('base/pgsql_job_cache'); BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; 
FETCH 1 FROM c_03; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; 
FETCH 1 FROM c_19; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f + f --------------------------------------------------------------------- (0 rows) @@ -239,13 +239,13 @@ CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f + f --------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_query_directory_cleanup_0.out b/src/test/regress/expected/multi_query_directory_cleanup_0.out index 1af0a6514..37fbcc364 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup_0.out +++ b/src/test/regress/expected/multi_query_directory_cleanup_0.out @@ -17,7 +17,7 @@ with silence as ( ) select count(*) * 0 zero from silence; - zero + zero --------------------------------------------------------------------- 0 (1 row) @@ -27,61 +27,61 @@ BEGIN; -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) @@ -91,146 +91,146 @@ SELECT pg_ls_dir('base/pgsql_job_cache'); BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; 
FETCH 1 FROM c_02; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; 
FETCH 1 FROM c_18; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; - revenue + revenue --------------------------------------------------------------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f + f --------------------------------------------------------------------- master_job_0007 master_job_0008 @@ -259,7 +259,7 @@ CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; - f + f --------------------------------------------------------------------- master_job_0008 master_job_0009 @@ -282,7 +282,7 @@ SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); - pg_ls_dir + pg_ls_dir --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_read_from_secondaries.out b/src/test/regress/expected/multi_read_from_secondaries.out index 6df549840..40d879aa8 100644 --- a/src/test/regress/expected/multi_read_from_secondaries.out +++ b/src/test/regress/expected/multi_read_from_secondaries.out @@ -8,15 +8,15 @@ ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('source_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO dest_table (a, b) VALUES (1, 1); @@ -24,7 +24,7 @@ INSERT INTO dest_table (a, b) VALUES (2, 1); INSERT INTO source_table (a, b) VALUES (10, 10); -- simulate actually having secondary nodes SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node; - nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster + nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster --------------------------------------------------------------------- 1 | 1 | localhost | 57637 | default | t | primary | default 2 | 2 | localhost | 57638 | default | t | primary | default @@ -38,14 +38,14 @@ ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' -- router selects are allowed SELECT a FROM dest_table WHERE a = 1 ORDER BY 1; - a + a --------------------------------------------------------------------- 1 (1 row) -- real-time selects are also allowed SELECT a FROM dest_table ORDER BY 1; - a + a --------------------------------------------------------------------- 1 2 @@ -58,19 +58,19 @@ SELECT FROM ( WITH cte AS ( - SELECT - DISTINCT dest_table.a - FROM - dest_table, source_table - WHERE - source_table.a = dest_table.a AND + SELECT + DISTINCT dest_table.a + FROM + dest_table, source_table + WHERE + source_table.a = dest_table.a AND dest_table.b IN (1,2,3,4) ) SELECT * FROM cte ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1; DEBUG: generating subplan 4_1 for CTE cte: SELECT DISTINCT dest_table.a FROM public.dest_table, public.source_table WHERE ((source_table.a OPERATOR(pg_catalog.=) dest_table.a) AND (dest_table.b
OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 4_2 for subquery SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte ORDER BY a DESC LIMIT 5 DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT a FROM (SELECT intermediate_result.a FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo ORDER BY a - a + a --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out index 3c25cb7db..795ce8464 100644 --- a/src/test/regress/expected/multi_real_time_transaction.out +++ b/src/test/regress/expected/multi_real_time_transaction.out @@ -7,39 +7,39 @@ SET search_path = 'multi_real_time_transaction'; SET citus.shard_replication_factor to 1; CREATE TABLE test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('test_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY test_table FROM stdin delimiter ','; CREATE TABLE co_test_table(id int, col_1 int, col_2 text); SELECT create_distributed_table('co_test_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY co_test_table FROM stdin delimiter ','; CREATE TABLE ref_test_table(id int, col_1 int, col_2 text); SELECT create_reference_table('ref_test_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \COPY ref_test_table FROM stdin delimiter ','; -- Test with select and router insert BEGIN; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES(7,8,'gg'); SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 7 (1 row) @@ -48,14 +48,14 @@ ROLLBACK; -- Test with select and multi-row insert BEGIN; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table VALUES (7,8,'gg'),(8,9,'hh'),(9,10,'ii'); SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 9 (1 row) @@ -64,14 +64,14 @@ ROLLBACK; -- Test with INSERT .. 
SELECT BEGIN; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) INSERT INTO test_table SELECT * FROM co_test_table; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 12 (1 row) @@ -80,14 +80,14 @@ ROLLBACK; -- Test with COPY BEGIN; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) \COPY test_table FROM stdin delimiter ','; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 9 (1 row) @@ -96,7 +96,7 @@ ROLLBACK; -- Test with router update BEGIN; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 27 (1 row) @@ -104,7 +104,7 @@ SELECT SUM(col_1) FROM test_table; UPDATE test_table SET col_1 = 0 WHERE id = 2; DELETE FROM test_table WHERE id = 3; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 20 (1 row) @@ -113,14 +113,14 @@ ROLLBACK; -- Test with multi-shard update BEGIN; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 27 (1 row) UPDATE test_table SET col_1 = 5; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 30 (1 row) @@ -129,7 +129,7 @@ ROLLBACK; -- Test with subqueries BEGIN; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 27 (1 row) @@ -142,7 +142,7 @@ WHERE test_table.col_1 IN (SELECT co_test_table.col_1 FROM co_test_table WHERE co_test_table.id = 1) AND test_table.id = 1; SELECT SUM(col_1) FROM test_table; - sum + sum --------------------------------------------------------------------- 29 (1 row) @@ -160,14 +160,14 @@ INSERT INTO partitioning_test VALUES (2, '2010-07-07'); SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; SELECT COUNT(*) FROM partitioning_test; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -175,7 +175,7 @@ SELECT COUNT(*) FROM partitioning_test; INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); SELECT COUNT(*) FROM partitioning_test; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -186,14 +186,14 @@ DROP TABLE partitioning_test; BEGIN; CREATE TABLE test_table_inn(id int, num_1 int); SELECT create_distributed_table('test_table_inn','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_inn VALUES(1,3),(4,5),(6,7); SELECT COUNT(*) FROM test_table_inn; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -203,7 +203,7 @@ COMMIT; -- Test with utility functions BEGIN; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -211,7 +211,7 @@ SELECT COUNT(*) FROM test_table; CREATE INDEX tt_ind_1 ON test_table(col_1); ALTER TABLE test_table ADD CONSTRAINT num_check CHECK (col_1 < 50); SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -219,7 +219,7 @@ SELECT COUNT(*) FROM test_table; ROLLBACK; -- We don't get a distributed transaction id outside a transaction block SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; - ?column? + ?column? --------------------------------------------------------------------- f (1 row) @@ -227,7 +227,7 @@ SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIM -- We should get a distributed transaction id inside a transaction block BEGIN; SELECT (get_current_transaction_id()).transaction_number > 0 FROM test_table LIMIT 1; - ?column? + ?column? 
--------------------------------------------------------------------- t (1 row) @@ -244,22 +244,22 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; $$); - run_command_on_master_and_workers + run_command_on_master_and_workers --------------------------------------------------------------------- - + (1 row) -- SELECT should be rolled back because we send BEGIN BEGIN; SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) -- Sneakily insert directly into shards SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; - insert_row_test + insert_row_test --------------------------------------------------------------------- t t @@ -270,14 +270,14 @@ SELECT insert_row_test(pg_typeof(test_table)::name) FROM test_table; (6 rows) SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 12 (1 row) ABORT; SELECT count(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -288,7 +288,7 @@ ALTER TABLE co_test_table ADD CONSTRAINT f_key_ctt FOREIGN KEY (id) REFERENCES t BEGIN; DELETE FROM test_table where id = 1 or id = 3; SELECT * FROM co_test_table; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -299,7 +299,7 @@ ROLLBACK; SET client_min_messages TO ERROR; alter system set deadlock_timeout TO '250ms'; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -348,7 +348,7 @@ ROLLBACK; -- gonna need a non-superuser as we'll use RLS to test GUC propagation CREATE USER rls_user; SELECT run_command_on_workers('CREATE USER rls_user'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") @@ -357,14 +357,14 @@ SELECT run_command_on_workers('CREATE USER rls_user'); GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) @@ -381,7 +381,7 @@ SET ROLE rls_user; SET search_path = 'multi_real_time_transaction'; -- shouldn't see all rows because of RLS SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -390,7 +390,7 @@ BEGIN; -- without enabling SET LOCAL prop, still won't work SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -399,7 +399,7 @@ SET LOCAL citus.propagate_set_commands TO 'local'; -- now we should be good to go SET LOCAL app.show_rows TO TRUE; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -407,14 
+407,14 @@ SELECT COUNT(*) FROM test_table; SAVEPOINT disable_rls; SET LOCAL app.show_rows TO FALSE; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 4 (1 row) ROLLBACK TO SAVEPOINT disable_rls; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -423,7 +423,7 @@ SAVEPOINT disable_rls_for_real; SET LOCAL app.show_rows TO FALSE; RELEASE SAVEPOINT disable_rls_for_real; SELECT COUNT(*) FROM test_table; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -436,7 +436,7 @@ RESET ROLE; SET ROLE rls_user; SET search_path = 'multi_real_time_transaction'; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' @@ -450,14 +450,14 @@ SELECT * FROM co_test_table ORDER BY id, col_1; SET search_path = 'multi_real_time_transaction'; -- shard xxxxx contains data from tenant id 1 SELECT * FROM co_test_table_1610004 ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' (2 rows) SELECT * FROM co_test_table_1610006 ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- (0 rows) @@ -465,7 +465,7 @@ SELECT * FROM co_test_table_1610006 ORDER BY id, col_1; SET search_path = 'multi_real_time_transaction'; -- shard xxxxx contains data from tenant id 3 SELECT * FROM co_test_table_1610005 ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' @@ -474,7 +474,7 @@ SELECT * FROM co_test_table_1610005 ORDER BY id, col_1; -- shard xxxxx contains data from tenant id 2 SELECT * FROM co_test_table_1610007 ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 2 | 30 | 'bb10' (1 row) @@ -488,7 +488,7 @@ CREATE POLICY filter_by_tenant_id ON co_test_table TO PUBLIC SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', $cmd$CREATE POLICY filter_by_tenant_id ON %s TO PUBLIC USING (id = ANY(string_to_array(current_setting('app.tenant_id'), ',')::int[]));$cmd$); - run_command_on_shards + run_command_on_shards --------------------------------------------------------------------- (1610004,t,"CREATE POLICY") (1610005,t,"CREATE POLICY") @@ -501,7 +501,7 @@ SET citus.enable_ddl_propagation to off; ALTER TABLE co_test_table ENABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s ENABLE ROW LEVEL SECURITY;'); - run_command_on_shards + run_command_on_shards --------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") @@ -517,7 +517,7 @@ SET LOCAL citus.propagate_set_commands TO 'local'; -- Only tenant id 1 will be fetched, and so on. 
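-- A minimal sketch of the pattern exercised below, with illustrative values
-- (not part of the recorded output): the shard-level filter_by_tenant_id
-- policies read app.tenant_id on the workers, so the SET LOCAL only takes
-- effect there once citus.propagate_set_commands is 'local'.
--   BEGIN;
--   SET LOCAL citus.propagate_set_commands TO 'local';
--   SET LOCAL app.tenant_id TO 1;
--   SELECT * FROM co_test_table;  -- each worker evaluates the policy with tenant id 1
--   COMMIT;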
SET LOCAL app.tenant_id TO 1; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' @@ -526,7 +526,7 @@ SELECT * FROM co_test_table ORDER BY id, col_1; SAVEPOINT disable_rls; SET LOCAL app.tenant_id TO 3; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' @@ -535,7 +535,7 @@ SELECT * FROM co_test_table ORDER BY id, col_1; ROLLBACK TO SAVEPOINT disable_rls; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' @@ -545,7 +545,7 @@ SAVEPOINT disable_rls_for_real; SET LOCAL app.tenant_id TO 3; RELEASE SAVEPOINT disable_rls_for_real; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 3 | 4 | 'cc1' 3 | 5 | 'cc2' @@ -557,7 +557,7 @@ RELEASE SAVEPOINT disable_rls; -- via RLS policies that use GUCs. SET LOCAL app.tenant_id TO '1,3'; SELECT * FROM co_test_table ORDER BY id, col_1; - id | col_1 | col_2 + id | col_1 | col_2 --------------------------------------------------------------------- 1 | 2 | 'cc2' 1 | 20 | 'aa10' @@ -573,7 +573,7 @@ SET citus.enable_ddl_propagation to off; ALTER TABLE co_test_table DISABLE ROW LEVEL SECURITY; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table','ALTER TABLE %s DISABLE ROW LEVEL SECURITY;'); - run_command_on_shards + run_command_on_shards --------------------------------------------------------------------- (1610004,t,"ALTER TABLE") (1610005,t,"ALTER TABLE") @@ -585,7 +585,7 @@ SET citus.enable_ddl_propagation to off; DROP POLICY filter_by_tenant_id ON co_test_table; SET citus.enable_ddl_propagation to on; SELECT run_command_on_shards('co_test_table', 'DROP POLICY filter_by_tenant_id ON %s;'); - run_command_on_shards + run_command_on_shards --------------------------------------------------------------------- (1610004,t,"DROP POLICY") (1610005,t,"DROP POLICY") @@ -598,21 +598,21 @@ SELECT run_command_on_shards('co_test_table', 'DROP POLICY filter_by_tenant_id O BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT id, pg_advisory_lock(15) FROM test_table ORDER BY 1 DESC; - id | pg_advisory_lock + id | pg_advisory_lock --------------------------------------------------------------------- - 6 | - 5 | - 4 | - 3 | - 2 | - 1 | + 6 | + 5 | + 4 | + 3 | + 2 | + 1 | (6 rows) ROLLBACK; SET client_min_messages TO DEFAULT; alter system set deadlock_timeout TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -623,14 +623,14 @@ SET citus.select_opens_transaction_block TO off; -- we use a different advisory lock because previous tests -- still hold the advisory locks since the sessions are still active SELECT id, pg_advisory_xact_lock(16) FROM test_table ORDER BY id; - id | pg_advisory_xact_lock + id | pg_advisory_xact_lock --------------------------------------------------------------------- - 1 | - 2 | - 3 | - 4 | - 5 | - 6 | + 1 | + 2 | + 3 | + 4 | + 5 | + 6 | (6 rows) END; diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out index
23f0c8f10..4a4f79b7f 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -5,9 +5,9 @@ INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); -- create the reference table SELECT create_reference_table('reference_table_test'); NOTICE: Copying data from local table... - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- see that partkey is NULL @@ -17,7 +17,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'reference_table_test'::regclass; - partmethod | partkeyisnull | repmodel + partmethod | partkeyisnull | repmodel --------------------------------------------------------------------- n | t | t (1 row) @@ -29,7 +29,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1250000 | t | t (1 row) @@ -43,14 +43,14 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | all_placements_healthy | replicated_to_all + shardid | all_placements_healthy | replicated_to_all --------------------------------------------------------------------- 1250000 | t | t (1 row) -- check whether data was copied into distributed table SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -68,7 +68,7 @@ SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -83,7 +83,7 @@ FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -95,7 +95,7 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 3; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -110,7 +110,7 @@ WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; - value_1 | value_3 + value_1 | value_3 --------------------------------------------------------------------- 4 | 4 5 | 5 @@ -123,7 +123,7 @@ FROM ORDER BY 2 ASC LIMIT 2; - value_1 | ?column? + value_1 | ?column? --------------------------------------------------------------------- 1 | 15 2 | 30 @@ -135,7 +135,7 @@ FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; - value_1 | ?column? + value_1 | ?column? 
--------------------------------------------------------------------- 3 | 45 4 | 60 @@ -147,7 +147,7 @@ FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 @@ -159,7 +159,7 @@ FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- (0 rows) @@ -169,7 +169,7 @@ FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 @@ -184,7 +184,7 @@ WHERE value_3 = '2' OR value_1 = 3 ) AND FALSE; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- (0 rows) @@ -201,7 +201,7 @@ WHERE reference_table_test ) AND value_1 < 3; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -216,7 +216,7 @@ WHERE ( '1', '2' ); - value_4 + value_4 --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 @@ -231,7 +231,7 @@ WHERE ( '5', '2' ); - date_part + date_part --------------------------------------------------------------------- 2 5 @@ -243,7 +243,7 @@ FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; - value_4 + value_4 --------------------------------------------------------------------- (0 rows) @@ -253,7 +253,7 @@ FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; - value_4 + value_4 --------------------------------------------------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 @@ -265,7 +265,7 @@ FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); - value_4 + value_4 --------------------------------------------------------------------- (0 rows) @@ -275,7 +275,7 @@ FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -288,7 +288,7 @@ FROM reference_table_test WHERE FALSE; - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -298,7 +298,7 @@ FROM reference_table_test WHERE int4eq(1, 2); - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -307,7 +307,7 @@ SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; - id | age + id | age --------------------------------------------------------------------- 1 | 15 2 | 30 @@ -322,7 +322,7 @@ SELECT * FROM some_data; - value_2 | value_4 + value_2 | value_4 --------------------------------------------------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 @@ -332,7 +332,7 @@ FROM -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -344,7 +344,7 
@@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 | position + value_1 | value_2 | value_3 | value_4 | position --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) @@ -355,7 +355,7 @@ FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; - value_1 | value_2 | value_3 | value_4 | position + value_1 | value_2 | value_3 | value_4 | position --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 @@ -368,7 +368,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 @@ -380,7 +380,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) @@ -391,7 +391,7 @@ SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) @@ -410,7 +410,7 @@ HAVING SUM(value_2) > 3 ORDER BY 1; - value_4 | sum + value_4 | sum --------------------------------------------------------------------- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 @@ -427,7 +427,7 @@ FROM GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; - value_4 | value_3 | sum + value_4 | value_3 | sum --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 @@ -448,7 +448,7 @@ FROM reference_table_test ORDER BY 1; - value_4 + value_4 --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 @@ -462,7 +462,7 @@ SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; - value_4 | rank + value_4 | rank --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 @@ -479,7 +479,7 @@ SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; - value_4 | avg + value_4 | avg --------------------------------------------------------------------- Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 @@ -502,7 +502,7 @@ SELECT END) as c FROM reference_table_test; - c + c --------------------------------------------------------------------- 3 (1 row) @@ -523,7 +523,7 @@ SELECT value_1 ORDER BY 1; - value_1 | c + value_1 | c --------------------------------------------------------------------- 1 | 0 2 | 0 @@ -535,7 +535,7 @@ SELECT -- selects inside a transaction work fine as well BEGIN; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@
-548,7 +548,7 @@ SELECT * FROM reference_table_test; (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 @@ -563,13 +563,13 @@ DECLARE test_cursor CURSOR FOR WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 @@ -577,12 +577,12 @@ FETCH ALL test_cursor; (3 rows) FETCH test_cursor; -- fetch one row after the last - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) FETCH BACKWARD test_cursor; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -597,16 +597,16 @@ CREATE TEMP TABLE temp_reference_test as -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- ingest some data to both tables @@ -624,7 +624,7 @@ WHERE t1.value_2 = t2.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -639,7 +639,7 @@ WHERE t1.value_2 = t3.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 4 5 @@ -653,7 +653,7 @@ WHERE t2.value_2 = t3.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- (0 rows) @@ -666,7 +666,7 @@ WHERE t1.value_2 = t2.value_1 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -681,7 +681,7 @@ WHERE t1.value_2 = t2.value_3::int ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -696,7 +696,7 @@ WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -713,7 +713,7 @@ WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -727,7 +727,7 @@ WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -740,7 +740,7 @@ FROM JOIN reference_table_test_third 
t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 (1 row) @@ -753,7 +753,7 @@ FROM LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -769,18 +769,18 @@ FROM RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; - value_1 + value_1 --------------------------------------------------------------------- 3 - + (2 rows) -- now, let's have some tests on UPSERTs and uniqueness CREATE TABLE reference_table_test_fourth (value_1 int, value_2 float PRIMARY KEY, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fourth'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \set VERBOSITY terse @@ -795,14 +795,14 @@ ERROR: null value in column "value_2" violates not-null constraint \set VERBOSITY default -- let's run some upserts INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01') ON CONFLICT DO NOTHING RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3, value_2 = EXCLUDED.value_2 RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 10 | Thu Dec 01 00:00:00 2016 (1 row) @@ -811,7 +811,7 @@ INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON C INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3 || '+10', value_2 = EXCLUDED.value_2 + 10, value_1 = EXCLUDED.value_1 + 10, value_4 = '2016-12-10' RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 11 | 11 | 10+10 | Sat Dec 10 00:00:00 2016 (1 row) @@ -825,7 +825,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test_fourth'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | all_placements_healthy | replicated_to_all + shardid | all_placements_healthy | replicated_to_all --------------------------------------------------------------------- 1250003 | t | t (1 row) @@ -836,7 +836,7 @@ DELETE FROM WHERE value_1 = 1 RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 @@ -847,7 +847,7 @@ DELETE FROM WHERE value_4 = '2016-12-05' RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (1 row) @@ -859,7 +859,7 @@ SET WHERE value_2 = 2 RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 @@ -871,7 +871,7 @@ UPDATE SET value_2 = 15, value_1 = 45 RETURNING *; - value_1 | value_2 | value_3 |
value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 @@ -883,7 +883,7 @@ RETURNING *; DELETE FROM reference_table_test RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 @@ -895,9 +895,9 @@ RETURNING *; -- some tests with function evaluation and sequences CREATE TABLE reference_table_test_fifth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fifth'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE SEQUENCE example_ref_value_seq; @@ -905,7 +905,7 @@ CREATE SEQUENCE example_ref_value_seq; INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 2 (1 row) @@ -913,7 +913,7 @@ RETURNING value_1, value_2; INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 2 | 2 (1 row) @@ -921,7 +921,7 @@ RETURNING value_1, value_2; INSERT INTO reference_table_test_fifth (value_2, value_3) VALUES (nextval('example_ref_value_seq'), nextval('example_ref_value_seq')::text) RETURNING value_1, value_2, value_3; - value_1 | value_2 | value_3 + value_1 | value_2 | value_3 --------------------------------------------------------------------- 3 | 1 | 2 (1 row) @@ -931,7 +931,7 @@ UPDATE WHERE value_1 = 1 RETURNING value_1, value_2, value_4 > '2000-01-01'; - value_1 | value_2 | ?column? + value_1 | value_2 | ?column? 
--------------------------------------------------------------------- 1 | 2 | t (1 row) @@ -959,12 +959,12 @@ INSERT INTO FROM reference_table_test RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Fri Jan 01 00:00:00 2016 | 2 | 2 | Sat Jan 02 00:00:00 2016 - | | 3 | - | | | + | | 3 | + | | | (4 rows) INSERT INTO @@ -974,25 +974,25 @@ INSERT INTO FROM reference_table_test JOIN reference_table_test_second USING (value_1) RETURNING *; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - | 1 | | + | 1 | | (1 row) SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 2; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DELETE FROM reference_table_test; @@ -1011,7 +1011,7 @@ FROM WHERE colocated_table_test.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -1024,7 +1024,7 @@ FROM WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -1037,7 +1037,7 @@ FROM WHERE reference_table_test.value_1 = colocated_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -1052,7 +1052,7 @@ WHERE colocated_table_test.value_2 = reference_table_test.value_2 ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test_2" ][ cartesian product reference join "reference_table_test" ][ dual partition join "colocated_table_test" ] - value_2 + value_2 --------------------------------------------------------------------- 1 1 @@ -1068,7 +1068,7 @@ FROM WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ local partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -1082,7 +1082,7 @@ FROM WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -1095,7 +1095,7 @@ FROM WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = 
reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] - value_2 + value_2 --------------------------------------------------------------------- 1 2 @@ -1138,7 +1138,7 @@ FROM WHERE colocated_table_test_2.value_4 = reference_table_test.value_4 RETURNING value_1, value_2; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -1154,7 +1154,7 @@ FROM WHERE colocated_table_test_2.value_1 > reference_table_test.value_2 RETURNING value_1, value_2; - value_1 | value_2 + value_1 | value_2 --------------------------------------------------------------------- 2 | 1 (1 row) @@ -1190,9 +1190,9 @@ ERROR: cannot colocate tables colocated_table_test_2 and reference_table_test DETAIL: Replication models don't match for colocated_table_test_2 and reference_table_test. -- should work silently SELECT mark_tables_colocated('reference_table_test', ARRAY['reference_table_test_fifth']); - mark_tables_colocated + mark_tables_colocated --------------------------------------------------------------------- - + (1 row) -- ensure that reference tables on @@ -1201,18 +1201,18 @@ CREATE SCHEMA reference_schema; -- create with schema prefix CREATE TABLE reference_schema.reference_table_test_sixth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_test_sixth'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SET search_path TO 'reference_schema'; -- create on the schema CREATE TABLE reference_table_test_seventh (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_seventh'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- ingest some data @@ -1227,7 +1227,7 @@ SELECT value_1 FROM reference_schema.reference_table_test_sixth; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -1240,7 +1240,7 @@ FROM reference_table_test_sixth, reference_table_test_seventh WHERE reference_table_test_sixth.value_4 = reference_table_test_seventh.value_4; - value_1 + value_1 --------------------------------------------------------------------- 1 2 @@ -1254,7 +1254,7 @@ FROM colocated_table_test_2, reference_schema.reference_table_test_sixth as reftable WHERE colocated_table_test_2.value_4 = reftable.value_4; - value_2 | value_1 + value_2 | value_1 --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -1272,7 +1272,7 @@ SELECT count(*) FROM reference_table_test; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -1283,7 +1283,7 @@ SELECT count(*) FROM reference_table_test; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1291,34 +1291,34 @@ FROM -- now try dropping one of the existing reference tables -- and check the metadata SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid + logicalrelid --------------------------------------------------------------------- reference_table_test_fifth (1 row) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE
'%reference_table_test_fifth%'; - logicalrelid + logicalrelid --------------------------------------------------------------------- reference_table_test_fifth (1 row) DROP TABLE reference_table_test_fifth; SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid + logicalrelid --------------------------------------------------------------------- (0 rows) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; - logicalrelid + logicalrelid --------------------------------------------------------------------- (0 rows) -- now test DDL changes CREATE TABLE reference_schema.reference_table_ddl (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_ddl'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- CREATE & DROP index and check the workers @@ -1334,17 +1334,17 @@ ALTER TABLE reference_schema.reference_table_ddl ALTER COLUMN value_2 SET DEFAUL ALTER TABLE reference_schema.reference_table_ddl ALTER COLUMN value_3 SET NOT NULL; -- see that Citus applied all DDLs to the table SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time zone | + value_5 | double precision | (4 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- value_2 | double precision | value_2 value_3 | text | value_3 @@ -1353,17 +1353,17 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE -- also to the shard placements \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time zone | + value_5 | double precision | (4 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'reference_schema.reference_index_2_1250019'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- value_2 | double precision | value_2 value_3 | text | value_3 @@ -1373,17 +1373,17 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE DROP INDEX reference_schema.reference_index_2; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- value_2 | double precision | default 25.0 value_3 | text | not null - value_4 | timestamp without time zone | - value_5 | double precision | + value_4 | timestamp without time 
zone | + value_5 | double precision | (4 rows) \di reference_schema.reference_index_2* List of relations - Schema | Name | Type | Owner | Table + Schema | Name | Type | Owner | Table --------------------------------------------------------------------- (0 rows) @@ -1404,14 +1404,14 @@ DETAIL: We currently don't support creating shards on reference tables SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('reference_schema.reference_table_ddl'); - part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy + part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy --------------------------------------------------------------------- t | | 2 | 1536000 | 2 (1 row) SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_schema.reference_table_ddl'::regclass \gset SELECT master_update_shard_statistics(:a_shard_id); - master_update_shard_statistics + master_update_shard_statistics --------------------------------------------------------------------- 8192 (1 row) @@ -1421,7 +1421,7 @@ SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', ERROR: cannot append to shardId 1250019 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables SELECT master_get_table_ddl_events('reference_schema.reference_table_ddl'); - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- CREATE TABLE reference_schema.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision) ALTER TABLE reference_schema.reference_table_ddl OWNER TO postgres @@ -1433,13 +1433,13 @@ SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id; - shardid | shardstate + shardid | shardstate --------------------------------------------------------------------- 1250019 | 1 (1 row) @@ -1457,49 +1457,49 @@ RETURNS void AS ' ' LANGUAGE SQL; TRUNCATE reference_table_test; SELECT select_count_all(); - select_count_all + select_count_all --------------------------------------------------------------------- 0 (1 row) SELECT insert_into_ref_table(1, 1.0, '1', '2016-12-01'); - insert_into_ref_table + insert_into_ref_table --------------------------------------------------------------------- - + (1 row) SELECT insert_into_ref_table(2, 2.0, '2', '2016-12-02'); - insert_into_ref_table + insert_into_ref_table --------------------------------------------------------------------- - + (1 row) SELECT insert_into_ref_table(3, 3.0, '3', '2016-12-03'); - insert_into_ref_table + insert_into_ref_table --------------------------------------------------------------------- - + (1 row) SELECT insert_into_ref_table(4, 4.0, '4', '2016-12-04'); - insert_into_ref_table + insert_into_ref_table 
--------------------------------------------------------------------- - + (1 row) SELECT insert_into_ref_table(5, 5.0, '5', '2016-12-05'); - insert_into_ref_table + insert_into_ref_table --------------------------------------------------------------------- - + (1 row) SELECT insert_into_ref_table(6, 6.0, '6', '2016-12-06'); - insert_into_ref_table + insert_into_ref_table --------------------------------------------------------------------- - + (1 row) SELECT select_count_all(); - select_count_all + select_count_all --------------------------------------------------------------------- 6 (1 row) @@ -1517,7 +1517,7 @@ EXECUTE insert_into_ref_table_pr(5, 5.0, '5', '2016-12-05'); EXECUTE insert_into_ref_table_pr(6, 6.0, '6', '2016-12-06'); -- see the count, then truncate the table SELECT select_count_all(); - select_count_all + select_count_all --------------------------------------------------------------------- 6 (1 row) @@ -1530,23 +1530,23 @@ TRUNCATE reference_table_test; CREATE TYPE reference_comp_key as (key text, value text); CREATE TABLE reference_table_composite (id int PRIMARY KEY, data reference_comp_key); SELECT create_reference_table('reference_table_composite'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- insert and query some data INSERT INTO reference_table_composite (id, data) VALUES (1, ('key_1', 'value_1')::reference_comp_key); INSERT INTO reference_table_composite (id, data) VALUES (2, ('key_2', 'value_2')::reference_comp_key); SELECT * FROM reference_table_composite; - id | data + id | data --------------------------------------------------------------------- 1 | (key_1,value_1) 2 | (key_2,value_2) (2 rows) SELECT (data).key FROM reference_table_composite; - key + key --------------------------------------------------------------------- key_1 key_2 @@ -1557,14 +1557,14 @@ TRUNCATE reference_table_test; BEGIN; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) ROLLBACK; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- (0 rows) @@ -1573,7 +1573,7 @@ BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); COMMIT; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) @@ -1583,7 +1583,7 @@ BEGIN; UPDATE reference_table_test SET value_1 = 10 WHERE value_1 = 2; COMMIT; SELECT * FROM reference_table_test; - value_1 | value_2 | value_3 | value_4 + value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- 10 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out index 28f04bb92..28fd7f502 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -11,9 +11,9 @@ CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE DELETE FROM 
pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- remove non-existing node @@ -22,20 +22,20 @@ ERROR: node at "localhost:xxxxx" does not exist -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- verify node is removed SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -45,7 +45,7 @@ SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset -- add a secondary to check we don't attempt to replicate the table to it SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -53,72 +53,72 @@ SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noder -- remove a node with reference table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- make sure when we add a secondary we don't attempt to add placements to it SELECT 1 FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count + count --------------------------------------------------------------------- 1 (1 row) -- make sure when we disable a secondary we don't remove any placements SELECT master_disable_node('localhost', 9001); - master_disable_node + master_disable_node --------------------------------------------------------------------- - + (1 row) SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; - isactive + isactive --------------------------------------------------------------------- f (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count + count --------------------------------------------------------------------- 1 (1 row) -- make sure when we activate a secondary we don't add any placements SELECT 1 FROM master_activate_node('localhost', 9001); - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count + count --------------------------------------------------------------------- 1 (1 row) -- make sure when we remove a secondary we don't remove any placements SELECT master_remove_node('localhost', 9001); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; - count + count --------------------------------------------------------------------- 1 (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -129,7 +129,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -140,14 +140,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -158,21 +158,21 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -183,7 +183,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -193,14 +193,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -211,7 +211,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | 
shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -222,28 +222,28 @@ ERROR: node at "localhost:xxxxx" does not exist -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) -- try to disable the node before removing it (this used to crash) SELECT master_disable_node('localhost', :worker_2_port); - master_disable_node + master_disable_node --------------------------------------------------------------------- - + (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- re-add the node for the next test SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -251,7 +251,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- remove node in a transaction and ROLLBACK -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -262,7 +262,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -273,14 +273,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -291,7 +291,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -299,15 +299,15 @@ WHERE \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -318,7 +318,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport 
--------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -329,14 +329,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -347,7 +347,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -356,7 +356,7 @@ WHERE -- remove node in a transaction and COMMIT -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -367,7 +367,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -378,14 +378,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -396,7 +396,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -404,15 +404,15 @@ WHERE \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -423,7 +423,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -433,14 +433,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | 
distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -451,7 +451,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -459,7 +459,7 @@ WHERE -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -467,7 +467,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- test inserting a value then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -478,7 +478,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -489,14 +489,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -507,7 +507,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -516,15 +516,15 @@ WHERE BEGIN; INSERT INTO remove_node_reference_table VALUES(1); SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -535,7 +535,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -545,21 +545,21 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 
'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) --verify the data is inserted SELECT * FROM remove_node_reference_table; - column1 + column1 --------------------------------------------------------------------- 1 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -570,12 +570,12 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) SELECT * FROM remove_node_reference_table; - column1 + column1 --------------------------------------------------------------------- 1 (1 row) @@ -584,7 +584,7 @@ SELECT * FROM remove_node_reference_table; -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -592,7 +592,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- test executing DDL command then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -603,7 +603,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -614,14 +614,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -632,7 +632,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -641,15 +641,15 @@ WHERE BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count 
--------------------------------------------------------------------- 0 (1 row) @@ -660,7 +660,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -670,14 +670,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -688,7 +688,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -696,16 +696,16 @@ WHERE SET citus.next_shard_id TO 1380001; -- verify table structure is changed SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - column1 | integer | - column2 | integer | + column1 | integer | + column2 | integer | (2 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) @@ -713,7 +713,7 @@ NOTICE: Replicating reference table "remove_node_reference_table" to the node l -- test DROP table after removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -724,7 +724,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380000 | 1 | 0 | localhost | 57638 (1 row) @@ -735,23 +735,23 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) DROP TABLE remove_node_reference_table; COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -762,18 +762,18 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- (0 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) @@ -781,23 +781,23 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- re-create remove_node_reference_table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- test removing a node while there is a reference table at another schema CREATE SCHEMA remove_node_reference_table_schema; CREATE TABLE remove_node_reference_table_schema.table1(column1 int); SELECT create_reference_table('remove_node_reference_table_schema.table1'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -810,7 +810,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 @@ -822,14 +822,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -842,7 +842,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 @@ -850,14 +850,14 @@ ORDER BY \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -868,7 +868,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -878,14 +878,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) 
\c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -896,7 +896,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -905,7 +905,7 @@ WHERE SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx NOTICE: Replicating reference table "table1" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -913,7 +913,7 @@ NOTICE: Replicating reference table "table1" to the node localhost:xxxxx -- test with master_disable_node -- status before master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -926,7 +926,7 @@ WHERE nodeport = :worker_2_port ORDER BY shardid; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 @@ -938,14 +938,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -957,7 +957,7 @@ FROM WHERE nodeport = :worker_2_port ORDER BY shardid ASC; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 @@ -965,14 +965,14 @@ ORDER BY shardid ASC; \c - - - :master_port SELECT master_disable_node('localhost', :worker_2_port); - master_disable_node + master_disable_node --------------------------------------------------------------------- - + (1 row) -- status after master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -983,7 +983,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -993,14 +993,14 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
--------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -1011,7 +1011,7 @@ FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; - shardid | shardstate | shardlength | nodename | nodeport + shardid | shardstate | shardlength | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -1020,7 +1020,7 @@ WHERE SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:xxxxx NOTICE: Replicating reference table "table1" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -1030,9 +1030,9 @@ DROP TABLE remove_node_reference_table; DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- reload pg_dist_shard_placement table diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index 93926ec0c..44ce0818f 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -15,9 +15,9 @@ CREATE INDEX ON customer_engagements (event_data); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- ingest some data for the tests @@ -27,7 +27,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event'); -- the following queries does the following: -- (i) create a new shard -- (ii) mark the second shard placements as unhealthy --- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones +-- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones -- (iv) do a successful master_copy_shard_placement from the first placement to the second -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -- get the newshardid @@ -50,18 +50,18 @@ ROLLBACK; -- modifications after reparing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) ALTER TABLE customer_engagements ADD COLUMN value float; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); @@ -76,16 +76,16 @@ SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'lo ERROR: source placement must be in finalized state -- "copy" this shard from the first placement 
to the second one SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed to there UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_1_group; -- get the data from the second placement SELECT * FROM customer_engagements; - id | created_at | event_data + id | created_at | event_data --------------------------------------------------------------------- 1 | 01-01-2015 | first event 2 | 02-01-2015 | second event @@ -104,9 +104,9 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('remote_engagements', 'id', 'hash'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- get the newshardid diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index 8e403d670..3f3340469 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -28,15 +28,15 @@ CREATE TABLE stock ( PRIMARY KEY (s_w_id,s_i_id) ); SELECT create_distributed_table('order_line','ol_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('stock','s_w_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -112,7 +112,7 @@ DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 - l_partkey | o_orderkey | count + l_partkey | o_orderkey | count --------------------------------------------------------------------- 18 | 12005 | 1 79 | 5121 | 1 @@ -215,7 +215,7 @@ DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 - l_partkey | o_orderkey | count + l_partkey | o_orderkey | count --------------------------------------------------------------------- (0 rows) @@ -289,7 +289,7 @@ DEBUG: completed cleanup query for job 7 DEBUG: completed cleanup query for job 7 DEBUG: completed cleanup query for job 8 DEBUG: completed cleanup query for job 8 - o_orderkey | o_shippriority | count + o_orderkey | o_shippriority | count --------------------------------------------------------------------- (0 rows) @@ -365,7 +365,7 @@ DEBUG: completed cleanup query for job 10 DEBUG: completed cleanup query for job 10 DEBUG: completed cleanup query for job 11 DEBUG: completed cleanup query for job 11 - o_orderkey | o_shippriority | count + o_orderkey | o_shippriority | count --------------------------------------------------------------------- (0 rows) @@ -439,7 +439,7 @@ DEBUG: completed cleanup query for job 13 DEBUG: completed cleanup query for job 13 DEBUG: completed cleanup query for job 14 DEBUG: completed cleanup query for job 14 - o_orderkey | any_value + o_orderkey | any_value 
--------------------------------------------------------------------- (0 rows) @@ -523,7 +523,7 @@ DEBUG: completed cleanup query for job 16 DEBUG: completed cleanup query for job 16 DEBUG: completed cleanup query for job 17 DEBUG: completed cleanup query for job 17 - s_i_id + s_i_id --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out index 427754fa4..b32d88cd8 100644 --- a/src/test/regress/expected/multi_repartition_join_pruning.out +++ b/src/test/regress/expected/multi_repartition_join_pruning.out @@ -28,7 +28,7 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) @@ -58,7 +58,7 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 9 - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -74,7 +74,7 @@ WHERE o_custkey = c_custkey AND o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) @@ -93,7 +93,7 @@ WHERE o_custkey = c_custkey AND o_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - count + count --------------------------------------------------------------------- 0 (1 row) @@ -109,7 +109,7 @@ WHERE o_custkey = c_custkey AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) @@ -128,7 +128,7 @@ WHERE o_custkey = c_custkey AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. - count + count --------------------------------------------------------------------- 0 (1 row) @@ -172,7 +172,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) @@ -221,7 +221,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 16 - count + count --------------------------------------------------------------------- 125 (1 row) @@ -237,7 +237,7 @@ WHERE l_partkey = c_nationkey AND l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) @@ -259,7 +259,7 @@ WHERE l_partkey = c_nationkey AND l_orderkey < 0; DEBUG: Router planner does not support append-partitioned tables. - count + count --------------------------------------------------------------------- 0 (1 row) @@ -273,7 +273,7 @@ FROM WHERE false; DEBUG: Router planner does not support append-partitioned tables. 
- QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -291,7 +291,7 @@ FROM WHERE false; DEBUG: Router planner does not support append-partitioned tables. - o_orderkey + o_orderkey --------------------------------------------------------------------- (0 rows) @@ -303,7 +303,7 @@ FROM WHERE 1=0 AND c_custkey < 0; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -319,7 +319,7 @@ SELECT FROM orders INNER JOIN customer_append ON (o_custkey = c_custkey AND false); DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 @@ -334,7 +334,7 @@ FROM WHERE o_custkey = c_custkey AND false; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 diff --git a/src/test/regress/expected/multi_repartition_join_ref.out b/src/test/regress/expected/multi_repartition_join_ref.out index 85e101dc9..9d14058ed 100644 --- a/src/test/regress/expected/multi_repartition_join_ref.out +++ b/src/test/regress/expected/multi_repartition_join_ref.out @@ -15,7 +15,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 195 | 196 | 804 245 | 246 | 754 @@ -43,7 +43,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 @@ -71,7 +71,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 @@ -98,7 +98,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ cartesian product reference join "supplier" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 18 | 7519 | 1000 79 | 7580 | 1000 @@ -126,7 +126,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 195 | 196 | 1 245 | 246 | 1 @@ -154,7 +154,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 195 | 196 | 1 
245 | 246 | 1 @@ -182,7 +182,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ reference join "supplier" ][ single range partition join "part_append" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 @@ -210,7 +210,7 @@ ORDER BY LIMIT 10; LOG: join order: [ "lineitem" ][ single range partition join "part_append" ][ reference join "supplier" ] DEBUG: push down of limit count: 10 - l_partkey | l_suppkey | count + l_partkey | l_suppkey | count --------------------------------------------------------------------- 18 | 7519 | 1 79 | 7580 | 1 diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index eeca76778..d3146999d 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -35,7 +35,7 @@ DETAIL: Creating dependency on merge taskId 9 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -63,7 +63,7 @@ DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - count + count --------------------------------------------------------------------- 12000 (1 row) @@ -114,7 +114,7 @@ DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - count + count --------------------------------------------------------------------- 125 (1 row) diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index 96ce66c26..a1fc0c27d 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -47,7 +47,7 @@ CREATE TABLE repartition_udt_other ( udtcol test_udt, txtcol text ); --- Connect directly to a worker, create and drop the type, then +-- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port @@ -126,16 +126,16 @@ FUNCTION 1 test_udt_hash(test_udt); SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); @@ -153,11 +153,11 @@ INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- This query was intended to test "Query that should result in a repartition -- join on int column, and be empty." In order to remove broadcast logic, we --- manually make the query router plannable. +-- manually make the query router plannable. 
SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk = 1; - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- (0 rows) @@ -168,7 +168,7 @@ EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 @@ -186,7 +186,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] - pk | udtcol | txtcol | pk | udtcol | txtcol + pk | udtcol | txtcol | pk | udtcol | txtcol --------------------------------------------------------------------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index 493915991..12de639f6 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -6,32 +6,32 @@ SET citus.next_shard_id TO 830000; \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_1_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_2_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; +CREATE FUNCTION median(double precision[]) RETURNS double precision +LANGUAGE sql IMMUTABLE AS $_$ + SELECT AVG(val) FROM + (SELECT val FROM unnest($1) val + ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) + OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -- 
 -- Run query on master
 \c - - - :master_port
@@ -39,7 +39,7 @@ SET citus.task_executor_type TO 'task-tracker';
 SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*)
                FROM lineitem GROUP BY l_partkey) AS a
 WHERE median > 2;
- median | count 
+ median | count
 ---------------------------------------------------------------------
 (0 rows)

diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out
index 17ea5c3fa..714e8353e 100644
--- a/src/test/regress/expected/multi_replicate_reference_table.out
+++ b/src/test/regress/expected/multi_replicate_reference_table.out
@@ -10,28 +10,28 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000;
 CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
 DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- test adding new node with no reference tables
 -- verify there is no node with nodeport = :worker_2_port before adding the node
 SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)

 -- verify node is added
 SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -43,29 +43,29 @@ FROM
     pg_dist_shard_placement
 WHERE
     nodeport = :worker_2_port;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

 -- test adding new node with a reference table which does not have any healthy placement
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- verify there is no node with nodeport = :worker_2_port before adding the node
 SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 CREATE TABLE replicate_reference_table_unhealthy(column1 int);
 SELECT create_reference_table('replicate_reference_table_unhealthy');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000;
@@ -73,7 +73,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ERROR:  could not find any healthy placement for shard xxxxx
 -- verify node is not added
 SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -85,7 +85,7 @@ FROM
     pg_dist_shard_placement
 WHERE
     nodeport = :worker_2_port;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -93,9 +93,9 @@ DROP TABLE replicate_reference_table_unhealthy;
 -- test replicating a reference table when a new node added
 CREATE TABLE replicate_reference_table_valid(column1 int);
 SELECT create_reference_table('replicate_reference_table_valid');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- status before master_add_node
@@ -106,7 +106,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -116,14 +116,14 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)

 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "replicate_reference_table_valid" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -136,7 +136,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370001 | 1 | 0 | localhost | 57638
 (1 row)
@@ -147,7 +147,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -161,7 +161,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370001 | 1 | 0 | localhost | 57638
 (1 row)
@@ -172,13 +172,13 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)

 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -191,7 +191,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370001 | 1 | 0 | localhost | 57638
 (1 row)
@@ -202,7 +202,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -210,16 +210,16 @@ WHERE colocationid IN
 DROP TABLE replicate_reference_table_valid;
 -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE replicate_reference_table_rollback(column1 int);
 SELECT create_reference_table('replicate_reference_table_rollback');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- status before master_add_node
@@ -230,7 +230,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -240,7 +240,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -248,7 +248,7 @@ WHERE colocationid IN
 BEGIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "replicate_reference_table_rollback" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -262,7 +262,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -272,7 +272,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -281,9 +281,9 @@ DROP TABLE replicate_reference_table_rollback;
 -- test replicating a reference table when a new node added in TRANSACTION + COMMIT
 CREATE TABLE replicate_reference_table_commit(column1 int);
 SELECT create_reference_table('replicate_reference_table_commit');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- status before master_add_node
@@ -294,7 +294,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -304,7 +304,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_commit'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -312,7 +312,7 @@ WHERE colocationid IN
 BEGIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "replicate_reference_table_commit" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -326,7 +326,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370003 | 1 | 0 | localhost | 57638
 (1 row)
@@ -337,7 +337,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_commit'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -345,16 +345,16 @@ WHERE colocationid IN
 DROP TABLE replicate_reference_table_commit;
 -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE replicate_reference_table_reference_one(column1 int);
 SELECT create_reference_table('replicate_reference_table_reference_one');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SET citus.shard_count TO 1;
@@ -362,9 +362,9 @@ SET citus.shard_replication_factor TO 1;
 SET citus.replication_model TO 'streaming';
 CREATE TABLE replicate_reference_table_hash(column1 int);
 SELECT create_distributed_table('replicate_reference_table_hash', 'column1');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables
@@ -377,7 +377,7 @@ FROM
     pg_dist_shard_placement
 WHERE
     nodeport = :worker_2_port;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -387,7 +387,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -400,7 +400,7 @@ FROM
 WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash',
                        'replicate_reference_table_reference_two')
 ORDER BY logicalrelid;
- logicalrelid | partmethod | ?column? | repmodel 
+ logicalrelid | partmethod | ?column? | repmodel
 ---------------------------------------------------------------------
 replicate_reference_table_reference_one | n | t | t
 replicate_reference_table_hash | h | f | c
@@ -409,21 +409,21 @@ ORDER BY logicalrelid;
 BEGIN;
 SET LOCAL client_min_messages TO ERROR;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)

 SELECT upgrade_to_reference_table('replicate_reference_table_hash');
- upgrade_to_reference_table 
+ upgrade_to_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_reference_table('replicate_reference_table_reference_two');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 COMMIT;
@@ -435,7 +435,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370004 | 1 | 0 | localhost | 57638
 1370005 | 1 | 0 | localhost | 57638
@@ -448,7 +448,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -459,9 +459,9 @@ FROM
     pg_dist_partition
 WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash',
                        'replicate_reference_table_reference_two')
-ORDER BY 
+ORDER BY
     logicalrelid;
- logicalrelid | partmethod | ?column? | repmodel 
+ logicalrelid | partmethod | ?column? | repmodel
 ---------------------------------------------------------------------
 replicate_reference_table_reference_one | n | t | t
 replicate_reference_table_hash | n | t | t
@@ -473,16 +473,16 @@ DROP TABLE replicate_reference_table_hash;
 DROP TABLE replicate_reference_table_reference_two;
 -- test inserting a value then adding a new node in a transaction
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE replicate_reference_table_insert(column1 int);
 SELECT create_reference_table('replicate_reference_table_insert');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 BEGIN;
@@ -494,9 +494,9 @@ DROP TABLE replicate_reference_table_insert;
 -- test COPY then adding a new node in a transaction
 CREATE TABLE replicate_reference_table_copy(column1 int);
 SELECT create_reference_table('replicate_reference_table_copy');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 BEGIN;
@@ -508,9 +508,9 @@ DROP TABLE replicate_reference_table_copy;
 -- test executing DDL command then adding a new node in a transaction
 CREATE TABLE replicate_reference_table_ddl(column1 int);
 SELECT create_reference_table('replicate_reference_table_ddl');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 BEGIN;
@@ -522,9 +522,9 @@ DROP TABLE replicate_reference_table_ddl;
 -- test DROP table after adding new node in a transaction
 CREATE TABLE replicate_reference_table_drop(column1 int);
 SELECT create_reference_table('replicate_reference_table_drop');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- status before master_add_node
@@ -535,7 +535,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -545,7 +545,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_drop'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -553,7 +553,7 @@ WHERE colocationid IN
 BEGIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "replicate_reference_table_drop" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -568,28 +568,28 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

 SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 (0 rows)

 -- test adding a node while there is a reference table at another schema
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE SCHEMA replicate_reference_table_schema;
 CREATE TABLE replicate_reference_table_schema.table1(column1 int);
 SELECT create_reference_table('replicate_reference_table_schema.table1');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- status before master_add_node
@@ -600,7 +600,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -610,14 +610,14 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)

 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "table1" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -630,7 +630,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370011 | 1 | 0 | localhost | 57638
 (1 row)
@@ -641,7 +641,7 @@ WHERE colocationid IN
     (SELECT colocationid
      FROM pg_dist_partition
      WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass);
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
+ colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
 10004 | 1 | -1 | 0 | 0
 (1 row)
@@ -650,9 +650,9 @@ DROP TABLE replicate_reference_table_schema.table1;
 DROP SCHEMA replicate_reference_table_schema CASCADE;
 -- test adding a node when there are foreign keys between reference tables
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE ref_table_1(id int primary key, v int);
@@ -661,9 +661,9 @@ CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id));
 SELECT create_reference_table('ref_table_1'),
        create_reference_table('ref_table_2'),
        create_reference_table('ref_table_3');
- create_reference_table | create_reference_table | create_reference_table 
+ create_reference_table | create_reference_table | create_reference_table
 ---------------------------------------------------------------------
- | | 
+ | |
 (1 row)

 -- status before master_add_node
@@ -674,7 +674,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 (0 rows)

@@ -682,7 +682,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "ref_table_1" to the node localhost:xxxxx
 NOTICE:  Replicating reference table "ref_table_2" to the node localhost:xxxxx
 NOTICE:  Replicating reference table "ref_table_3" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -695,7 +695,7 @@ FROM
 WHERE
     nodeport = :worker_2_port
 ORDER BY shardid, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370012 | 1 | 0 | localhost | 57638
 1370013 | 1 | 0 | localhost | 57638
@@ -704,7 +704,7 @@ ORDER BY shardid, nodeport;

 -- verify constraints have been created on the new node
 SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';');
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,2)
 (localhost,57638,t,2)
@@ -713,20 +713,20 @@ SELECT run_command_on_workers('select count(*) from pg_constraint where contype=
 DROP TABLE ref_table_1, ref_table_2, ref_table_3;
 -- do some tests with inactive node
 SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node 
+ master_remove_node
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE initially_not_replicated_reference_table (key int);
 SELECT create_reference_table('initially_not_replicated_reference_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -737,15 +737,15 @@ SELECT
 FROM
     pg_dist_shard_placement
 WHERE
-    shardid IN (SELECT 
-                    shardid 
-                FROM 
-                    pg_dist_shard 
-                WHERE 
+    shardid IN (SELECT
+                    shardid
+                FROM
+                    pg_dist_shard
+                WHERE
                     logicalrelid = 'initially_not_replicated_reference_table'::regclass)
     AND nodeport != :master_port
 ORDER BY 1,4,5;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370015 | 1 | 0 | localhost | 57637
 (1 row)
@@ -753,7 +753,7 @@ ORDER BY 1,4,5;
 -- we should see the two shard placements after activation
 SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
 NOTICE:  Replicating reference table "initially_not_replicated_reference_table" to the node localhost:xxxxx
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -763,15 +763,15 @@ SELECT
 FROM
     pg_dist_shard_placement
 WHERE
-    shardid IN (SELECT 
-                    shardid 
-                FROM 
-                    pg_dist_shard 
-                WHERE 
+    shardid IN (SELECT
+                    shardid
+                FROM
+                    pg_dist_shard
+                WHERE
                     logicalrelid = 'initially_not_replicated_reference_table'::regclass)
     AND nodeport != :master_port
 ORDER BY 1,4,5;
- shardid | shardstate | shardlength | nodename | nodeport 
+ shardid | shardstate | shardlength | nodename | nodeport
 ---------------------------------------------------------------------
 1370015 | 1 | 0 | localhost | 57637
 1370015 | 1 | 0 | localhost | 57638
@@ -779,7 +779,7 @@ ORDER BY 1,4,5;
 -- this should have no effect
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
- ?column? 
+ ?column?
 ---------------------------------------------------------------------
 1
 (1 row)

diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out
index a5b23de7d..d0bbde78d 100644
--- a/src/test/regress/expected/multi_router_planner.out
+++ b/src/test/regress/expected/multi_router_planner.out
@@ -33,40 +33,40 @@ CREATE TABLE authors_reference ( name varchar(20), id bigint );
 -- this table is used in router executor tests
 CREATE TABLE articles_single_shard_hash (LIKE articles_hash);
 SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- test when a table is distributed but no shards created yet
 SELECT count(*) from articles_hash;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 SELECT master_create_worker_shards('articles_hash', 2, 1);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_reference_table('authors_reference');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- create a bunch of test data
@@ -133,7 +133,7 @@ SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 50 | 10 | anjanette | 19519
 (1 row)
@@ -143,7 +143,7 @@ SELECT title FROM articles_hash WHERE author_id = 10;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- title 
+ title
 ---------------------------------------------------------------------
 aggrandize
 absentness
@@ -159,7 +159,7 @@ SELECT title, word_count FROM articles_hash
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- title | word_count 
+ title | word_count
 ---------------------------------------------------------------------
 anjanette | 19519
 aggrandize | 17277
@@ -176,7 +176,7 @@ SELECT title, id FROM articles_hash
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 5
- title | id 
+ title | id
 ---------------------------------------------------------------------
 aruru | 5
 adversa | 15
@@ -189,7 +189,7 @@ SELECT title, author_id FROM articles_hash
 ORDER BY author_id ASC, id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- title | author_id 
+ title | author_id
 ---------------------------------------------------------------------
 aseptic | 7
 auriga | 7
@@ -208,7 +208,7 @@ SELECT title, author_id FROM articles_hash
 WHERE author_id = 7 OR author_id = 8;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- title | author_id 
+ title | author_id
 ---------------------------------------------------------------------
 aseptic | 7
 agatized | 8
@@ -231,7 +231,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash
 ORDER BY sum(word_count) DESC;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- author_id | corpus_size 
+ author_id | corpus_size
 ---------------------------------------------------------------------
 10 | 59955
 8 | 55410
@@ -248,7 +248,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- author_id | corpus_size 
+ author_id | corpus_size
 ---------------------------------------------------------------------
 1 | 35894
 (1 row)
@@ -257,7 +257,7 @@ DETAIL:  distribution column value: 1
 -- not router-plannable due to <= and IN
 SELECT * FROM articles_hash WHERE author_id <= 1 ORDER BY id;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -269,7 +269,7 @@ DEBUG:  Router planner cannot handle multi-shard select queries
 SELECT * FROM articles_hash WHERE author_id IN (1, 3) ORDER BY id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 3 | 3 | asternal | 10480
@@ -287,7 +287,7 @@ SELECT * FROM articles_hash WHERE author_id IN (1, NULL) ORDER BY id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -302,7 +302,7 @@ SELECT * FROM first_author;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id 
+ id
 ---------------------------------------------------------------------
 1
 11
@@ -317,7 +317,7 @@ SELECT title FROM articles_hash WHERE author_id = 1;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- title 
+ title
 ---------------------------------------------------------------------
 arsenous
 alamo
@@ -333,7 +333,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | id | title 
+ id | author_id | id | title
 ---------------------------------------------------------------------
 1 | 1 | 1 | arsenous
 11 | 1 | 11 | alamo
@@ -347,7 +347,7 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 3)
 SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | id | title 
+ id | author_id | id | title
 ---------------------------------------------------------------------
 (0 rows)

@@ -367,22 +367,22 @@ DETAIL:  distribution column value: 2
 DEBUG:  Plan 68 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('68_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | id | title 
+ id | author_id | id | title
 ---------------------------------------------------------------------
 (0 rows)

 -- recursive CTEs are supported when filtered on partition column
 CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
 SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
- master_create_distributed_table 
+ master_create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT master_create_worker_shards('company_employees', 4, 1);
- master_create_worker_shards 
+ master_create_worker_shards
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO company_employees values(1, 1, 0);
@@ -432,7 +432,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- company_id | employee_id | manager_id | level 
+ company_id | employee_id | manager_id | level
 ---------------------------------------------------------------------
 1 | 1 | 0 | 1
 1 | 2 | 1 | 2
@@ -481,7 +481,7 @@ DETAIL:  distribution column value: 1
 DEBUG:  Plan 82 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('82_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) new_article
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9
 (1 row)
@@ -497,7 +497,7 @@ DEBUG:  Plan is router executable
 DEBUG:  Plan 84 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 10
 (1 row)
@@ -513,7 +513,7 @@ DEBUG:  Plan is router executable
 DEBUG:  Plan 86 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) delete_article
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 10
 (1 row)
@@ -544,7 +544,7 @@ SELECT
 ORDER BY id, subtitle;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | subtitle | count 
+ id | subtitle | count
 ---------------------------------------------------------------------
 1 | | 1
 3 | | 1
@@ -581,7 +581,7 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count | position 
+ id | author_id | title | word_count | position
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572 | 3
 11 | 1 | alamo | 1347 | 3
@@ -593,7 +593,7 @@ DETAIL:  distribution column value: 1
 SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count | position 
+ id | author_id | title | word_count | position
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572 | 3
 3 | 3 | asternal | 10480 | 3
@@ -611,7 +611,7 @@ DEBUG:  Plan is router executable
 SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2 ORDER BY 4 DESC, 1 DESC, 2 DESC LIMIT 5;
 DEBUG:  Router planner cannot handle multi-shard select queries
 DEBUG:  push down of limit count: 5
- id | author_id | title | word_count | position 
+ id | author_id | title | word_count | position
 ---------------------------------------------------------------------
 12 | 2 | archiblast | 18185 | 3
 42 | 2 | ausable | 15885 | 3
@@ -627,7 +627,7 @@ ORDER BY articles_hash.id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 2
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 2 | 2 | abducing | 13642
 12 | 2 | archiblast | 18185
@@ -646,7 +646,7 @@ DEBUG:  generating subplan 94_1 for subquery SELECT id, word_count FROM public.a
 DEBUG:  Plan 94 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM public.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('94_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5
 DEBUG:  Router planner cannot handle multi-shard select queries
 DEBUG:  push down of limit count: 5
- id | word_count 
+ id | word_count
 ---------------------------------------------------------------------
 50 | 19519
 14 | 19094
@@ -666,7 +666,7 @@ DEBUG:  Plan 96 query after replacing subqueries and CTEs: SELECT articles_hash.
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | word_count 
+ id | word_count
 ---------------------------------------------------------------------
 1 | 9572
 11 | 1347
@@ -689,7 +689,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -704,7 +704,7 @@ SELECT *
 WHERE author_id = 1 OR author_id = 17;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -720,7 +720,7 @@ SELECT *
 WHERE author_id = 1 OR author_id = 18
 ORDER BY 4 DESC, 3 DESC, 2 DESC, 1 DESC;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 41 | 1 | aznavour | 11814
 1 | 1 | arsenous | 9572
@@ -736,7 +736,7 @@ SELECT id as article_id, word_count * id as random_value
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- article_id | random_value 
+ article_id | random_value
 ---------------------------------------------------------------------
 1 | 9572
 11 | 14817
@@ -753,7 +753,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 10 | 17277
 10 | 1820
@@ -769,7 +769,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 10 | 19519
 10 | 19519
@@ -793,7 +793,7 @@ DEBUG:  Plan 105 query after replacing subqueries and CTEs: SELECT a.author_id A
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 2
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -805,7 +805,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -821,7 +821,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 11 | 1 | alamo | 1347
 21 | 1 | arcading | 5890
@@ -837,7 +837,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 31 | 1 | athwartships | 7271
 21 | 1 | arcading | 5890
@@ -852,7 +852,7 @@ SELECT id
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id 
+ id
 ---------------------------------------------------------------------
 1
 11
@@ -869,7 +869,7 @@ SELECT DISTINCT id
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id 
+ id
 ---------------------------------------------------------------------
 1
 11
@@ -885,7 +885,7 @@ SELECT avg(word_count)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 2
- avg 
+ avg
 ---------------------------------------------------------------------
 12356.400000000000
 (1 row)
@@ -898,7 +898,7 @@ SELECT max(word_count) as max, min(word_count) as min,
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 2
- max | min | sum | cnt 
+ max | min | sum | cnt
 ---------------------------------------------------------------------
 18185 | 2728 | 61782 | 5
 (1 row)
@@ -911,7 +911,7 @@ SELECT max(word_count)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- max 
+ max
 ---------------------------------------------------------------------
 11814
 (1 row)
@@ -925,7 +925,7 @@ SELECT * FROM (
 ORDER BY id;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 3 | 3 | asternal | 10480
@@ -944,7 +944,7 @@ UNION
 (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3);
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- left 
+ left
 ---------------------------------------------------------------------
 a
 (1 row)
@@ -954,7 +954,7 @@ INTERSECT
 (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3);
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- left 
+ left
 ---------------------------------------------------------------------
 a
 (1 row)
@@ -967,7 +967,7 @@ SELECT * FROM (
 ORDER BY 1;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- left 
+ left
 ---------------------------------------------------------------------
 al
 ar
@@ -985,7 +985,7 @@ SET client_min_messages to 'NOTICE';
 UNION
 (SELECT * FROM articles_hash WHERE author_id = 2)
 ORDER BY 1,2,3;
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 2 | 2 | abducing | 13642
@@ -1011,7 +1011,7 @@ SELECT * FROM (
 (SELECT * FROM articles_hash WHERE author_id = 2)) uu
 ORDER BY 1, 2
 LIMIT 5;
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 2 | 2 | abducing | 13642
@@ -1032,7 +1032,7 @@ SELECT * FROM articles_hash a, articles_hash b
 WHERE a.id = b.id
 AND a.author_id = 1
 ORDER BY 1 DESC;
- id | author_id | title | word_count | id | author_id | title | word_count 
+ id | author_id | title | word_count | id | author_id | title | word_count
 ---------------------------------------------------------------------
 41 | 1 | aznavour | 11814 | 41 | 1 | aznavour | 11814
 31 | 1 | athwartships | 7271 | 31 | 1 | athwartships | 7271
@@ -1048,7 +1048,7 @@ SELECT *
 FROM articles_hash
 WHERE author_id >= 1 AND author_id <= 3
 ORDER BY 1,2,3,4;
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 2 | 2 | abducing | 13642
@@ -1078,7 +1078,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1092,7 +1092,7 @@ SELECT *
 FROM articles_hash
 WHERE author_id = 1 or id = 1;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1108,7 +1108,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 41 | 1 | aznavour | 11814
@@ -1121,7 +1121,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1130,7 +1130,7 @@ SELECT *
 FROM articles_hash
 WHERE author_id = (random()::int * 0 + 1);
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1144,7 +1144,7 @@ SELECT *
 FROM articles_hash
 WHERE author_id = 1 or id = 1;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1160,7 +1160,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1174,7 +1174,7 @@ SELECT *
 FROM articles_hash
 WHERE 1 = abs(author_id);
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1188,7 +1188,7 @@ SELECT *
 FROM articles_hash
 WHERE author_id = abs(author_id - 2);
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1204,7 +1204,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 (1 row)
@@ -1214,7 +1214,7 @@ SELECT *
 FROM articles_hash
 WHERE (author_id = 1) is true;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1230,7 +1230,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1246,7 +1246,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1259,7 +1259,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 31 | 1 | athwartships | 7271
@@ -1272,7 +1272,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 31 | 1 | athwartships | 7271
@@ -1285,7 +1285,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1301,7 +1301,7 @@ SELECT *
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 1 | 1 | arsenous | 9572
 11 | 1 | alamo | 1347
@@ -1315,7 +1315,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 5
- prev | title | word_count 
+ prev | title | word_count
 ---------------------------------------------------------------------
 | afrasia | 864
 afrasia | adversa | 3164
@@ -1331,7 +1331,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 5
- prev | title | word_count 
+ prev | title | word_count
 ---------------------------------------------------------------------
 aminate | aruru | 11389
 antehall | aminate | 9089
@@ -1346,7 +1346,7 @@ SELECT id, MIN(id) over (order by word_count)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | min 
+ id | min
 ---------------------------------------------------------------------
 11 | 11
 21 | 11
@@ -1361,7 +1361,7 @@ SELECT id, word_count, AVG(word_count) over (order by word_count)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- id | word_count | avg 
+ id | word_count | avg
 ---------------------------------------------------------------------
 11 | 1347 | 1347.0000000000000000
 21 | 5890 | 3618.5000000000000000
@@ -1376,7 +1376,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count)
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 1
- word_count | rank 
+ word_count | rank
 ---------------------------------------------------------------------
 1347 | 1
 5890 | 2
@@ -1404,7 +1404,7 @@ SELECT *
 WHERE false;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1413,7 +1413,7 @@ SELECT *
 WHERE author_id = 1 and false;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1422,7 +1422,7 @@ SELECT *
 WHERE author_id = 1 and 1=0;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1431,7 +1431,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 WHERE a.author_id = 10 and a.author_id = b.author_id and false;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1440,7 +1440,7 @@ SELECT *
 WHERE null;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1450,7 +1450,7 @@ SELECT *
 WHERE a.author_id = 10 and int4eq(1, 2);
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1459,7 +1459,7 @@ SELECT *
 WHERE int4eq(1, 2);
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1469,7 +1469,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
 DETAIL:  distribution column value: 10
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 10 | 19519
 10 | 19519
@@ -1483,7 +1483,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 2);
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- first_author | second_word_count 
+ first_author | second_word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1493,7 +1493,7 @@ SELECT *
 FROM articles_hash a
 WHERE a.author_id is null;
 DEBUG:  Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1504,7 +1504,7 @@ SELECT *
 WHERE a.author_id = null;
 DEBUG:  Creating router plan
 DEBUG:  Plan is router executable
- id | author_id | title | word_count 
+ id | author_id | title | word_count
 ---------------------------------------------------------------------
 (0 rows)

@@ -1514,7 +1514,7 @@ SELECT *
SELECT * WHERE date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1524,7 +1524,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- (0 rows) @@ -1540,7 +1540,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1558,7 +1558,7 @@ ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -1573,7 +1573,7 @@ INTERSECT DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1585,7 +1585,7 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 1 and 1=0) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title + id | author_id | id | title --------------------------------------------------------------------- (0 rows) @@ -1594,7 +1594,7 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | id | title + id | author_id | id | title --------------------------------------------------------------------- (0 rows) @@ -1613,7 +1613,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level + company_id | employee_id | manager_id | level --------------------------------------------------------------------- (0 rows) @@ -1631,7 +1631,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level + company_id | employee_id | manager_id | level --------------------------------------------------------------------- 1 | 1 | 0 | 1 (1 row) @@ -1650,7 +1650,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - company_id | employee_id | manager_id | level + company_id | employee_id | manager_id | level --------------------------------------------------------------------- (0 rows) @@ -1660,7 +1660,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) WHERE author_id = 1 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable - word_count | 
rank + word_count | rank --------------------------------------------------------------------- (0 rows) @@ -1673,7 +1673,7 @@ SELECT author_id FROM articles_hash LIMIT 1; DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 1 - author_id + author_id --------------------------------------------------------------------- 1 (1 row) @@ -1687,7 +1687,7 @@ SELECT author_id FROM articles_hash LIMIT 1; DEBUG: Creating router plan DEBUG: Plan is router executable - author_id + author_id --------------------------------------------------------------------- (0 rows) @@ -1696,15 +1696,15 @@ DEBUG: Plan is router executable -- they are 'co-located' pairwise SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('authors_range', 'id', 'range'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_distributed_table('articles_range', 'author_id', 'range'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('authors_range') as shard_id \gset @@ -1727,14 +1727,14 @@ UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :s SELECT * FROM articles_range where author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) SELECT * FROM articles_range where author_id = 1 or author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1742,7 +1742,7 @@ DEBUG: Plan is router executable SELECT * FROM articles_range where author_id = 1 and author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -1751,7 +1751,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id + id | author_id | title | word_count | name | id --------------------------------------------------------------------- (0 rows) @@ -1760,7 +1760,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 and au.id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count | name | id + id | author_id | title | word_count | name | id --------------------------------------------------------------------- (0 rows) @@ -1798,7 +1798,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 - id | author_id | title | word_count | name | id + id | author_id | title | word_count | name | id --------------------------------------------------------------------- (0 rows) @@ -1836,7 +1836,7 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1846,7 +1846,7 @@ SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id)
WHERE ar.author_id = 1 and au.id < 10;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1859,7 +1859,7 @@ SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1899,7 +1899,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.
WHERE ar.author_id = 1;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1908,7 +1908,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.
WHERE ar.author_id = 1 or ar.author_id = 5;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1916,7 +1916,7 @@ DEBUG: Plan is router executable
SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id)
WHERE ar.author_id = 1 or ar.author_id = 15;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count | name | id
+ id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1924,9 +1924,9 @@ DEBUG: Router planner cannot handle multi-shard select queries
-- evaluated at master before going to worker
-- need to use a append distributed table here
SELECT master_create_distributed_table('articles_append', 'author_id', 'append');
- master_create_distributed_table
+ master_create_distributed_table
---------------------------------------------------------------------
-
+
(1 row)
SET citus.shard_replication_factor TO 1;
@@ -2003,7 +2003,7 @@ SELECT *
FROM articles_hash
ORDER BY author_id, id
LIMIT 5;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2044,7 +2044,7 @@ SELECT
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- c
+ c
---------------------------------------------------------------------
5
(1 row)
@@ -2065,7 +2065,7 @@ SELECT author_id
ORDER BY c;
DEBUG: Router planner cannot handle multi-shard select queries
- c
+ c
---------------------------------------------------------------------
4
5
@@ -2088,7 +2088,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2108,7 +2108,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2129,13 +2129,13 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
FETCH test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
(1 row)
FETCH ALL test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
11 | 1 | alamo | 1347
21 | 1 | arcading | 5890
@@ -2144,12 +2144,12 @@ FETCH ALL test_cursor;
(4 rows)
FETCH test_cursor; -- fetch one row after the last
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
FETCH BACKWARD test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
41 | 1 | aznavour | 11814
(1 row)
@@ -2185,7 +2185,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3)
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- count | count
+ count | count
---------------------------------------------------------------------
5 | 1
(1 row)
@@ -2195,7 +2195,7 @@ SELECT count(*), count(*) FILTER (WHERE id < 3)
FROM articles_hash
WHERE author_id = 1 or author_id = 2;
DEBUG: Router planner cannot handle multi-shard select queries
- count | count
+ count | count
---------------------------------------------------------------------
10 | 2
(1 row)
@@ -2209,7 +2209,7 @@ EXECUTE author_1_articles;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2227,7 +2227,7 @@ EXECUTE author_articles(1);
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2257,7 +2257,7 @@ DETAIL: distribution column value: 1
CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah
WHERE author_id = 1"
PL/pgSQL function author_articles_max_id() line 5 at SQL statement
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -2285,7 +2285,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count
FROM articles_hash ah
WHERE author_id = 1"
PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -2301,7 +2301,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
SELECT * FROM mv_articles_hash_empty;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2314,7 +2314,7 @@ CREATE MATERIALIZED VIEW mv_articles_hash_data AS SELECT * FROM articles_hash WHERE author_id in (1,2);
DEBUG: Router planner cannot handle multi-shard select queries
SELECT * FROM mv_articles_hash_data ORDER BY 1, 2, 3, 4;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
2 | 2 | abducing | 13642
@@ -2336,7 +2336,7 @@ SELECT id
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -2356,7 +2356,7 @@ SELECT id
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -2372,15 +2372,15 @@ SET client_min_messages to 'NOTICE';
SET citus.shard_replication_factor TO 2;
CREATE TABLE failure_test (a int, b int);
SELECT master_create_distributed_table('failure_test', 'a', 'hash');
- master_create_distributed_table
+ master_create_distributed_table
---------------------------------------------------------------------
-
+
(1 row)
SELECT master_create_worker_shards('failure_test', 2);
- master_create_worker_shards
+ master_create_worker_shards
---------------------------------------------------------------------
-
+
(1 row)
SET citus.enable_ddl_propagation TO off;
@@ -2406,7 +2406,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
WHERE logicalrelid = 'failure_test'::regclass
)
ORDER BY placementid;
- shardid | shardstate | nodename | nodeport
+ shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
840017 | 1 | localhost | 57637
840017 | 3 | localhost | 57638
@@ -2424,7 +2424,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
WHERE logicalrelid = 'failure_test'::regclass
)
ORDER BY placementid;
- shardid | shardstate | nodename | nodeport
+ shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
840017 | 1 | localhost | 57637
840017 | 1 | localhost | 57638
diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out
index 2e80af069..4963baef6 100644
--- a/src/test/regress/expected/multi_router_planner_fast_path.out
+++ b/src/test/regress/expected/multi_router_planner_fast_path.out
@@ -36,16 +36,16 @@ CREATE TABLE authors_range ( name varchar(20), id bigint );
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 2;
SELECT create_distributed_table('articles_hash', 'author_id');
- create_distributed_table
+ create_distributed_table
---------------------------------------------------------------------
-
+
(1 row)
CREATE TABLE authors_reference ( name varchar(20), id bigint );
SELECT create_reference_table('authors_reference');
- create_reference_table
+ create_reference_table
---------------------------------------------------------------------
-
+
(1 row)
-- create a bunch of test data
@@ -70,7 +70,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 10
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
50 | 10 | anjanette | 19519
(1 row)
@@ -81,7 +81,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 10
- title
+ title
---------------------------------------------------------------------
aggrandize
absentness
@@ -98,7 +98,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 10
- title | word_count
+ title | word_count
---------------------------------------------------------------------
anjanette | 19519
aggrandize | 17277
@@ -116,7 +116,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- title | id
+ title | id
---------------------------------------------------------------------
aruru | 5
adversa | 15
@@ -130,7 +130,7 @@ SELECT title, author_id FROM articles_hash
ORDER BY author_id ASC, id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- title | author_id
+ title | author_id
---------------------------------------------------------------------
aseptic | 7
auriga | 7
@@ -155,7 +155,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- author_id | corpus_size
+ author_id | corpus_size
---------------------------------------------------------------------
1 | 35894
(1 row)
@@ -163,7 +163,7 @@ DETAIL: distribution column value: 1
-- fast path planner only support = operator
SELECT * FROM articles_hash WHERE author_id <= 1;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -175,7 +175,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
SELECT * FROM articles_hash WHERE author_id IN (1, 3);
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
3 | 3 | asternal | 10480
@@ -195,7 +195,7 @@ SELECT * FROM first_author;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -211,7 +211,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | id | title
+ id | author_id | id | title
---------------------------------------------------------------------
1 | 1 | 1 | arsenous
11 | 1 | 11 | alamo
@@ -239,23 +239,23 @@ DETAIL: distribution column value: 2
DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT id_author.id, id_author.author_id, id_title.id, id_title.title FROM (SELECT intermediate_result.id, intermediate_result.author_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint)) id_author, (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('12_2'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title character varying(20))) id_title WHERE (id_author.id OPERATOR(pg_catalog.=) id_title.id)
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | id | title
+ id | author_id | id | title
---------------------------------------------------------------------
(0 rows)
CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
- master_create_distributed_table
+ master_create_distributed_table
---------------------------------------------------------------------
-
+
(1 row)
-- do not print notices from workers since the order is not deterministic
SET client_min_messages TO DEFAULT;
SELECT master_create_worker_shards('company_employees', 4, 1);
- master_create_worker_shards
+ master_create_worker_shards
---------------------------------------------------------------------
-
+
(1 row)
SET client_min_messages TO 'DEBUG2';
@@ -307,7 +307,7 @@ SELECT * FROM hierarchy WHERE LEVEL <= 2;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- company_id | employee_id | manager_id | level
+ company_id | employee_id | manager_id | level
---------------------------------------------------------------------
1 | 1 | 0 | 1
1 | 2 | 1 | 2
@@ -325,7 +325,7 @@ DEBUG: Plan is router executable
DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) update_article
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -340,7 +340,7 @@ DEBUG: Plan is router executable
DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT id, author_id, title, word_count FROM (SELECT intermediate_result.id, intermediate_result.author_id, intermediate_result.title, intermediate_result.word_count FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, author_id bigint, title character varying(20), word_count integer)) delete_article
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -355,7 +355,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | subtitle | count
+ id | subtitle | count
---------------------------------------------------------------------
1 | | 1
11 | | 1
@@ -383,7 +383,7 @@ SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count | position
+ id | author_id | title | word_count | position
---------------------------------------------------------------------
1 | 1 | arsenous | 9572 | 3
11 | 1 | alamo | 1347 | 3
@@ -399,7 +399,7 @@ ORDER BY articles_hash.id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
2 | 2 | abducing | 13642
12 | 2 | archiblast | 18185
@@ -418,7 +418,7 @@ DEBUG: generating subplan 32_1 for subquery SELECT id, word_count FROM fast_pat
DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT articles_hash.id, test.word_count FROM fast_path_router_select.articles_hash, (SELECT intermediate_result.id, intermediate_result.word_count FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, word_count integer)) test WHERE (test.id OPERATOR(pg_catalog.=) articles_hash.id) ORDER BY test.word_count DESC, articles_hash.id LIMIT 5
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: push down of limit count: 5
- id | word_count
+ id | word_count
---------------------------------------------------------------------
50 | 19519
14 | 19094
@@ -438,7 +438,7 @@ DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT articles_hash.
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -462,7 +462,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -478,7 +478,7 @@ SELECT *
WHERE author_id = 1 OR author_id = 17;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -495,7 +495,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- article_id | random_value
+ article_id | random_value
---------------------------------------------------------------------
1 | 9572
11 | 14817
@@ -512,7 +512,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 10
- first_author | second_word_count
+ first_author | second_word_count
---------------------------------------------------------------------
10 | 17277
10 | 1820
@@ -528,7 +528,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -545,7 +545,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
11 | 1 | alamo | 1347
21 | 1 | arcading | 5890
@@ -562,7 +562,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
31 | 1 | athwartships | 7271
21 | 1 | arcading | 5890
@@ -578,7 +578,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -596,7 +596,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -613,7 +613,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- avg
+ avg
---------------------------------------------------------------------
12356.400000000000
(1 row)
@@ -627,7 +627,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- max | min | sum | cnt
+ max | min | sum | cnt
---------------------------------------------------------------------
18185 | 2728 | 61782 | 5
(1 row)
@@ -641,7 +641,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- max
+ max
---------------------------------------------------------------------
11814
(1 row)
@@ -655,7 +655,7 @@ SELECT * FROM (
ORDER BY id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
3 | 3 | asternal | 10480
@@ -684,7 +684,7 @@ LIMIT 5;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -703,7 +703,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -726,7 +726,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 68719476736
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -738,7 +738,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -753,7 +753,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 or id = 1;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -772,7 +772,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
41 | 1 | aznavour | 11814
@@ -784,7 +784,7 @@ SELECT *
FROM articles_hash
WHERE author_id = 1 and id = 1 or id = 41;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
41 | 1 | aznavour | 11814
@@ -800,7 +800,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -809,7 +809,7 @@ SELECT *
FROM articles_hash
WHERE author_id = (random()::int * 0 + 1);
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -826,7 +826,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -841,7 +841,7 @@ SELECT *
FROM articles_hash
WHERE 1 = abs(author_id);
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -856,7 +856,7 @@ SELECT *
FROM articles_hash
WHERE author_id = abs(author_id - 2);
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -874,7 +874,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
(1 row)
@@ -884,7 +884,7 @@ SELECT *
FROM articles_hash
WHERE (author_id = 1) is true;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -900,7 +900,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -915,28 +915,28 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 15
- count
+ count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM articles_hash
WHERE (author_id = 15) OR (id = 1 AND word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
- count
+ count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM articles_hash
WHERE (id = 15) OR (author_id = 1 AND word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
- count
+ count
---------------------------------------------------------------------
6
(1 row)
SELECT count(*) FROM articles_hash
WHERE (id = 15) AND (author_id = 1 OR word_count > 5);
DEBUG: Router planner cannot handle multi-shard select queries
- count
+ count
---------------------------------------------------------------------
1
(1 row)
@@ -946,14 +946,14 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- count
+ count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM articles_hash
WHERE (id = 15) AND (title ilike 'a%' AND (word_count > 5 OR author_id = 2));
DEBUG: Router planner cannot handle multi-shard select queries
- count
+ count
---------------------------------------------------------------------
1
(1 row)
@@ -963,7 +963,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -973,7 +973,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -986,7 +986,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1000,7 +1000,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
31 | 1 | athwartships | 7271
@@ -1014,7 +1014,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
31 | 1 | athwartships | 7271
@@ -1028,7 +1028,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1045,7 +1045,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1060,7 +1060,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- prev | title | word_count
+ prev | title | word_count
---------------------------------------------------------------------
| afrasia | 864
afrasia | adversa | 3164
@@ -1077,7 +1077,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- prev | title | word_count
+ prev | title | word_count
---------------------------------------------------------------------
aminate | aruru | 11389
antehall | aminate | 9089
@@ -1093,7 +1093,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | min
+ id | min
---------------------------------------------------------------------
11 | 11
21 | 11
@@ -1109,7 +1109,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | word_count | avg
+ id | word_count | avg
---------------------------------------------------------------------
11 | 1347 | 1347.0000000000000000
21 | 5890 | 3618.5000000000000000
@@ -1125,7 +1125,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- word_count | rank
+ word_count | rank
---------------------------------------------------------------------
1347 | 1
5890 | 2
@@ -1150,7 +1150,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- author_id | id | t1 | cnt_with_filter | cnt_with_filter_2 | case_cnt | coalesce
+ author_id | id | t1 | cnt_with_filter | cnt_with_filter_2 | case_cnt | coalesce
---------------------------------------------------------------------
1 | 1 | 83.20028854345579490574 | 0 | 1 | | 0
1 | 11 | 629.20816629547141796586 | 1 | 1 | 44.0000000000000000 | 1
@@ -1165,7 +1165,7 @@ SELECT *
WHERE false;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1176,7 +1176,7 @@ SELECT *
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1188,7 +1188,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1198,7 +1198,7 @@ SELECT *
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1210,7 +1210,7 @@ SELECT *
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
2 | 2 | abducing | 13642
12 | 2 | archiblast | 18185
@@ -1228,7 +1228,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 10
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1239,7 +1239,7 @@ SELECT *
FROM articles_hash a
WHERE a.author_id is null;
DEBUG: Router planner cannot handle multi-shard select queries
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1251,7 +1251,7 @@ SELECT *
WHERE a.author_id = null;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1267,7 +1267,7 @@ ORDER BY id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1285,7 +1285,7 @@ SELECT * FROM (
ORDER BY id;
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
@@ -1297,7 +1297,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- word_count | rank
+ word_count | rank
---------------------------------------------------------------------
(0 rows)
@@ -1360,7 +1360,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- c
+ c
---------------------------------------------------------------------
5
(1 row)
@@ -1375,7 +1375,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1396,7 +1396,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1419,13 +1419,13 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
FETCH test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
(1 row)
FETCH ALL test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
11 | 1 | alamo | 1347
21 | 1 | arcading | 5890
@@ -1434,12 +1434,12 @@ FETCH ALL test_cursor;
(4 rows)
FETCH test_cursor; -- fetch one row after the last
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
(0 rows)
FETCH BACKWARD test_cursor;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
41 | 1 | aznavour | 11814
(1 row)
@@ -1478,7 +1478,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- count | count
+ count | count
---------------------------------------------------------------------
5 | 1
(1 row)
@@ -1493,7 +1493,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1503,7 +1503,7 @@ DETAIL: distribution column value: 1
(5 rows)
EXECUTE author_1_articles;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1513,7 +1513,7 @@ EXECUTE author_1_articles;
(5 rows)
EXECUTE author_1_articles;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1523,7 +1523,7 @@ EXECUTE author_1_articles;
(5 rows)
EXECUTE author_1_articles;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1533,7 +1533,7 @@ EXECUTE author_1_articles;
(5 rows)
EXECUTE author_1_articles;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1543,7 +1543,7 @@ EXECUTE author_1_articles;
(5 rows)
EXECUTE author_1_articles;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1562,7 +1562,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1576,7 +1576,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1590,7 +1590,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1604,7 +1604,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1618,7 +1618,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1633,7 +1633,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -1661,37 +1661,37 @@ SELECT author_articles_max_id();
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
SELECT author_articles_max_id();
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
SELECT author_articles_max_id();
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
SELECT author_articles_max_id();
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
SELECT author_articles_max_id();
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
SELECT author_articles_max_id();
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1711,7 +1711,7 @@ SELECT author_articles_max_id(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1720,7 +1720,7 @@ SELECT author_articles_max_id(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1729,7 +1729,7 @@ SELECT author_articles_max_id(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1738,7 +1738,7 @@ SELECT author_articles_max_id(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1747,7 +1747,7 @@ SELECT author_articles_max_id(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1757,7 +1757,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- author_articles_max_id
+ author_articles_max_id
---------------------------------------------------------------------
41
(1 row)
@@ -1777,7 +1777,7 @@ SELECT * FROM author_articles_id_word_count();
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1787,7 +1787,7 @@ DEBUG: Plan is router executable
(5 rows)
SELECT * FROM author_articles_id_word_count();
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1797,7 +1797,7 @@ SELECT * FROM author_articles_id_word_count();
(5 rows)
SELECT * FROM author_articles_id_word_count();
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1807,7 +1807,7 @@ SELECT * FROM author_articles_id_word_count();
(5 rows)
SELECT * FROM author_articles_id_word_count();
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1817,7 +1817,7 @@ SELECT * FROM author_articles_id_word_count();
(5 rows)
SELECT * FROM author_articles_id_word_count();
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1827,7 +1827,7 @@ SELECT * FROM author_articles_id_word_count();
(5 rows)
SELECT * FROM author_articles_id_word_count();
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1851,7 +1851,7 @@ SELECT * FROM author_articles_id_word_count(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1864,7 +1864,7 @@ SELECT * FROM author_articles_id_word_count(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1877,7 +1877,7 @@ SELECT * FROM author_articles_id_word_count(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1890,7 +1890,7 @@ SELECT * FROM author_articles_id_word_count(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1903,7 +1903,7 @@ SELECT * FROM author_articles_id_word_count(1);
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1917,7 +1917,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
- id | word_count
+ id | word_count
---------------------------------------------------------------------
1 | 9572
11 | 1347
@@ -1987,7 +1987,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -1997,7 +1997,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 2
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -2007,7 +2007,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 3
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -2017,7 +2017,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 4
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -2027,7 +2027,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 5
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -2038,7 +2038,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 6
- count
+ count
---------------------------------------------------------------------
0
(1 row)
@@ -2050,7 +2050,7 @@ SELECT * FROM test_view;
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2067,7 +2067,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
SELECT * FROM mv_articles_hash_empty;
- id | author_id | title | word_count
+ id | author_id | title | word_count
---------------------------------------------------------------------
1 | 1 | arsenous | 9572
11 | 1 | alamo | 1347
@@ -2085,7 +2085,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
---------------------------------------------------------------------
1
11
@@ -2106,7 +2106,7 @@ DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Plan is router executable
DETAIL: distribution column value: 1
- id
+ id
--------------------------------------------------------------------- 1 11 @@ -2134,9 +2134,9 @@ CREATE TABLE collections_list_2 -- we don't need many shards SET citus.shard_count TO 2; SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO collections_list SELECT i % 10, now(), (i % 2) + 1, i*i FROM generate_series(0, 50)i; @@ -2146,7 +2146,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 5 (1 row) @@ -2156,7 +2156,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 5 (1 row) @@ -2166,7 +2166,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 0 (1 row) @@ -2181,7 +2181,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 5 (1 row) @@ -2191,7 +2191,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 5 (1 row) @@ -2201,7 +2201,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 4 - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 5b9632ab1..3a225cca9 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -20,27 +20,27 @@ CREATE TABLE test_schema_support.nation_append( n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('test_schema_support.nation_append'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1190000 (1 row) -- append table to shard SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support.nation_append; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -52,26 +52,26 @@ CREATE TABLE test_schema_support."nation._'append" ( n_regionkey integer not null, n_comment varchar(152)); SELECT 
master_create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('test_schema_support."nation._''append"'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1190001 (1 row) SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support."nation._'append"; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -79,28 +79,28 @@ SELECT COUNT(*) FROM test_schema_support."nation._'append"; -- test master_append_table_to_shard with schema with search_path is set SET search_path TO test_schema_support; SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM nation_append; - count + count --------------------------------------------------------------------- 12 (1 row) -- test with search_path is set and shard name contains special characters SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM "nation._'append"; - count + count --------------------------------------------------------------------- 12 (1 row) @@ -116,9 +116,9 @@ CREATE TABLE nation_append_search_path( n_comment varchar(152) ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy nation_append_search_path FROM STDIN with delimiter '|'; @@ -130,15 +130,15 @@ CREATE TABLE test_schema_support.nation_hash( n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- test cursors @@ -149,19 +149,19 @@ DECLARE test_cursor CURSOR FOR FROM test_schema_support.nation_append WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -175,19 +175,19 @@ DECLARE test_cursor CURSOR FOR FROM nation_append WHERE n_nationkey = 1; FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -198,9 +198,9 @@ SET search_path TO public; INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (6, 'FRANCE', 3); -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- - 6 | FRANCE | 3 | + 6 | FRANCE | 3 | (1 row) -- test with search_path is set @@ -208,9 +208,9 @@ SET search_path TO test_schema_support; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- - 7 | GERMANY | 3 | + 7 | GERMANY | 3 | (1 row) -- test UDFs with schemas @@ -254,7 +254,7 @@ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction + dummyfunction --------------------------------------------------------------------- 1 10 @@ -269,7 +269,7 @@ SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY -- UDF in public, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction + dummyfunction --------------------------------------------------------------------- 1 10 @@ -323,7 +323,7 @@ LANGUAGE 'plpgsql' IMMUTABLE; -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM 
test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction2 + dummyfunction2 --------------------------------------------------------------------- 1 10 @@ -338,7 +338,7 @@ SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support. -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; - dummyfunction2 + dummyfunction2 --------------------------------------------------------------------- 1 10 @@ -384,7 +384,7 @@ CREATE OPERATOR test_schema_support.=== ( \c - - - :master_port -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -392,7 +392,7 @@ SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_s -- test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) @@ -402,7 +402,7 @@ SET search_path TO public; UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM test_schema_support.nation_hash ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 1 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 2 | al foxes promise slyly according to the regular accounts. bold requests alon @@ -410,8 +410,8 @@ SELECT * FROM test_schema_support.nation_hash ORDER BY 1,2,3,4; 3 | CANADA | 2 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 5 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 1 | ven packages wake quickly. regu - 6 | FRANCE | 4 | - 7 | GERMANY | 4 | + 6 | FRANCE | 4 | + 7 | GERMANY | 4 | (8 rows) --test with search_path is set @@ -419,7 +419,7 @@ SET search_path TO test_schema_support; UPDATE nation_hash SET n_regionkey = n_regionkey + 1; --verify modification SELECT * FROM nation_hash ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 2 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 3 | al foxes promise slyly according to the regular accounts. bold requests alon @@ -427,8 +427,8 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4; 3 | CANADA | 3 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 6 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 | ETHIOPIA | 2 | ven packages wake quickly. 
regu - 6 | FRANCE | 5 | - 7 | GERMANY | 5 | + 6 | FRANCE | 5 | + 7 | GERMANY | 5 | (8 rows) --test COLLATION with schema @@ -443,27 +443,27 @@ CREATE TABLE test_schema_support.nation_hash_collation( n_comment varchar(152) ); SELECT master_get_table_ddl_events('test_schema_support.nation_hash_collation') ORDER BY 1; - master_get_table_ddl_events + master_get_table_ddl_events --------------------------------------------------------------------- ALTER TABLE test_schema_support.nation_hash_collation OWNER TO postgres CREATE TABLE test_schema_support.nation_hash_collation (n_nationkey integer NOT NULL, n_name character(25) NOT NULL COLLATE test_schema_support.english, n_regionkey integer NOT NULL, n_comment character varying(152)) (2 rows) SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon @@ -474,7 +474,7 @@ SELECT * FROM test_schema_support.nation_hash_collation ORDER BY 1,2,3,4; (6 rows) SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; - n_comment + n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold @@ -493,20 +493,20 @@ CREATE TABLE nation_hash_collation_search_path( n_comment varchar(152) ); SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - n_nationkey | n_name | n_regionkey | n_comment + n_nationkey | n_name | n_regionkey | n_comment --------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. 
final dugouts are quickly across the furiously regular d @@ -517,7 +517,7 @@ SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC, 2 DESC, 3 DESC, (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; - n_comment + n_comment --------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold @@ -538,21 +538,21 @@ CREATE TABLE test_schema_support.nation_hash_composite_types( test_col test_schema_support.new_composite_type ); SELECT master_create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- insert some data to verify composite type queries \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; - n_nationkey | n_name | n_regionkey | n_comment | test_col + n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) @@ -560,7 +560,7 @@ SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = ' --test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; - n_nationkey | n_name | n_regionkey | n_comment | test_col + n_nationkey | n_name | n_regionkey | n_comment | test_col --------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) @@ -570,24 +570,24 @@ SET search_path TO public; ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :master_port @@ -596,22 +596,22 @@ NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :master_port @@ -620,24 +620,24 @@ SET search_path TO test_schema_support; ALTER TABLE nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | - new_col | integer | + n_comment | character varying(152) | + new_col | integer | (5 rows) \c - - - :master_port @@ -647,22 +647,22 @@ NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, ALTER TABLE 
nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null - n_comment | character varying(152) | + n_comment | character varying(152) | (4 rows) \c - - - :master_port @@ -673,7 +673,7 @@ CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -681,7 +681,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -701,7 +701,7 @@ CREATE INDEX index1 ON nation_hash(n_name); --verify INDEX is created SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE relid = 'test_schema_support.index1'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -709,7 +709,7 @@ SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE \c - - - :worker_1_port SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'test_schema_support.index1_1190003'::regclass; - Column | Type | Definition + Column | Type | Definition --------------------------------------------------------------------- n_name | character(25) | n_name (1 row) @@ -728,14 +728,14 @@ SET search_path TO public; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; - shardstate | nodename | nodeport + shardstate | nodename | nodeport --------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 @@ -746,14 +746,14 @@ SET search_path TO test_schema_support; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', 
:worker_1_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; - shardstate | nodename | nodeport + shardstate | nodename | nodeport --------------------------------------------------------------------- 1 | localhost | 57637 1 | localhost | 57638 @@ -762,7 +762,7 @@ SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid -- test master_apply_delete_command with schemas SET search_path TO public; SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ; - master_apply_delete_command + master_apply_delete_command --------------------------------------------------------------------- 1 (1 row) @@ -775,7 +775,7 @@ SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_appen SET search_path TO test_schema_support; \copy nation_append FROM STDIN with delimiter '|'; SELECT master_apply_delete_command('DELETE FROM nation_append') ; - master_apply_delete_command + master_apply_delete_command --------------------------------------------------------------------- 1 (1 row) @@ -808,23 +808,23 @@ CREATE TABLE test_schema_support_join_2.nation_hash ( n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; @@ -838,7 +838,7 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -853,7 +853,7 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -868,7 +868,7 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -883,7 +883,7 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -900,7 +900,7 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -915,7 +915,7 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE 
n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -930,7 +930,7 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -946,7 +946,7 @@ FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 14 (1 row) @@ -961,7 +961,7 @@ FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 14 (1 row) @@ -976,7 +976,7 @@ FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; - count + count --------------------------------------------------------------------- 14 (1 row) @@ -995,30 +995,30 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CONTEXT: SQL statement "CREATE USER "test-user"" PL/pgSQL function run_command_on_coordinator_and_workers(text) line 3 at EXECUTE - run_command_on_coordinator_and_workers + run_command_on_coordinator_and_workers --------------------------------------------------------------------- - + (1 row) SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); - run_command_on_coordinator_and_workers + run_command_on_coordinator_and_workers --------------------------------------------------------------------- - + (1 row) CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- verify that owner of the created schema is test-user \c - - - :worker_1_port \dn schema_with_user List of schemas - Name | Owner + Name | Owner --------------------------------------------------------------------- schema_with_user | test-user (1 row) @@ -1028,16 +1028,16 @@ SELECT create_reference_table('schema_with_user.test_table'); DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to table schema_with_user.test_table SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP OWNED") (localhost,57638,t,"DROP OWNED") (2 rows) SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); - run_command_on_coordinator_and_workers + run_command_on_coordinator_and_workers --------------------------------------------------------------------- - + (1 row) DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); @@ -1045,9 +1045,9 @@ DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- randomly insert data to evaluate below UDFs better @@ -1056,13 +1056,13 @@ INSERT INTO run_test_schema.test_table VALUES(7); INSERT INTO 
run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); - sum + sum --------------------------------------------------------------------- 49152 (1 row) SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); - sum + sum --------------------------------------------------------------------- 24576 (1 row) @@ -1077,15 +1077,15 @@ CREATE TABLE "CiTuS.TeeN"."TeeNTabLE.1!?!"(id int, "TeNANt_Id" int); CREATE TABLE "CiTUS.TEEN2"."CAPITAL_TABLE"(i int, j int); -- create distributed table with weird names SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('"CiTUS.TEEN2"."CAPITAL_TABLE"', 'i'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- truncate tables with weird names @@ -1093,7 +1093,7 @@ INSERT INTO "CiTuS.TeeN"."TeeNTabLE.1!?!" VALUES(1, 1); INSERT INTO "CiTUS.TEEN2"."CAPITAL_TABLE" VALUES(0, 1); TRUNCATE "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE"; SELECT count(*) FROM "CiTUS.TEEN2"."CAPITAL_TABLE"; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -1106,7 +1106,7 @@ SELECT * FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j + id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 1 | 0 | 0 | 1 @@ -1123,7 +1123,7 @@ FROM "CiTuS.TeeN"."TeeNTabLE.1!?!", "CiTUS.TEEN2"."CAPITAL_TABLE" WHERE "CiTUS.TEEN2"."CAPITAL_TABLE".i = "CiTuS.TeeN"."TeeNTabLE.1!?!"."TeNANt_Id" GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY "TeNANt_Id"; - id | TeNANt_Id | i | j + id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 @@ -1136,7 +1136,7 @@ FROM "CiTuS.TeeN"."TeeNTabLE.1!?!" 
join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j + id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 @@ -1153,7 +1153,7 @@ SELECT * FROM "cTE" join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j + id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 @@ -1172,7 +1172,7 @@ join "CiTUS.TEEN2"."CAPITAL_TABLE" on GROUP BY "TeNANt_Id", id, i, j HAVING "TeNANt_Id" > 0 AND j >= id ORDER BY 1,2,3,4; - id | TeNANt_Id | i | j + id | TeNANt_Id | i | j --------------------------------------------------------------------- 0 | 1 | 1 | 0 2 | 3 | 3 | 2 diff --git a/src/test/regress/expected/multi_select_distinct.out b/src/test/regress/expected/multi_select_distinct.out index da645625c..433c420dc 100644 --- a/src/test/regress/expected/multi_select_distinct.out +++ b/src/test/regress/expected/multi_select_distinct.out @@ -6,12 +6,12 @@ ANALYZE lineitem_hash_part; -- function calls are supported SELECT DISTINCT l_orderkey, now() FROM lineitem_hash_part LIMIT 0; - l_orderkey | now + l_orderkey | now --------------------------------------------------------------------- (0 rows) SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER BY 1 DESC LIMIT 3; - l_partkey | ?column? + l_partkey | ?column? --------------------------------------------------------------------- 199973 | 1 199946 | 1 @@ -20,7 +20,7 @@ SELECT DISTINCT l_partkey, 1 + (random() * 0)::int FROM lineitem_hash_part ORDER -- const expressions are supported SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; - l_orderkey | ?column? + l_orderkey | ?column? --------------------------------------------------------------------- 1 | 2 2 | 2 @@ -31,7 +31,7 @@ SELECT DISTINCT l_orderkey, 1+1 FROM lineitem_hash_part ORDER BY 1 LIMIT 5; -- non const expressions are also supported SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 LIMIT 5; - l_orderkey | ?column? + l_orderkey | ?column? --------------------------------------------------------------------- 1 | 2133 1 | 15636 @@ -42,7 +42,7 @@ SELECT DISTINCT l_orderkey, l_partkey + 1 FROM lineitem_hash_part ORDER BY 1, 2 -- column expressions are supported SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part ORDER BY 2 , 1 LIMIT 5; - l_orderkey | ?column? + l_orderkey | ?column? 
--------------------------------------------------------------------- 32 | COLLECT CODAIR 39 | COLLECT CODAIR @@ -53,7 +53,7 @@ SELECT DISTINCT l_orderkey, l_shipinstruct || l_shipmode FROM lineitem_hash_part -- function calls with const input are supported SELECT DISTINCT l_orderkey, strpos('AIR', 'A') FROM lineitem_hash_part ORDER BY 1,2 LIMIT 5; - l_orderkey | strpos + l_orderkey | strpos --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -68,7 +68,7 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') WHERE strpos(l_shipmode, 'I') > 1 ORDER BY 2, 1 LIMIT 5; - l_orderkey | strpos + l_orderkey | strpos --------------------------------------------------------------------- 1 | 2 3 | 2 @@ -79,7 +79,7 @@ SELECT DISTINCT l_orderkey, strpos(l_shipmode, 'I') -- row types are supported SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY 1 LIMIT 5; - pair + pair --------------------------------------------------------------------- (1,2132) (1,15635) @@ -92,19 +92,19 @@ SELECT DISTINCT (l_orderkey, l_partkey) AS pair FROM lineitem_hash_part ORDER BY -- verify counts match with respect to count(distinct) CREATE TEMP TABLE temp_orderkeys AS SELECT DISTINCT l_orderkey FROM lineitem_hash_part; SELECT COUNT(*) FROM temp_orderkeys; - count + count --------------------------------------------------------------------- 2985 (1 row) SELECT COUNT(DISTINCT l_orderkey) FROM lineitem_hash_part; - count + count --------------------------------------------------------------------- 2985 (1 row) SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_partkey < 5000 order by 1; - l_orderkey + l_orderkey --------------------------------------------------------------------- 1 3 @@ -128,7 +128,7 @@ SELECT DISTINCT l_orderkey FROM lineitem_hash_part WHERE l_orderkey < 500 and l_ -- distinct on non-partition column SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE l_orderkey > 5 and l_orderkey < 20 order by 1; - l_partkey + l_partkey --------------------------------------------------------------------- 79251 94780 @@ -141,15 +141,15 @@ SELECT DISTINCT l_partkey FROM lineitem_hash_part WHERE l_orderkey > 5 and l_ord (8 rows) SELECT DISTINCT l_shipmode FROM lineitem_hash_part ORDER BY 1 DESC; - l_shipmode + l_shipmode --------------------------------------------------------------------- - TRUCK - SHIP - REG AIR - RAIL - MAIL - FOB - AIR + TRUCK + SHIP + REG AIR + RAIL + MAIL + FOB + AIR (7 rows) -- distinct with multiple columns @@ -157,7 +157,7 @@ SELECT DISTINCT l_orderkey, o_orderdate FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE l_orderkey < 10 ORDER BY l_orderkey; - l_orderkey | o_orderdate + l_orderkey | o_orderdate --------------------------------------------------------------------- 1 | 01-02-1996 2 | 12-01-1996 @@ -176,7 +176,7 @@ SELECT DISTINCT l_orderkey, count(*) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 7 | 7 68 | 7 @@ -207,7 +207,7 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.count DESC, remote_scan.l_orderkey @@ -234,7 +234,7 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 5 ORDER BY 2 DESC, 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: 
remote_scan.count DESC, remote_scan.l_orderkey @@ -260,7 +260,7 @@ SELECT DISTINCT count(*) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - count + count --------------------------------------------------------------------- 1 2 @@ -276,7 +276,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) @@ -302,7 +302,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey, l_linenumber ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)) @@ -331,7 +331,7 @@ SELECT DISTINCT l_suppkey, count(*) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - l_suppkey | count + l_suppkey | count --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -352,7 +352,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -380,7 +380,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -410,7 +410,7 @@ SELECT DISTINCT l_suppkey, avg(l_partkey) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - l_suppkey | avg + l_suppkey | avg --------------------------------------------------------------------- 1 | 190000.000000000000 2 | 172450.000000000000 @@ -432,7 +432,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -460,7 +460,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -489,7 +489,7 @@ SELECT DISTINCT ON (l_suppkey) avg(l_partkey) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - avg + avg --------------------------------------------------------------------- 190000.000000000000 172450.000000000000 @@ -511,7 +511,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Unique @@ -538,7 +538,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY l_suppkey,1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Unique @@ -565,7 +565,7 @@ SELECT DISTINCT avg(ceil(l_partkey / 2)) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - avg + avg --------------------------------------------------------------------- 9 39 @@ -586,7 +586,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -614,7 +614,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -643,7 +643,7 @@ SELECT DISTINCT sum(l_suppkey) + count(l_partkey) AS dis GROUP BY 
l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - dis + dis --------------------------------------------------------------------- 2 3 @@ -664,7 +664,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -692,7 +692,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_suppkey, l_linenumber ORDER BY 1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -722,13 +722,13 @@ SELECT DISTINCT * GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment --------------------------------------------------------------------- 1 | 2132 | 4633 | 4 | 28.00 | 28955.64 | 0.09 | 0.06 | N | O | 04-21-1996 | 03-30-1996 | 05-16-1996 | NONE | AIR | lites. fluffily even de 1 | 15635 | 638 | 6 | 32.00 | 49620.16 | 0.07 | 0.02 | N | O | 01-30-1996 | 02-07-1996 | 02-03-1996 | DELIVER IN PERSON | MAIL | arefully slyly ex 1 | 24027 | 1534 | 5 | 24.00 | 22824.48 | 0.10 | 0.04 | N | O | 03-30-1996 | 03-14-1996 | 04-01-1996 | NONE | FOB | pending foxes. slyly re 1 | 63700 | 3701 | 3 | 8.00 | 13309.60 | 0.10 | 0.02 | N | O | 01-29-1996 | 03-05-1996 | 01-31-1996 | TAKE BACK RETURN | REG AIR | riously. regular, express dep - 1 | 67310 | 7311 | 2 | 36.00 | 45983.16 | 0.09 | 0.06 | N | O | 04-12-1996 | 02-28-1996 | 04-20-1996 | TAKE BACK RETURN | MAIL | ly final dependencies: slyly bold + 1 | 67310 | 7311 | 2 | 36.00 | 45983.16 | 0.09 | 0.06 | N | O | 04-12-1996 | 02-28-1996 | 04-20-1996 | TAKE BACK RETURN | MAIL | ly final dependencies: slyly bold 1 | 155190 | 7706 | 1 | 17.00 | 21168.23 | 0.04 | 0.02 | N | O | 03-13-1996 | 02-12-1996 | 03-22-1996 | DELIVER IN PERSON | TRUCK | egular courts above the 2 | 106170 | 1191 | 1 | 38.00 | 44694.46 | 0.00 | 0.05 | N | O | 01-28-1997 | 01-14-1997 | 02-02-1997 | TAKE BACK RETURN | RAIL | ven requests. 
deposits breach a 3 | 4297 | 1798 | 1 | 45.00 | 54058.05 | 0.06 | 0.00 | R | F | 02-02-1994 | 01-04-1994 | 02-23-1994 | NONE | AIR | ongside of the furiously brave acco @@ -744,7 +744,7 @@ EXPLAIN (COSTS FALSE) GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -774,7 +774,7 @@ EXPLAIN (COSTS FALSE) GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -802,7 +802,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - count | count + count | count --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -838,7 +838,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 @@ -864,7 +864,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_orderkey ORDER BY 1,2; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.count, remote_scan.count_1 @@ -889,7 +889,7 @@ SELECT DISTINCT ceil(count(case when l_partkey > 100000 THEN 1 ELSE 0 END) / 2) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - count + count --------------------------------------------------------------------- 0 1 @@ -904,7 +904,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) @@ -929,7 +929,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part GROUP BY l_suppkey ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (ceil(((COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) / 2))::double precision)) @@ -958,7 +958,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 2 LIMIT 15; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -985,7 +985,7 @@ EXPLAIN (COSTS FALSE) GROUP BY l_orderkey ORDER BY 2 LIMIT 15; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1013,7 +1013,7 @@ SELECT DISTINCT l_partkey, count(*) GROUP BY 1 HAVING count(*) > 2 ORDER BY 1; - l_partkey | count + l_partkey | count --------------------------------------------------------------------- 1051 | 3 1927 | 3 @@ -1035,7 +1035,7 @@ EXPLAIN (COSTS FALSE) GROUP BY 1 HAVING count(*) > 2 ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey @@ -1059,7 +1059,7 @@ SELECT DISTINCT l_partkey, avg(l_linenumber) GROUP BY 1 HAVING avg(l_linenumber) > 2 ORDER BY 1; - l_partkey | avg + l_partkey | avg --------------------------------------------------------------------- 18 | 7.0000000000000000 79 | 6.0000000000000000 @@ -1083,7 +1083,7 @@ SELECT DISTINCT l_partkey, l_suppkey FROM lineitem_hash_part WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; - l_partkey | l_suppkey + l_partkey | l_suppkey 
--------------------------------------------------------------------- 2132 | 4633 4297 | 1798 @@ -1107,7 +1107,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' AND l_orderkey < 100 ORDER BY 1, 2; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.l_partkey, remote_scan.l_suppkey @@ -1130,7 +1130,7 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey, l_suppkey FROM lineitem_hash_part WHERE l_orderkey < 35 ORDER BY 1; - l_orderkey | l_partkey | l_suppkey + l_orderkey | l_partkey | l_suppkey --------------------------------------------------------------------- 1 | 155190 | 7706 2 | 106170 | 1191 @@ -1149,7 +1149,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part WHERE l_orderkey < 35 ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Unique -> Sort @@ -1174,7 +1174,7 @@ SELECT DISTINCT ON (l_partkey) l_partkey, l_orderkey FROM lineitem_hash_part ORDER BY 1,2 LIMIT 20; - l_partkey | l_orderkey + l_partkey | l_orderkey --------------------------------------------------------------------- 18 | 12005 79 | 5121 @@ -1203,7 +1203,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part ORDER BY 1,2 LIMIT 20; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Unique @@ -1227,7 +1227,7 @@ SELECT DISTINCT ON (o_custkey) o_custkey, l_orderkey FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15 ORDER BY 1,2; - o_custkey | l_orderkey + o_custkey | l_orderkey --------------------------------------------------------------------- 1 | 9154 2 | 10563 @@ -1248,7 +1248,7 @@ EXPLAIN (COSTS FALSE) WHERE o_custkey < 15 ORDER BY 1,2; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Unique -> Sort @@ -1265,7 +1265,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Unique -> Sort @@ -1279,7 +1279,7 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 20 ORDER BY 1,2,3; - o_custkey | l_orderkey | l_linenumber | l_partkey + o_custkey | l_orderkey | l_linenumber | l_partkey --------------------------------------------------------------------- 1 | 9154 | 1 | 86513 1 | 14656 | 1 | 59539 @@ -1326,7 +1326,7 @@ EXPLAIN (COSTS FALSE) FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 20; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Unique -> Sort @@ -1340,7 +1340,7 @@ SELECT DISTINCT ON (o_custkey, l_orderkey) o_custkey, l_orderkey, l_linenumber, FROM lineitem_hash_part JOIN orders_hash_part ON (l_orderkey = o_orderkey) WHERE o_custkey < 15 ORDER BY 1,2,3 DESC; - o_custkey | l_orderkey | l_linenumber | l_partkey + o_custkey | l_orderkey | l_linenumber | l_partkey --------------------------------------------------------------------- 1 | 9154 | 7 | 173448 1 | 14656 | 1 | 59539 @@ -1380,7 +1380,7 @@ SELECT DISTINCT l_orderkey, l_partkey ) q ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey + l_orderkey | l_partkey --------------------------------------------------------------------- 1 | 2132 1 | 15635 @@ 
-1402,7 +1402,7 @@ EXPLAIN (COSTS FALSE) ) q ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1430,7 +1430,7 @@ SELECT DISTINCT l_orderkey, cnt ) q ORDER BY 1,2 LIMIT 10; - l_orderkey | cnt + l_orderkey | cnt --------------------------------------------------------------------- 1 | 6 2 | 1 @@ -1453,7 +1453,7 @@ EXPLAIN (COSTS FALSE) ) q ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1485,7 +1485,7 @@ SELECT DISTINCT ON (l_orderkey) l_orderkey, l_partkey WHERE r > 1 ORDER BY 1,2 LIMIT 10; - l_orderkey | l_partkey + l_orderkey | l_partkey --------------------------------------------------------------------- 1 | 2132 2 | 106170 @@ -1508,7 +1508,7 @@ EXPLAIN (COSTS FALSE) WHERE r > 1 ORDER BY 1,2 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Unique @@ -1537,7 +1537,7 @@ SELECT DISTINCT ON (l_partkey) l_orderkey, l_partkey WHERE r > 1 ORDER BY 2,1 LIMIT 10; - l_orderkey | l_partkey + l_orderkey | l_partkey --------------------------------------------------------------------- 12005 | 18 5121 | 79 @@ -1560,7 +1560,7 @@ EXPLAIN (COSTS FALSE) WHERE r > 1 ORDER BY 2,1 LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Unique diff --git a/src/test/regress/expected/multi_select_for_update.out b/src/test/regress/expected/multi_select_for_update.out index 64dfeee83..5267e7503 100644 --- a/src/test/regress/expected/multi_select_for_update.out +++ b/src/test/regress/expected/multi_select_for_update.out @@ -2,66 +2,66 @@ -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. --- To find the relation size and total relation size citus_relation_size and +-- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. 
SET citus.next_shard_id TO 1460000; SET citus.shard_replication_factor to 1; CREATE TABLE test_table_1_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_1_rf1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_1_rf1 values(1,2),(2,3),(3,4),(15,16); CREATE TABLE test_table_2_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_2_rf1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_2_rf1 values(1,2),(2,3),(3,4); CREATE TABLE ref_table(id int, val_1 int); SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO ref_table values(1,2),(3,4),(5,6); CREATE TABLE ref_table_2(id int, val_1 int); SELECT create_reference_table('ref_table_2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO ref_table_2 values(3,4),(5,6),(8,9); SET citus.shard_replication_factor to 2; CREATE TABLE test_table_3_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_3_rf2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_3_rf2 values(1,2),(2,3),(3,4); CREATE TABLE test_table_4_rf2(id int, val_1 int); SELECT create_distributed_table('test_table_4_rf2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_table_4_rf2 values(1,2),(2,3),(3,4); -- Hash tables with RF = 1 is supported for router planner queries SELECT * FROM test_table_1_rf1 as tt1 INNER JOIN test_table_1_rf1 as tt2 on tt1.id = tt2.id - WHERE tt1.id = 1 + WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; - id | val_1 | id | val_1 + id | val_1 | id | val_1 --------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -71,7 +71,7 @@ SELECT * FROM test_table_1_rf1 as tt1 WHERE tt1.id = 1 OR tt1.id = 15 ORDER BY 1 FOR UPDATE; - id | val_1 + id | val_1 --------------------------------------------------------------------- 1 | 2 15 | 16 @@ -109,7 +109,7 @@ SELECT * FROM WHERE tt1.id = 1 ORDER BY 1 FOR UPDATE; - id | val_1 | id | val_1 + id | val_1 | id | val_1 --------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -120,7 +120,7 @@ SELECT * FROM WHERE tt1.id = 1 ORDER BY 1 FOR SHARE; - id | val_1 | id | val_1 + id | val_1 | id | val_1 --------------------------------------------------------------------- 1 | 2 | 1 | 2 (1 row) @@ -131,7 +131,7 @@ SELECT * FROM ORDER BY 1 FOR UPDATE OF rt1; - id | val_1 | id | val_1 + id | val_1 | id | val_1 --------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 @@ -144,17 +144,17 @@ SELECT * FROM FOR UPDATE OF rt1 NOWAIT; - id | val_1 | id | val_1 + id | val_1 | id | val_1 --------------------------------------------------------------------- 3 | 4 | 3 | 4 5 | 6 | 5 | 6 (2 rows) -- queries with CTEs are supported -WITH first_value AS ( +WITH first_value AS ( SELECT val_1 FROM test_table_1_rf1 WHERE id = 1 FOR UPDATE) SELECT * FROM first_value; - val_1 + val_1 
--------------------------------------------------------------------- 2 (1 row) @@ -164,14 +164,14 @@ WITH update_table AS ( UPDATE test_table_1_rf1 SET val_1 = 10 WHERE id = 1 RETURNING * ) SELECT * FROM update_table FOR UPDATE; - id | val_1 + id | val_1 --------------------------------------------------------------------- 1 | 10 (1 row) -- Subqueries also supported SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; - id | val_1 + id | val_1 --------------------------------------------------------------------- 1 | 10 (1 row) diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index 1eed1c72e..61eb0921e 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -8,9 +8,9 @@ CREATE TABLE multi_shard_modify_test ( t_name varchar(25) not null, t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv'); @@ -20,7 +20,7 @@ BEGIN; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -28,14 +28,14 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) ROLLBACK; SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 27 (1 row) @@ -53,7 +53,7 @@ ERROR: functions used in the WHERE clause of modification queries on distribute SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -62,7 +62,7 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -75,9 +75,9 @@ HINT: Run the command directly ERROR: relation temp_nations is not distributed -- commands with a USING clause are unsupported SELECT create_distributed_table('temp_nations', 'name', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); @@ -88,7 +88,7 @@ ERROR: complex joins are only supported when all distributed tables are co-loca SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -101,7 +101,7 @@ ERROR: cannot perform an INSERT without a partition column value -- Check that we can successfully delete from multiple shards with 1PC SET citus.multi_shard_commit_protocol TO '1pc'; SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 25 (1 row) @@ -109,13 +109,13 @@ SELECT count(*) FROM multi_shard_modify_test; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 23 (1 row) @@ -125,13 +125,13 @@ SET citus.multi_shard_commit_protocol TO '2pc'; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 21 (1 row) @@ -145,7 +145,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 15 - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -155,7 +155,7 @@ SET client_min_messages TO NOTICE; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' '); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -164,13 +164,13 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; - t_name + t_name --------------------------------------------------------------------- warsaw (1 row) @@ -179,13 +179,13 @@ SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_name + t_name --------------------------------------------------------------------- ??? ??? @@ -197,13 +197,13 @@ SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_value + t_value --------------------------------------------------------------------- 296 296 @@ -215,13 +215,13 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; - t_name | t_value + t_name | t_value --------------------------------------------------------------------- somename | 333 somename | 333 @@ -233,13 +233,13 @@ SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0; - t_name + t_name --------------------------------------------------------------------- nice city nice city @@ -254,7 +254,7 @@ ERROR: modifying the partition value of rows is not allowed SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' '); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -263,7 +263,7 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -277,13 +277,13 @@ ERROR: cannot perform an INSERT without a partition column value SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value + t_value --------------------------------------------------------------------- 10 (1 row) @@ -292,13 +292,13 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value + t_value --------------------------------------------------------------------- 47 (1 row) @@ -308,7 +308,7 @@ CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -317,13 +317,13 @@ HINT: Run the command directly SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. 
HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; - t_value + t_value --------------------------------------------------------------------- 78 (1 row) @@ -332,7 +332,7 @@ SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) @@ -346,7 +346,7 @@ ERROR: functions used in UPDATE queries on distributed tables must not be VOLAT SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()'); WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release. HINT: Run the command directly - master_modify_multiple_shards + master_modify_multiple_shards --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out index 96e9c8ec7..f00fe3570 100644 --- a/src/test/regress/expected/multi_shard_update_delete.out +++ b/src/test/regress/expected/multi_shard_update_delete.out @@ -7,73 +7,73 @@ SET citus.shard_replication_factor to 1; SET citus.multi_shard_modify_mode to 'parallel'; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY users_test_table FROM STDIN DELIMITER AS ','; CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY events_test_table FROM STDIN DELIMITER AS ','; CREATE TABLE events_reference_copy_table (like events_test_table); SELECT create_reference_table('events_reference_copy_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO events_reference_copy_table SELECT * FROM events_test_table; CREATE TABLE users_reference_copy_table (like users_test_table); SELECT create_reference_table('users_reference_copy_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO users_reference_copy_table SELECT * FROM users_test_table; -- Run multi shard updates and deletes without transaction on hash distributed tables UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; - count | sum + count | sum --------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum + count | sum --------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = 
value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum + count | sum --------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; - sum + sum --------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count + count --------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -83,7 +83,7 @@ BEGIN; UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 0 (1 row) @@ -93,7 +93,7 @@ BEGIN; UPDATE users_test_table SET value_3 = 1; ROLLBACK; SELECT SUM(value_3) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 0 (1 row) @@ -106,7 +106,7 @@ SET citus.multi_shard_modify_mode to sequential; UPDATE users_test_table SET value_3 = 1; END; SELECT SUM(value_3) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 16 (1 row) @@ -127,7 +127,7 @@ UPDATE users_test_table SET value_3 = 2; UPDATE users_test_table SET value_3 = 0; END; SELECT SUM(value_3) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 0 (1 row) @@ -135,14 +135,14 @@ SELECT SUM(value_3) FROM users_test_table; -- Check with kind of constraints UPDATE users_test_table SET value_3 = 1 WHERE user_id = 3 or true; SELECT COUNT(*), SUM(value_3) FROM users_test_table; - count | sum + count | sum --------------------------------------------------------------------- 16 | 16 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id = 20 and false; SELECT COUNT(*), SUM(value_3) FROM users_test_table; - count | sum + count | sum --------------------------------------------------------------------- 16 | 16 (1 row) @@ -156,7 +156,7 @@ EXECUTE foo_plan(7,35); EXECUTE foo_plan(9,45); EXECUTE foo_plan(0,0); SELECT SUM(value_1), SUM(value_3) FROM users_test_table; - sum | sum + sum | sum --------------------------------------------------------------------- 0 | 0 (1 row) @@ -174,38 +174,38 @@ INSERT INTO append_stage_table_2 VALUES(9,2); INSERT INTO append_stage_table_2 VALUES(10,4); CREATE TABLE test_append_table(id int, col_2 int); SELECT create_distributed_table('test_append_table','id','append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('test_append_table'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1440010 (1 row) SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) SELECT master_create_empty_shard('test_append_table') AS new_shard_id; - new_shard_id + new_shard_id 
--------------------------------------------------------------------- 1440011 (1 row) SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', 'localhost', :master_port); - master_append_table_to_shard + master_append_table_to_shard --------------------------------------------------------------------- 0.00533333 (1 row) UPDATE test_append_table SET col_2 = 5; SELECT * FROM test_append_table ORDER BY 1 DESC, 2 DESC; - id | col_2 + id | col_2 --------------------------------------------------------------------- 10 | 5 9 | 5 @@ -227,15 +227,15 @@ CREATE TABLE tt1_1120 partition of tt1 for VALUES FROM (11) to (20); INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17), (2,12); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE tt1 SET col_2 = 13; DELETE FROM tt1 WHERE id = 1 or id = 3 or id = 5; SELECT * FROM tt1 ORDER BY 1 DESC, 2 DESC; - id | col_2 + id | col_2 --------------------------------------------------------------------- 8 | 13 6 | 13 @@ -253,7 +253,7 @@ UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20; UPDATE tt1 SET col_2 = 7 WHERE col_2 < 10 and col_2 > 5; COMMIT; SELECT * FROM tt1 ORDER BY id; - id | col_2 + id | col_2 --------------------------------------------------------------------- 2 | 12 4 | 7 @@ -271,7 +271,7 @@ DELETE FROM tt1_510; DELETE FROM tt1_1120; COMMIT; SELECT * FROM tt1 ORDER BY id; - id | col_2 + id | col_2 --------------------------------------------------------------------- (0 rows) @@ -279,9 +279,9 @@ DROP TABLE tt1; -- Update and copy in the same transaction CREATE TABLE tt2(id int, col_2 int); SELECT create_distributed_table('tt2','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -289,7 +289,7 @@ BEGIN; UPDATE tt2 SET col_2 = 1; COMMIT; SELECT * FROM tt2 ORDER BY id; - id | col_2 + id | col_2 --------------------------------------------------------------------- 1 | 1 2 | 1 @@ -300,7 +300,7 @@ SELECT * FROM tt2 ORDER BY id; -- Test returning with both type of executors UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; - id | col_2 + id | col_2 --------------------------------------------------------------------- 1 | 5 2 | 5 @@ -311,7 +311,7 @@ UPDATE tt2 SET col_2 = 5 RETURNING id, col_2; SET citus.multi_shard_modify_mode to sequential; UPDATE tt2 SET col_2 = 3 RETURNING id, col_2; - id | col_2 + id | col_2 --------------------------------------------------------------------- 1 | 3 2 | 3 @@ -327,9 +327,9 @@ SET citus.multi_shard_modify_mode to DEFAULT; SET citus.shard_count to 6; CREATE TABLE events_test_table_2 (user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('events_test_table_2', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY events_test_table_2 FROM STDIN DELIMITER AS ','; @@ -341,9 +341,9 @@ INSERT INTO test_table_1 VALUES(2, '2015-02-01 08:31:16', 7); INSERT INTO test_table_1 VALUES(3, '2111-01-12 08:35:19', 9); SELECT create_distributed_table('test_table_1', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- We can pushdown query if there is partition key equality @@ -367,7 +367,7 @@ WHERE now() > (SELECT max(date_col) WHERE test_table_1.id = events_test_table_2.user_id GROUP BY id) RETURNING *; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 1 | 5 | 7 | 7 1 | 20 | 12 | 25 @@ -393,7 +393,7 @@ WHERE user_id IN (SELECT user_id UNION SELECT user_id FROM events_test_table) returning value_3; - value_3 + value_3 --------------------------------------------------------------------- 0 0 @@ -415,7 +415,7 @@ WHERE user_id IN (SELECT user_id UNION ALL SELECT user_id FROM events_test_table) returning value_3; - value_3 + value_3 --------------------------------------------------------------------- 0 0 @@ -491,7 +491,7 @@ INSERT INTO users_test_table SELECT * FROM events_test_table WHERE events_test_table.user_id = 1 OR events_test_table.user_id = 5; SELECT SUM(value_2) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 169 (1 row) @@ -501,7 +501,7 @@ SET value_2 = 1 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; SELECT SUM(value_2) FROM users_test_table; - sum + sum --------------------------------------------------------------------- 97 (1 row) @@ -511,9 +511,9 @@ COMMIT; CREATE SCHEMA sec_schema; CREATE TABLE sec_schema.tt1(id int, value_1 int); SELECT create_distributed_table('sec_schema.tt1','id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO sec_schema.tt1 values(1,1),(2,2),(7,7),(9,9); @@ -523,7 +523,7 @@ WHERE id < (SELECT max(value_2) FROM events_test_table_2 WHERE sec_schema.tt1.id = events_test_table_2.user_id GROUP BY user_id) RETURNING *; - id | value_1 + id | value_1 --------------------------------------------------------------------- 7 | 11 9 | 11 @@ -561,7 +561,7 @@ SET col_3 = 6 WHERE date_col IN (SELECT now()); -- Test with prepared statements SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -577,7 +577,7 @@ EXECUTE foo_plan_2(7,35); EXECUTE foo_plan_2(9,45); EXECUTE foo_plan_2(0,0); SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -619,7 +619,7 @@ UPDATE users_reference_copy_table SET value_2 = 5 FROM events_test_table WHERE users_reference_copy_table.user_id = events_test_table.user_id; -ERROR: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables +ERROR: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables -- We cannot push down it if the query has outer join and using UPDATE events_test_table SET value_2 = users_test_table.user_id @@ -752,7 +752,7 @@ ERROR: more than one row returned by a subquery used as an expression BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM users_test_table ORDER BY user_id; FETCH test_cursor; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 1 | 2 | 5 | 0 (1 row) @@ 
-762,7 +762,7 @@ ERROR: cannot run DML queries with cursors ROLLBACK; -- Stable functions are supported SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 + id | date_col | col_3 --------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 7 @@ -771,7 +771,7 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; UPDATE test_table_1 SET col_3 = 3 WHERE date_col < now(); SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 + id | date_col | col_3 --------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 2 | Sun Feb 01 08:31:16 2015 PST | 3 @@ -780,7 +780,7 @@ SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; DELETE FROM test_table_1 WHERE date_col < current_timestamp; SELECT * FROM test_table_1 ORDER BY 1 DESC, 2 DESC, 3 DESC; - id | date_col | col_3 + id | date_col | col_3 --------------------------------------------------------------------- 3 | Mon Jan 12 08:35:19 2111 PST | 9 (1 row) @@ -793,9 +793,9 @@ INSERT INTO test_table_2 VALUES(2, random()); INSERT INTO test_table_2 VALUES(3, random()); SELECT create_distributed_table('test_table_2', 'id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE test_table_2 SET double_col = random(); @@ -803,41 +803,41 @@ ERROR: functions used in UPDATE queries on distributed tables must not be VOLAT DROP TABLE test_table_2; -- Run multi shard updates and deletes without transaction on reference tables SELECT COUNT(*) FROM users_reference_copy_table; - count + count --------------------------------------------------------------------- 15 (1 row) UPDATE users_reference_copy_table SET value_1 = 1; SELECT SUM(value_1) FROM users_reference_copy_table; - sum + sum --------------------------------------------------------------------- 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count | sum + count | sum --------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_reference_copy_table SET value_2 = value_2 + 1 WHERE user_id = 3 or user_id = 5; SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count | sum + count | sum --------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_reference_copy_table SET value_3 = 0 WHERE user_id <> 3; SELECT SUM(value_3) FROM users_reference_copy_table WHERE user_id <> 3; - sum + sum --------------------------------------------------------------------- 0 (1 row) DELETE FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -847,49 +847,49 @@ DROP TABLE users_test_table; SET citus.shard_replication_factor to 2; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY users_test_table FROM STDIN DELIMITER AS ','; -- Run multi shard updates and deletes without transaction on hash distributed 
tables UPDATE users_test_table SET value_1 = 1; SELECT COUNT(*), SUM(value_1) FROM users_test_table; - count | sum + count | sum --------------------------------------------------------------------- 15 | 15 (1 row) SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum + count | sum --------------------------------------------------------------------- 4 | 52 (1 row) UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3; SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3; - count | sum + count | sum --------------------------------------------------------------------- 4 | 56 (1 row) UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5; SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5; - sum + sum --------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count + count --------------------------------------------------------------------- 4 (1 row) DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5; SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5; - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out index c85b9839b..fb999d87c 100644 --- a/src/test/regress/expected/multi_simple_queries.out +++ b/src/test/regress/expected/multi_simple_queries.out @@ -1,7 +1,7 @@ SET citus.next_shard_id TO 850000; -- many of the tests in this file is intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. 
+-- We have a bunch of other tests that trigger the fast-path router SET citus.enable_fast_path_router_planner TO false; -- =================================================================== -- test end-to-end query functionality -- =================================================================== @@ -17,27 +17,27 @@ CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('articles', 2, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- create a bunch of test data @@ -100,14 +100,14 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; - title + title --------------------------------------------------------------------- aggrandize absentness @@ -120,7 +120,7 @@ SELECT title FROM articles WHERE author_id = 10; SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; - title | word_count + title | word_count --------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 @@ -134,7 +134,7 @@ SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; - title | id + title | id --------------------------------------------------------------------- aruru | 5 adversa | 15 @@ -144,7 +144,7 @@ SELECT title, id FROM articles SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; - title | author_id + title | author_id --------------------------------------------------------------------- aseptic | 7 auriga | 7 @@ -164,7 +164,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 2 | 61782 10 | 59955 @@ -175,7 +175,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 2 | 2 | abducing | 13642 10 | 10 | aggrandize | 17277 @@ -192,7 +192,7 @@ ORDER BY 1,2,3; -- queries using CTEs are supported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles ORDER 
BY 1 LIMIT 5; - title + title --------------------------------------------------------------------- abducing abeyance @@ -203,7 +203,7 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; - id | author_id | title | word_count | position + id | author_id | title | word_count | position --------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 @@ -214,7 +214,7 @@ SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DE -- subqueries are supported in WHERE clause in Citus even if the relations are not distributed SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a'); - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -222,7 +222,7 @@ SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIK SELECT articles.id,test.word_count FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id ORDER BY articles.id; - id | word_count + id | word_count --------------------------------------------------------------------- 1 | 9572 2 | 13642 @@ -305,7 +305,7 @@ HINT: If you want to discard the results of a SELECT, use PERFORM instead. CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; - count + count --------------------------------------------------------------------- 50 (1 row) @@ -329,7 +329,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 4 | 66325 2 | 61782 @@ -342,7 +342,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 2 4 @@ -355,7 +355,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 2 4 @@ -365,7 +365,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 1 2 @@ -380,18 +380,18 @@ SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 1 2 8 (3 rows) -SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders +SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; - o_orderstatus | count | avg + o_orderstatus | count | avg --------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 @@ -402,7 +402,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; - o_orderstatus | 
sum | avg + o_orderstatus | sum | avg --------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 @@ -419,7 +419,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -434,7 +434,7 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -448,7 +448,7 @@ SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -464,7 +464,7 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value + article_id | random_value --------------------------------------------------------------------- 1 | 9572 11 | 14817 @@ -482,7 +482,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 17277 10 | 1820 @@ -498,7 +498,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 19519 10 | 19519 @@ -513,7 +513,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -530,7 +530,7 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 @@ -550,7 +550,7 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg + avg --------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -566,7 +566,7 @@ ERROR: unsupported aggregate function invalid DROP AGGREGATE invalid(int); SET client_min_messages to 'DEBUG2'; -- max, min, sum, count is somehow implemented --- differently in distributed planning +-- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles @@ -574,7 +574,7 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt + max | min | sum | cnt 
--------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -626,7 +626,7 @@ SELECT count(*) FROM ( xmax IS NOT NULL ) x; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 50 (1 row) @@ -636,7 +636,7 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -644,7 +644,7 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -652,7 +652,7 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -665,7 +665,7 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (100) WHERE author_id = 1 ORDER BY DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -681,7 +681,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -690,7 +690,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -699,7 +699,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -713,7 +713,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 diff --git a/src/test/regress/expected/multi_simple_queries_0.out b/src/test/regress/expected/multi_simple_queries_0.out index c69518eea..134074c8d 100644 --- a/src/test/regress/expected/multi_simple_queries_0.out +++ b/src/test/regress/expected/multi_simple_queries_0.out @@ -1,7 +1,7 @@ SET citus.next_shard_id TO 850000; -- many of the tests in this file is 
intended for testing non-fast-path --- router planner, so we're explicitly disabling it in this file. --- We've bunch of other tests that triggers fast-path-router +-- router planner, so we're explicitly disabling it in this file. +-- We have a bunch of other tests that trigger the fast-path router SET citus.enable_fast_path_router_planner TO false; -- =================================================================== -- test end-to-end query functionality -- =================================================================== @@ -17,27 +17,27 @@ CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('articles', 2, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- create a bunch of test data @@ -100,14 +100,14 @@ DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; - title + title --------------------------------------------------------------------- aggrandize absentness @@ -120,7 +120,7 @@ SELECT title FROM articles WHERE author_id = 10; SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; - title | word_count + title | word_count --------------------------------------------------------------------- anjanette | 19519 aggrandize | 17277 @@ -134,7 +134,7 @@ SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; - title | id + title | id --------------------------------------------------------------------- aruru | 5 adversa | 15 @@ -144,7 +144,7 @@ SELECT title, id FROM articles SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; - title | author_id + title | author_id --------------------------------------------------------------------- aseptic | 7 auriga | 7 @@ -164,7 +164,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 2 | 61782 10 | 59955 @@ -175,7 +175,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2 ORDER BY 1,2,3; - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 2 | 
2 | abducing | 13642 10 | 10 | aggrandize | 17277 @@ -192,7 +192,7 @@ ORDER BY 1,2,3; -- queries using CTEs are supported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles ORDER BY 1 LIMIT 5; - title + title --------------------------------------------------------------------- abducing abeyance @@ -203,7 +203,7 @@ SELECT title FROM articles ORDER BY 1 LIMIT 5; -- queries which involve functions in FROM clause are recursively planned SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 5; - id | author_id | title | word_count | position + id | author_id | title | word_count | position --------------------------------------------------------------------- 50 | 10 | anjanette | 19519 | 3 40 | 10 | attemper | 14976 | 3 @@ -249,7 +249,7 @@ HINT: If you want to discard the results of a SELECT, use PERFORM instead. CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; - count + count --------------------------------------------------------------------- 50 (1 row) @@ -273,7 +273,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; - author_id | corpus_size + author_id | corpus_size --------------------------------------------------------------------- 4 | 66325 2 | 61782 @@ -286,7 +286,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 2 4 @@ -299,7 +299,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 2 4 @@ -309,7 +309,7 @@ SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 1 2 @@ -324,18 +324,18 @@ SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; - author_id + author_id --------------------------------------------------------------------- 1 2 8 (3 rows) -SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders +SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; - o_orderstatus | count | avg + o_orderstatus | count | avg --------------------------------------------------------------------- O | 1461 | 143326.447029431896 P | 75 | 164847.914533333333 @@ -346,7 +346,7 @@ SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; - o_orderstatus | sum | avg + o_orderstatus | sum | avg --------------------------------------------------------------------- F | 8559 | 3.0126715945089757 O | 8904 | 3.0040485829959514 @@ -363,7 +363,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -378,7 +378,7 @@ SELECT * WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router 
plan DEBUG: Plan is router executable - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -392,7 +392,7 @@ SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; DEBUG: Router planner cannot handle multi-shard select queries - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -408,7 +408,7 @@ SELECT id as article_id, word_count * id as random_value DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - article_id | random_value + article_id | random_value --------------------------------------------------------------------- 1 | 9572 11 | 14817 @@ -426,7 +426,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 17277 10 | 1820 @@ -442,7 +442,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 10 - first_author | second_word_count + first_author | second_word_count --------------------------------------------------------------------- 10 | 19519 10 | 19519 @@ -457,7 +457,7 @@ SELECT * DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -474,7 +474,7 @@ SELECT id DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id + id --------------------------------------------------------------------- 1 11 @@ -494,7 +494,7 @@ SELECT avg(word_count) DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - avg + avg --------------------------------------------------------------------- 12356.400000000000 (1 row) @@ -510,7 +510,7 @@ ERROR: unsupported aggregate function invalid DROP AGGREGATE invalid(int); SET client_min_messages to 'DEBUG2'; -- max, min, sum, count is somehow implemented --- differently in distributed planning +-- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles @@ -518,7 +518,7 @@ SELECT max(word_count) as max, min(word_count) as min, DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 2 - max | min | sum | cnt + max | min | sum | cnt --------------------------------------------------------------------- 18185 | 2728 | 61782 | 5 (1 row) @@ -570,7 +570,7 @@ SELECT count(*) FROM ( xmax IS NOT NULL ) x; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 50 (1 row) @@ -580,7 +580,7 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | 
word_count --------------------------------------------------------------------- (0 rows) @@ -588,7 +588,7 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (0) WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -596,7 +596,7 @@ SELECT * FROM articles TABLESAMPLE SYSTEM (100) WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -609,7 +609,7 @@ SELECT * FROM articles TABLESAMPLE BERNOULLI (100) WHERE author_id = 1 ORDER BY DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -625,7 +625,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -634,7 +634,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- (0 rows) @@ -643,7 +643,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 @@ -657,7 +657,7 @@ DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable DETAIL: distribution column value: 1 - id | author_id | title | word_count + id | author_id | title | word_count --------------------------------------------------------------------- 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 diff --git a/src/test/regress/expected/multi_single_relation_subquery.out b/src/test/regress/expected/multi_single_relation_subquery.out index ecc692c56..aa097a015 100644 --- a/src/test/regress/expected/multi_single_relation_subquery.out +++ b/src/test/regress/expected/multi_single_relation_subquery.out @@ -27,7 +27,7 @@ order by total desc, number_sum desc limit 10; - number_sum | total | avg_count + number_sum | total | avg_count --------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 @@ -65,7 +65,7 @@ order by total desc, number_sum desc limit 10; - number_sum | total | avg_count + number_sum | total | avg_count --------------------------------------------------------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 @@ -97,7 +97,7 @@ group by order by avg_count desc, suppkey_bin DESC limit 20; - suppkey_bin | avg_count + suppkey_bin | 
avg_count --------------------------------------------------------------------- 95 | 1.4851485148514851 90 | 1.4761904761904762 @@ -150,7 +150,7 @@ group by total order by total; - total | total_avg_count + total | total_avg_count --------------------------------------------------------------------- 1 | 4.8000000000000000 6 | 3.0000000000000000 @@ -174,7 +174,7 @@ from group by (l_orderkey/4)::int, l_suppkey ) as distributed_table; - avg + avg --------------------------------------------------------------------- 1.00083402835696413678 (1 row) @@ -196,7 +196,7 @@ from limit 100) as distributed_table group by l_suppkey - ORDER BY 2 DESC, 1 DESC + ORDER BY 2 DESC, 1 DESC LIMIT 5; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with limit are not supported yet @@ -224,7 +224,7 @@ from lineitem group by l_partkey) as distributed_table; - avg + avg --------------------------------------------------------------------- 1.02907126318497555956 (1 row) @@ -238,9 +238,9 @@ from lineitem group by l_partkey - having + having count(distinct l_shipdate) >= 2) as distributed_table; - avg + avg --------------------------------------------------------------------- 2.0335365853658537 (1 row) @@ -261,7 +261,7 @@ SELECT max(l_suppkey) FROM GROUP BY l_suppkey) z ) y; - max + max --------------------------------------------------------------------- 9999 (1 row) diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out index d2be0c2fb..3e4961e1e 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -2,7 +2,7 @@ -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. --- To find the relation size and total relation size citus_relation_size and +-- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. SET citus.next_shard_id TO 1390000; -- Tests with invalid relation IDs @@ -31,19 +31,19 @@ ERROR: cannot calculate the size because replication factor is greater than 1 VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. SELECT citus_table_size('customer_copy_hash'); - citus_table_size + citus_table_size --------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); - citus_relation_size + citus_relation_size --------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 1597440 (1 row) @@ -52,7 +52,7 @@ SELECT citus_total_relation_size('customer_copy_hash'); SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), citus_table_size('supplier'); - citus_table_size | citus_table_size | citus_table_size + citus_table_size | citus_table_size | citus_table_size --------------------------------------------------------------------- 548864 | 548864 | 401408 (1 row) @@ -61,19 +61,19 @@ CREATE INDEX index_1 on customer_copy_hash(c_custkey); VACUUM (FULL) customer_copy_hash; -- Tests on distributed table with index. 
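-- A sketch of how the three size functions tested in this file differ; they
-- mirror their single-node PostgreSQL counterparts, summed over shards (the
-- byte counts in the expected output are workload-specific):
SELECT citus_relation_size('customer_copy_hash');       -- main fork only (cf. pg_relation_size)
SELECT citus_table_size('customer_copy_hash');          -- adds TOAST etc., still no indexes (cf. pg_table_size)
SELECT citus_total_relation_size('customer_copy_hash'); -- everything, indexes included (cf. pg_total_relation_size)
-- which is why the CREATE INDEX in these tests changes only the
-- citus_total_relation_size result.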
SELECT citus_table_size('customer_copy_hash'); - citus_table_size + citus_table_size --------------------------------------------------------------------- 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); - citus_relation_size + citus_relation_size --------------------------------------------------------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 2646016 (1 row) @@ -81,19 +81,19 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); - citus_table_size + citus_table_size --------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); - citus_relation_size + citus_relation_size --------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 376832 (1 row) @@ -101,19 +101,19 @@ SELECT citus_total_relation_size('supplier'); CREATE INDEX index_2 on supplier(s_suppkey); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); - citus_table_size + citus_table_size --------------------------------------------------------------------- 376832 (1 row) SELECT citus_relation_size('supplier'); - citus_relation_size + citus_relation_size --------------------------------------------------------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 458752 (1 row) @@ -125,14 +125,14 @@ select citus_table_size('supplier'); ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications END; show citus.node_conninfo; - citus.node_conninfo + citus.node_conninfo --------------------------------------------------------------------- sslmode=require (1 row) ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require'; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -140,19 +140,19 @@ SELECT pg_reload_conf(); -- make sure that any invalidation to the connection info -- wouldn't prevent future commands to fail SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 2646016 (1 row) SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) SELECT citus_total_relation_size('customer_copy_hash'); - citus_total_relation_size + citus_total_relation_size --------------------------------------------------------------------- 2646016 (1 row) @@ -160,7 +160,7 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- reset back to the original node_conninfo ALTER SYSTEM RESET citus.node_conninfo; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_sql_function.out b/src/test/regress/expected/multi_sql_function.out index d7ae68408..4cd18c81c 100644 --- 
a/src/test/regress/expected/multi_sql_function.out +++ b/src/test/regress/expected/multi_sql_function.out @@ -37,25 +37,25 @@ SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run plain SQL functions SELECT sql_test_no_1(); - sql_test_no_1 + sql_test_no_1 --------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); - sql_test_no_2 + sql_test_no_2 --------------------------------------------------------------------- 12000 (1 row) SELECT sql_test_no_3(); - sql_test_no_3 + sql_test_no_3 --------------------------------------------------------------------- 1956 (1 row) SELECT sql_test_no_4(); - sql_test_no_4 + sql_test_no_4 --------------------------------------------------------------------- 7806 (1 row) @@ -65,13 +65,13 @@ SELECT sql_test_no_4(); RESET citus.task_executor_type; -- now, run plain SQL functions SELECT sql_test_no_1(); - sql_test_no_1 + sql_test_no_1 --------------------------------------------------------------------- 2985 (1 row) SELECT sql_test_no_2(); - sql_test_no_2 + sql_test_no_2 --------------------------------------------------------------------- 12000 (1 row) @@ -83,9 +83,9 @@ CREATE TABLE temp_table ( ); SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('temp_table','key','hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ @@ -93,39 +93,39 @@ CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT no_parameter_insert_sql(); - no_parameter_insert_sql + no_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ @@ -133,44 +133,44 @@ CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_insert_sql(10); - non_partition_parameter_insert_sql + non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_insert_sql(20); - non_partition_parameter_insert_sql + non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_insert_sql(30); - non_partition_parameter_insert_sql + non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_insert_sql(40); - non_partition_parameter_insert_sql 
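-- The pattern this file exercises, in miniature: a plain SQL function whose
-- body touches a distributed table (function and table names below are
-- illustrative):
CREATE TABLE temp_demo (key int, value int);
SELECT create_distributed_table('temp_demo', 'key');
CREATE FUNCTION insert_demo() RETURNS void AS $$
    INSERT INTO temp_demo (key) VALUES (0);
$$ LANGUAGE SQL;
SELECT insert_demo();  -- each call runs the INSERT through the distributed planner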
+ non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_insert_sql(50); - non_partition_parameter_insert_sql + non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_insert_sql(60); - non_partition_parameter_insert_sql + non_partition_parameter_insert_sql --------------------------------------------------------------------- - + (1 row) -- check inserted values SELECT * FROM temp_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- 0 | 10 0 | 20 @@ -178,12 +178,12 @@ SELECT * FROM temp_table ORDER BY key, value; 0 | 40 0 | 50 0 | 60 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (12 rows) -- check updates @@ -192,44 +192,44 @@ CREATE FUNCTION non_partition_parameter_update_sql(int, int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_update_sql(10, 12); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update_sql(20, 22); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update_sql(30, 32); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update_sql(40, 42); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update_sql(50, 52); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_update_sql(60, 62); - non_partition_parameter_update_sql + non_partition_parameter_update_sql --------------------------------------------------------------------- - + (1 row) -- check after updates SELECT * FROM temp_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- 0 | 12 0 | 22 @@ -237,12 +237,12 @@ SELECT * FROM temp_table ORDER BY key, value; 0 | 42 0 | 52 0 | 62 - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (12 rows) -- check deletes @@ -251,59 +251,59 @@ CREATE FUNCTION non_partition_parameter_delete_sql(int) RETURNS void AS $$ $$ LANGUAGE SQL; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete_sql(12); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete_sql(22); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete_sql(32); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete_sql(42); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql 
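-- Why the tests "execute 6 times": PostgreSQL's plan cache builds custom
-- plans for the first five executions of a cached statement and only then
-- considers switching to a generic plan, so the sixth call is the first one
-- that can take the generic-plan path these tests want to cover. The same
-- effect is visible with an explicit prepared statement (reusing the
-- illustrative temp_demo table from the sketch above):
PREPARE upd(int, int) AS UPDATE temp_demo SET value = $2 WHERE value = $1;
EXECUTE upd(10, 12);  -- executions 1..5 use custom plans
-- ...from the sixth execution on, a generic plan may be chosen instead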
--------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete_sql(52); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql --------------------------------------------------------------------- - + (1 row) SELECT non_partition_parameter_delete_sql(62); - non_partition_parameter_delete_sql + non_partition_parameter_delete_sql --------------------------------------------------------------------- - + (1 row) -- check after deletes SELECT * FROM temp_table ORDER BY key, value; - key | value + key | value --------------------------------------------------------------------- - 0 | - 0 | - 0 | - 0 | - 0 | - 0 | + 0 | + 0 | + 0 | + 0 | + 0 | + 0 | (6 rows) -- test running parameterized SQL function CREATE TABLE test_parameterized_sql(id integer, org_id integer); select create_distributed_table('test_parameterized_sql','org_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION test_parameterized_sql_function(org_id_val integer) @@ -337,9 +337,9 @@ CONTEXT: SQL function "test_parameterized_sql_function_in_subquery_where" state -- don't go over 2PC if they are not part of a bigger transaction. CREATE TABLE table_with_unique_constraint (a int UNIQUE); SELECT create_distributed_table('table_with_unique_constraint', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO table_with_unique_constraint VALUES (1), (2), (3); @@ -354,7 +354,7 @@ DETAIL: Key (a)=(4) already exists. CONTEXT: while executing command on localhost:xxxxx SQL function "insert_twice" statement 2 SELECT * FROM table_with_unique_constraint ORDER BY a; - a + a --------------------------------------------------------------------- 1 2 diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out index 39afbf5ec..8ce03bc7b 100644 --- a/src/test/regress/expected/multi_subquery.out +++ b/src/test/regress/expected/multi_subquery.out @@ -17,7 +17,7 @@ FROM l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; - avg + avg --------------------------------------------------------------------- 142158.8766934673366834 (1 row) @@ -44,7 +44,7 @@ FROM l_suppkey) AS order_counts; DEBUG: generating subplan 2_1 for subquery SELECT l_suppkey, count(*) AS order_count FROM public.lineitem_subquery GROUP BY l_suppkey DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT avg(order_count) AS avg FROM (SELECT intermediate_result.l_suppkey, intermediate_result.order_count FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(l_suppkey integer, order_count bigint)) order_counts - avg + avg --------------------------------------------------------------------- 1.7199369356456930 (1 row) @@ -68,7 +68,7 @@ RESET client_min_messages; SELECT count(*) FROM ( SELECT l_orderkey FROM lineitem_subquery JOIN (SELECT random()::int r) sub ON (l_orderkey = r) WHERE r > 10 ) b; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -89,7 +89,7 @@ DEBUG: generating subplan 7_2 for subquery SELECT intermediate_result.l_orderke DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('7_2'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey 
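-- Sketch of the recursive-planning mechanics behind the "generating subplan"
-- DEBUG lines in these hunks: when a subquery groups on a non-distribution
-- column, Citus runs it separately, materializes it as an intermediate
-- result, and rewrites the outer query to scan it back via
-- read_intermediate_result(...). Illustrative setup:
CREATE TABLE lineitem_demo (l_orderkey bigint, l_suppkey int);
SELECT create_distributed_table('lineitem_demo', 'l_orderkey');
SET client_min_messages TO debug1;
SELECT avg(order_count)
FROM (SELECT l_suppkey, count(*) AS order_count
      FROM lineitem_demo GROUP BY l_suppkey) order_counts;
-- expect a "generating subplan ..." notice, then the rewritten outer query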
bigint)) b DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 12001 (1 row) @@ -111,7 +111,7 @@ DEBUG: generating subplan 10_3 for subquery SELECT intermediate_result.l_orderk DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 14496 (1 row) @@ -123,7 +123,7 @@ SELECT count(*) FROM (SELECT l_orderkey FROM lineitem_subquery) ) b; DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -150,7 +150,7 @@ FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; - avg + avg --------------------------------------------------------------------- 17470.0940725222668915 (1 row) @@ -192,7 +192,7 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 39 @@ -218,7 +218,7 @@ WHERE (o_orderkey < l_quantity + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 39 @@ -243,7 +243,7 @@ WHERE (o_orderkey < l_quantity + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 39 @@ -278,7 +278,7 @@ JOIN ON (l_orderkey::int8 = o_orderkey::int8) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 14947 14947 @@ -314,7 +314,7 @@ WHERE (o_orderkey::int8 < l_quantity::int8 + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 39 @@ -339,7 +339,7 @@ WHERE (o_orderkey::int4 < l_quantity::int8 + 3) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 39 @@ -396,7 +396,7 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 @@ -411,7 +411,7 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id > t2.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 6 | 5 | 5 | 3 5 | 5 | 5 | 3 @@ -433,11 +433,11 @@ FROM events_table t1 LEFT JOIN users_reference_table t2 ON t1.user_id = (CASE WHEN t2.user_id > 3 THEN 3 ELSE t2.user_id END) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- - 6 | | | - 5 | | | - 4 | | | + 6 | | | + 5 | | | + 4 | | | 3 | 5 | 5 | 3 2 | 4 | 4 | 5 (5 rows) @@ -450,7 +450,7 @@ SELECT DISTINCT ON (t1.user_id) t1.user_id, t2.value_1, t2.value_2, 
t2.value_3 LEFT JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 @@ -478,7 +478,7 @@ FROM ( ) lo ORDER BY 1, 2, 3 LIMIT 5; - user_id | value_1 | event_type + user_id | value_1 | event_type --------------------------------------------------------------------- 1 | 1 | 0 1 | 1 | 0 @@ -493,7 +493,7 @@ FROM events_table t1 JOIN users_reference_table t2 ON t1.user_id = trunc(t2.user_id) ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC LIMIT 5; - user_id | value_1 | value_2 | value_3 + user_id | value_1 | value_2 | value_3 --------------------------------------------------------------------- 6 | 5 | 2 | 0 5 | 5 | 5 | 1 @@ -513,7 +513,7 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_orderkey DESC LIMIT 10; - l_orderkey + l_orderkey --------------------------------------------------------------------- 39 38 @@ -536,7 +536,7 @@ JOIN ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); - count + count --------------------------------------------------------------------- 13 (1 row) @@ -552,7 +552,7 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_quantity DESC LIMIT 10; - l_quantity + l_quantity --------------------------------------------------------------------- 50.00 49.00 @@ -577,7 +577,7 @@ WHERE (o_orderkey < l_quantity) ORDER BY l_quantity DESC LIMIT 10; - l_quantity + l_quantity --------------------------------------------------------------------- 50.00 49.00 @@ -600,7 +600,7 @@ JOIN ON (l_orderkey = o_orderkey) WHERE (o_orderkey < l_quantity); - count + count --------------------------------------------------------------------- 25 (1 row) @@ -616,7 +616,7 @@ FROM ( GROUP BY l_orderkey ) z; - count + count --------------------------------------------------------------------- 7 (1 row) @@ -668,7 +668,7 @@ ORDER BY total_order_count DESC, o_custkey ASC LIMIT 10; - o_custkey | total_order_count + o_custkey | total_order_count --------------------------------------------------------------------- 1462 | 9 619 | 8 @@ -698,7 +698,7 @@ FROM WHERE unit_price > 1000 AND unit_price < 10000; - avg + avg --------------------------------------------------------------------- 4968.4946466804019323 (1 row) @@ -717,7 +717,7 @@ SELECT count(*) FROM ) a WHERE l_orderkey = 1 ) b; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -730,7 +730,7 @@ SELECT count(*) FROM ) a WHERE l_orderkey = 1 ) b; - count + count --------------------------------------------------------------------- 6 (1 row) @@ -748,7 +748,7 @@ SELECT max(l_orderkey) FROM l_orderkey ) z ) y; - max + max --------------------------------------------------------------------- 14947 (1 row) @@ -769,7 +769,7 @@ FROM WHERE user_id = 3 GROUP BY user_id) AS bar WHERE foo.user_id = bar.user_id ) AS baz; - user_id | counter | user_id | counter + user_id | counter | user_id | counter --------------------------------------------------------------------- (0 rows) @@ -792,7 +792,7 @@ FROM WHERE foo.user_id = bar.user_id ) AS baz ORDER BY 1,2 LIMIT 5; - user_id | counter | user_id | counter + user_id | counter | user_id | counter --------------------------------------------------------------------- 2 | 57 | 2 | 57 (1 row) @@ -826,9 +826,9 @@ CREATE TABLE subquery_pruning_varchar_test_table ); SET citus.shard_replication_factor TO 1; SELECT 
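-- The LEFT JOINs against users_reference_table above work because a
-- reference table is replicated to every node: each shard of the distributed
-- table joins locally against the full reference table, so even non-equi
-- join clauses such as t1.user_id > t2.user_id remain pushdown-safe.
-- Minimal illustrative setup:
CREATE TABLE events_demo (user_id int, value_1 int);
SELECT create_distributed_table('events_demo', 'user_id');
CREATE TABLE users_ref_demo (user_id int);
SELECT create_reference_table('users_ref_demo');
SELECT count(*) FROM events_demo e LEFT JOIN users_ref_demo r ON e.user_id > r.user_id;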
create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- temporarily disable router executor to test pruning behaviour of subquery pushdown @@ -838,7 +838,7 @@ SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a) AS foo; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- (0 rows) @@ -846,7 +846,7 @@ SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a) AS foo; DEBUG: Router planner not enabled. - count + count --------------------------------------------------------------------- (0 rows) @@ -879,7 +879,7 @@ SELECT * FROM GROUP BY a_inner) AS foo; - a + a --------------------------------------------------------------------- (0 rows) @@ -910,7 +910,7 @@ FROM GROUP BY tenant_id, user_id) AS subquery; - event_average + event_average --------------------------------------------------------------------- 3.6666666666666667 (1 row) @@ -979,7 +979,7 @@ GROUP BY hasdone ORDER BY event_average DESC; - event_average | hasdone + event_average | hasdone --------------------------------------------------------------------- 4.0000000000000000 | Has not done paying 2.5000000000000000 | Has done paying @@ -1055,7 +1055,7 @@ GROUP BY count_pay ORDER BY count_pay; - event_average | count_pay + event_average | count_pay --------------------------------------------------------------------- 3.0000000000000000 | 0 (1 row) @@ -1112,7 +1112,7 @@ ORDER BY user_lastseen DESC LIMIT 10; - tenant_id | user_id | user_lastseen | event_array + tenant_id | user_id | user_lastseen | event_array --------------------------------------------------------------------- 1 | 1003 | 1472807315 | {click,click,click,submit} 1 | 1002 | 1472807215 | {click,click,submit,pay} diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index d10e41eef..33d61361e 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -27,7 +27,7 @@ FROM ( GROUP BY user_id ) q ORDER BY 2 DESC, 1; - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 3 | 187 2 | 180 @@ -75,7 +75,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event + user_id | sum | length | hasdone_event --------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event @@ -122,7 +122,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -157,7 +157,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event + user_id | sum | length | hasdone_event --------------------------------------------------------------------- 1 | 12 | 14 | Has done event 2 | 20 | 14 | Has done event @@ -196,7 +196,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; - count + count 
--------------------------------------------------------------------- 3 (1 row) @@ -268,7 +268,7 @@ GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay + user_id | event_average | count_pay --------------------------------------------------------------------- 3 | 19.0000000000000000 | 7 2 | 12.0000000000000000 | 9 @@ -341,7 +341,7 @@ HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay + user_id | event_average | count_pay --------------------------------------------------------------------- 3 | 19.0000000000000000 | 3 2 | 12.0000000000000000 | 4 @@ -406,7 +406,7 @@ GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay + user_id | event_average | count_pay --------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 @@ -467,7 +467,7 @@ HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; - user_id | event_average | count_pay + user_id | event_average | count_pay --------------------------------------------------------------------- 3 | 12.0000000000000000 | 4 2 | 9.0000000000000000 | 5 @@ -504,7 +504,7 @@ FROM ( GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 @@ -523,7 +523,7 @@ GROUP BY ORDER BY user_id DESC LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -541,7 +541,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type < 3 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 6 | 4 6 | 4 @@ -559,7 +559,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=2 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 1 DESC, 2 DESC LIMIT 3; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 5 | 5 5 | 5 @@ -578,7 +578,7 @@ GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 5; - user_id | cnt + user_id | cnt --------------------------------------------------------------------- 4 | 43 2 | 37 @@ -596,7 +596,7 @@ SELECT user_id, value_2 FROM users_table WHERE AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 4; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 5 | 5 5 | 5 @@ -626,7 +626,7 @@ GROUP BY ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | avg + user_id | avg --------------------------------------------------------------------- 4 | 2.0000000000000000 3 | 2.0000000000000000 @@ -648,7 +648,7 @@ SELECT user_id, value_1 from ) AS a ORDER BY user_id ASC, value_1 ASC; - user_id | value_1 + user_id | value_1 --------------------------------------------------------------------- 2 | 0 2 | 2 @@ -669,7 +669,7 @@ SELECT user_id, 
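-- The "hasdone"/"count_pay" funnel idiom used throughout these tests, in
-- miniature: both tables are distributed on user_id and joined on it, so the
-- whole query can be pushed down to the shards (reusing the illustrative
-- events_demo table from the sketch above):
CREATE TABLE users_demo (user_id int, value_1 int);
SELECT create_distributed_table('users_demo', 'user_id');
SELECT u.user_id,
       CASE WHEN e.user_id IS NOT NULL THEN 'Has done event'
            ELSE 'Has not done event' END AS hasdone_event
FROM users_demo u
LEFT JOIN (SELECT DISTINCT user_id FROM events_demo WHERE value_1 = 4) e
       ON u.user_id = e.user_id;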
value_1 from ) AS a ORDER BY user_id ASC, value_1 ASC; - user_id | value_1 + user_id | value_1 --------------------------------------------------------------------- 2 | 0 2 | 2 @@ -697,7 +697,7 @@ WHERE value_1 = 1 AND value_2 > 2 ) ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -717,7 +717,7 @@ GROUP BY user_id, event_type ORDER BY 2 DESC, 1 LIMIT 3; - user_id | event_type + user_id | event_type --------------------------------------------------------------------- 1 | 6 2 | 5 @@ -742,7 +742,7 @@ SELECT user_id FROM ) AS a ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -772,7 +772,7 @@ FROM users_table.value_1 < 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM assets; - count | count | avg + count | count | avg --------------------------------------------------------------------- 732 | 6 | 3.3934426229508197 (1 row) @@ -797,7 +797,7 @@ SELECT count(*) FROM ) as foo; DEBUG: generating subplan 23_1 for subquery SELECT user_id FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) 4) DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table WHERE (((users_table.value_1 OPERATOR(pg_catalog.=) 1) OR (users_table.value_1 OPERATOR(pg_catalog.=) 3)) AND (NOT (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) GROUP BY users_table.user_id HAVING (count(DISTINCT users_table.value_1) OPERATOR(pg_catalog.=) 2)) foo - count + count --------------------------------------------------------------------- 1 (1 row) @@ -831,7 +831,7 @@ SELECT subquery_count FROM GROUP BY a.user_id ) AS inner_subquery; - subquery_count + subquery_count --------------------------------------------------------------------- 1 (1 row) @@ -864,7 +864,7 @@ WHERE b.user_id IS NULL GROUP BY a.user_id; - subquery_count + subquery_count --------------------------------------------------------------------- 1 (1 row) @@ -940,7 +940,7 @@ WHERE GROUP BY e1.user_id LIMIT 1; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen --------------------------------------------------------------------- 1 | 1 | 1 | 1 | 1 | 1 (1 row) @@ -1005,7 +1005,7 @@ FROM ( GROUP BY e1.user_id ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen --------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 @@ -1076,7 +1076,7 @@ group by e1.user_id HAVING sum(submit_card_info) > 0 ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; - user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen + user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen 
--------------------------------------------------------------------- 2 | 1080 | 1080 | 1080 | 1080 | 1080 3 | 540 | 540 | 540 | 540 | 540 @@ -1116,7 +1116,7 @@ GROUP BY ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 @@ -1153,7 +1153,7 @@ HAVING ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 3 | 3.6000000000000000 5 | 2.1666666666666667 @@ -1192,7 +1192,7 @@ GROUP BY ORDER BY avg(b.value_3) DESC, 2, 1 LIMIT 5; - user_id | subquery_avg | avg + user_id | subquery_avg | avg --------------------------------------------------------------------- 1 | 2.3333333333333333 | 3.33333333333333 4 | 2.6666666666666667 | 2.55555555555556 @@ -1226,7 +1226,7 @@ GROUP BY ORDER BY 4 DESC, 1 DESC, 2 ASC, 3 ASC LIMIT 10; - user_id | value_2 | value_3 | counts + user_id | value_2 | value_3 | counts --------------------------------------------------------------------- 5 | 3 | 4 | 160 2 | 3 | 5 | 156 @@ -1253,7 +1253,7 @@ GROUP BY ORDER BY users_count desc, avg_type DESC LIMIT 5; - avg_type | users_count + avg_type | users_count --------------------------------------------------------------------- 2.3750000000000000 | 24 2.5714285714285714 | 21 @@ -1282,7 +1282,7 @@ FROM events_table ORDER BY users_count.ct desc, event_type DESC LIMIT 5; - event_type | ct + event_type | ct --------------------------------------------------------------------- 5 | 26 4 | 26 @@ -1314,7 +1314,7 @@ FROM ORDER BY total_count DESC, count_1 DESC, 1 DESC LIMIT 10; - user_id | count_1 | total_count + user_id | count_1 | total_count --------------------------------------------------------------------- 2 | 18 | 7 3 | 17 | 7 @@ -1356,7 +1356,7 @@ WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 5 | 0.00000000000000000000 3 | 2.0000000000000000 @@ -1424,7 +1424,7 @@ GROUP BY ORDER BY avg(b.value_3), 2, 1 LIMIT 5; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 3 | 3.3333333333333333 5 | 2.2000000000000000 @@ -1462,7 +1462,7 @@ FROM ORDER BY prob DESC, value_2 DESC, user_id DESC, event_type DESC LIMIT 10; - user_id | event_type + user_id | event_type --------------------------------------------------------------------- 3 | 5 3 | 4 @@ -1500,7 +1500,7 @@ FROM ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; - user_id | event_type + user_id | event_type --------------------------------------------------------------------- 3 | 5 2 | 5 @@ -1546,7 +1546,7 @@ FROM ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; - user_id | event_type + user_id | event_type --------------------------------------------------------------------- 3 | 5 2 | 5 @@ -1571,7 +1571,7 @@ SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(AN LIMIT 1; $$ LANGUAGE sql') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION @@ -1645,7 +1645,7 @@ AS outer_outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, 
event_type_e DESC LIMIT 10; - user_id_e | event_type_e | value_2 | value_3 | user_id + user_id_e | event_type_e | value_2 | value_3 | user_id --------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1728,7 +1728,7 @@ FROM ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; - user_id_e | event_type_e | value_2 | value_3 | user_id + user_id_e | event_type_e | value_2 | value_3 | user_id --------------------------------------------------------------------- 5 | 5 | 2 | 0 | 5 5 | 5 | 2 | 0 | 5 @@ -1745,7 +1745,7 @@ LIMIT 10; -- drop created functions SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION @@ -1771,7 +1771,7 @@ FROM ( ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; - subquery_count + subquery_count --------------------------------------------------------------------- 1 1 @@ -1843,7 +1843,7 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT 1+1 OFFSET 1::smallint; DEBUG: push down of limit count: 3 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 4 | 184 2 | 180 @@ -1867,7 +1867,7 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT '3' OFFSET 2+1; DEBUG: push down of limit count: 6 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 5 | 156 6 | 40 @@ -1897,7 +1897,7 @@ FROM ( ORDER BY 2 DESC, 1 LIMIT volatile_func_test() + (ROW(1,2,NULL) < ROW(1,3,0))::int OFFSET volatile_func_test() + volatile_func_test(); DEBUG: push down of limit count: 4 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 3 | 340 5 | 312 @@ -1926,7 +1926,7 @@ LIMIT (5 > 4)::int OFFSET ELSE 2 END; DEBUG: push down of limit count: 3 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 2 | 180 (1 row) @@ -1949,7 +1949,7 @@ FROM ( LIMIT $1 OFFSET $2; EXECUTE parametrized_limit(1,1); DEBUG: push down of limit count: 2 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 4 | 184 (1 row) @@ -1971,7 +1971,7 @@ FROM ( LIMIT 1 OFFSET $1; EXECUTE parametrized_offset(1); DEBUG: push down of limit count: 2 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 4 | 184 (1 row) @@ -1992,7 +1992,7 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool RETURNS NULL ON NULL INPUT; $f$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE FUNCTION") (localhost,57638,t,"CREATE FUNCTION") @@ -2030,7 +2030,7 @@ FROM ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 3 AND test_join_function_2(users_table.user_id, temp.user_id); - user_id | value_1 | prob + user_id | value_1 | prob --------------------------------------------------------------------- (0 rows) @@ -2054,7 +2054,7 @@ FROM users_table.value_1 < 3 ORDER BY 2 DESC, 1 DESC LIMIT 10; - user_id | value_1 | prob + user_id | value_1 | prob 
--------------------------------------------------------------------- 6 | 2 | 0.50000000000000000000 6 | 2 | 0.50000000000000000000 @@ -2082,7 +2082,7 @@ FROM events_table.time > users_table.time AND events_table.value_2 IN (0, 4) ) as foo; - count + count --------------------------------------------------------------------- 180 (1 row) @@ -2123,7 +2123,7 @@ FROM ) as bar WHERE foo.event_type > bar.event_type AND foo.user_id = bar.user_id; - count + count --------------------------------------------------------------------- 11971 (1 row) @@ -2168,7 +2168,7 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -2194,7 +2194,7 @@ FROM WHERE temp.value_1 < 3 ORDER BY 1, 2 LIMIT 5; - user_id | value_1 | prob + user_id | value_1 | prob --------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 @@ -2219,7 +2219,7 @@ FROM ON users_ids.user_id = temp.user_id ORDER BY 1,2 LIMIT 5; - user_id | value_1 | prob + user_id | value_1 | prob --------------------------------------------------------------------- 1 | 1 | 0.50000000000000000000 2 | 0 | 0.50000000000000000000 @@ -2244,9 +2244,9 @@ FROM GROUP BY 1 ) AS temp; - count | avg + count | avg --------------------------------------------------------------------- - 6 | + 6 | (1 row) -- Test the case when a subquery has a lateral reference to two levels upper @@ -2275,7 +2275,7 @@ LATERAL ( ) b ORDER BY user_id, value_2, cnt LIMIT 1; - user_id | value_2 | cnt + user_id | value_2 | cnt --------------------------------------------------------------------- 2 | 0 | 1 (1 row) @@ -2286,7 +2286,7 @@ SELECT run_command_on_workers($f$ DROP FUNCTION test_join_function_2(integer, integer); $f$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"DROP FUNCTION") (localhost,57638,t,"DROP FUNCTION") diff --git a/src/test/regress/expected/multi_subquery_complex_queries.out b/src/test/regress/expected/multi_subquery_complex_queries.out index 4570d533a..ce2ff4eb4 100644 --- a/src/test/regress/expected/multi_subquery_complex_queries.out +++ b/src/test/regress/expected/multi_subquery_complex_queries.out @@ -68,7 +68,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -133,7 +133,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -199,7 +199,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 234 @@ -264,7 +264,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 369 @@ -343,7 +343,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 2 | 433 @@ -429,7 +429,7 @@ DEBUG: generating subplan 16_3 for subquery SELECT "time", event, user_id FROM DEBUG: generating subplan 16_4 for subquery SELECT "time", event, user_id FROM (SELECT events."time", 3 AS event, events.user_id FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) 
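-- In the Citus version under test, helper functions referenced by
-- distributed queries must exist on every node, hence the
-- run_command_on_workers(...) calls around these hunks. A sketch of the
-- idiom (the double_it function is hypothetical):
SELECT run_command_on_workers($cmd$
    CREATE OR REPLACE FUNCTION double_it(x int) RETURNS int
    AS $$ SELECT x * 2 $$ LANGUAGE sql IMMUTABLE;
$cmd$);
-- ...and create the same function on the coordinator as well before using it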
ANY (ARRAY[5, 6]))) events_subquery_4 DEBUG: generating subplan 16_5 for subquery SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT events_subquery_2.max, events_subquery_2.event, events_subquery_2.user_id FROM (SELECT events_subquery_5.max, events_subquery_5.event, events_subquery_5.user_id FROM (SELECT intermediate_result.max, intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(max timestamp without time zone, event integer, user_id integer)) events_subquery_5) events_subquery_2 UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) UNION SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_4'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer) DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT t_1.user_id, t_1."time", unnest(t_1.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT intermediate_result."time", intermediate_result.event, intermediate_result.user_id FROM read_intermediate_result('16_5'::text, 'binary'::citus_copy_format) intermediate_result("time" timestamp without time zone, event integer, user_id integer)) t1 GROUP BY t1.user_id) t_1) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.=) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 2 | 433 @@ -502,7 +502,7 @@ ORDER BY types; DEBUG: generating subplan 22_1 for subquery SELECT user_id, "time", unnest(collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION SELECT events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION SELECT events_subquery_4.user_id, 
events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5]))) events_subquery_4) t1 GROUP BY t1.user_id) t DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT event_types AS types, count(*) AS sumofeventtype FROM (SELECT q.user_id, q."time", q.event_types, t.user_id, random() AS random FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_types FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_types integer)) q JOIN (SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t ON ((t.user_id OPERATOR(pg_catalog.<>) q.user_id)))) final_query(user_id, "time", event_types, user_id_1, random) GROUP BY event_types ORDER BY event_types - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 2088 1 | 2163 @@ -652,7 +652,7 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 434 @@ -716,7 +716,7 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -779,7 +779,7 @@ INNER JOIN ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -838,7 +838,7 @@ INNER JOIN WHERE value_1 > 0 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -902,7 +902,7 @@ INNER JOIN ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -982,7 +982,7 @@ INNER JOIN GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; - user_id | cnt + user_id | cnt --------------------------------------------------------------------- 3 | 275 6 | 72 @@ -1064,7 +1064,7 @@ GROUP BY LIMIT 10; DEBUG: generating subplan 42_1 for subquery SELECT DISTINCT user_id FROM public.events_table events WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[0, 6])) GROUP BY user_id DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT user_id, count(*) AS cnt FROM (SELECT first_query.user_id, random() AS random FROM ((SELECT t.user_id, t."time", unnest(t.collected_events) AS event_types FROM (SELECT t1.user_id, min(t1."time") AS "time", array_agg(t1.event ORDER BY t1."time", t1.event DESC) AS collected_events FROM (SELECT events_subquery_1.user_id, events_subquery_1."time", events_subquery_1.event FROM (SELECT events.user_id, events."time", 0 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2]))) events_subquery_1 UNION ALL SELECT 
events_subquery_2.user_id, events_subquery_2."time", events_subquery_2.event FROM (SELECT events.user_id, events."time", 1 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[3, 4]))) events_subquery_2 UNION ALL SELECT events_subquery_3.user_id, events_subquery_3."time", events_subquery_3.event FROM (SELECT events.user_id, events."time", 2 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) events_subquery_3 UNION ALL SELECT events_subquery_4.user_id, events_subquery_4."time", events_subquery_4.event FROM (SELECT events.user_id, events."time", 3 AS event FROM public.events_table events WHERE (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6]))) events_subquery_4) t1 GROUP BY t1.user_id) t) first_query JOIN (SELECT t.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE ((users.value_1 OPERATOR(pg_catalog.>) 0) AND (users.value_1 OPERATOR(pg_catalog.<) 4))) t LEFT JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t2 ON ((t2.user_id OPERATOR(pg_catalog.>) t.user_id))) WHERE (t2.user_id IS NULL)) second_query ON ((first_query.user_id OPERATOR(pg_catalog.=) second_query.user_id)))) final_query GROUP BY user_id ORDER BY (count(*)) DESC, user_id DESC LIMIT 10 - user_id | cnt + user_id | cnt --------------------------------------------------------------------- 5 | 324 6 | 72 @@ -1145,7 +1145,7 @@ INNER JOIN GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; - user_id | cnt + user_id | cnt --------------------------------------------------------------------- 3 | 275 6 | 72 @@ -1191,7 +1191,7 @@ FROM order BY user_id LIMIT 50; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 @@ -1230,7 +1230,7 @@ FROM ORDER BY user_id limit 50; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 3 | Thu Nov 23 18:08:26.550729 2017 @@ -1371,7 +1371,7 @@ FROM ORDER BY user_id DESC, lastseen DESC LIMIT 10; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 2 | Thu Nov 23 17:26:14.563216 2017 @@ -1435,7 +1435,7 @@ SELECT "some_users_data".user_id, MAX(lastseen), count(*) GROUP BY 1 ORDER BY 2, 1 DESC LIMIT 10; - user_id | max | count + user_id | max | count --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 | 10 (1 row) @@ -1716,7 +1716,7 @@ GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; - value | generated_group_field + value | generated_group_field --------------------------------------------------------------------- 1 | 5 2 | 2 @@ -1770,7 +1770,7 @@ ORDER BY generated_group_field DESC, value DESC; DEBUG: generating subplan 64_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT 
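-- Rough rule of thumb behind the UNION subplan DEBUG output in these hunks:
-- a UNION [ALL] can be pushed down only when every branch exposes the
-- distribution column and any join against the union is an equality on that
-- column; otherwise each branch is planned as an intermediate result. A
-- pushdown-safe miniature (reusing the illustrative events_demo table):
SELECT count(*) FROM (
    SELECT user_id FROM events_demo WHERE value_1 = 1
    UNION ALL
    SELECT user_id FROM events_demo WHERE value_1 = 2
) u;
-- joining the union on anything other than user_id equality (e.g. <>) would
-- instead trigger "generating subplan ..." under client_min_messages = debug1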
"eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.=) user_where_1_join_1.value_2)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC - value | generated_group_field + value | generated_group_field --------------------------------------------------------------------- 1 | 5 2 | 2 @@ -1822,7 +1822,7 @@ ORDER BY generated_group_field DESC, value DESC; DEBUG: generating subplan 66_1 for subquery SELECT user_id, value_2 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_3 OPERATOR(pg_catalog.>) (3)::double precision)) DEBUG: Plan 66 query after replacing subqueries and CTEs: SELECT count(*) AS value, generated_group_field FROM (SELECT DISTINCT "pushedDownQuery_1".real_user_id, "pushedDownQuery_1".generated_group_field FROM (SELECT "eventQuery".real_user_id, "eventQuery"."time", random() AS random, "eventQuery".value_2 AS generated_group_field FROM (SELECT temp_data_queries."time", temp_data_queries.user_id, temp_data_queries.value_2, user_filters_1.real_user_id FROM ((SELECT events."time", events.user_id, events.value_2 FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[4, 5])))) temp_data_queries JOIN (SELECT user_where_1_1.real_user_id FROM ((SELECT users.user_id AS real_user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 3))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('66_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) user_where_1_join_1 ON ((user_where_1_1.real_user_id OPERATOR(pg_catalog.>=) user_where_1_join_1.user_id)))) user_filters_1 ON ((temp_data_queries.user_id OPERATOR(pg_catalog.=) user_filters_1.real_user_id)))) "eventQuery") "pushedDownQuery_1") "pushedDownQuery" GROUP BY generated_group_field ORDER BY generated_group_field DESC, (count(*)) DESC - value | generated_group_field + value | generated_group_field --------------------------------------------------------------------- 1 | 5 2 | 2 @@ -1869,7 +1869,7 @@ FROM ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, 
value_3 DESC LIMIT 10; - value_3 | cnt + value_3 | cnt --------------------------------------------------------------------- 0 | 7 10 | 21 @@ -1922,7 +1922,7 @@ GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; DEBUG: generating subplan 69_1 for subquery SELECT user_id FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT value_3, count(*) AS cnt FROM (SELECT segmentalias_1.value_3, segmentalias_1.user_id, random() AS random FROM (SELECT users_in_segment_1.user_id, users_in_segment_1.value_3 FROM ((SELECT all_buckets_1.user_id, (all_buckets_1.value_3 OPERATOR(pg_catalog.*) (2)::double precision) AS value_3 FROM (SELECT simple_user_where_1.user_id, simple_user_where_1.value_3 FROM (SELECT users.user_id, users.value_3 FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_2 OPERATOR(pg_catalog.>) 2))) simple_user_where_1) all_buckets_1) users_in_segment_1 JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) some_users_data ON (true))) segmentalias_1) "tempQuery" GROUP BY value_3 ORDER BY (count(*)), value_3 DESC LIMIT 10 - value_3 | cnt + value_3 | cnt --------------------------------------------------------------------- 0 | 14 10 | 42 @@ -1977,7 +1977,7 @@ LIMIT 10) "some_users" ORDER BY value_3 DESC, user_id ASC LIMIT 10; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 2 | 5 2 | 5 @@ -2031,7 +2031,7 @@ FROM ORDER BY value_3 DESC, user_id ASC LIMIT 10; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 2 | 5 2 | 5 @@ -2086,7 +2086,7 @@ FROM ORDER BY value_3 DESC, user_id DESC LIMIT 10; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 3 | 5 3 | 5 @@ -2139,7 +2139,7 @@ FROM ) "some_users_data" ON TRUE ORDER BY value_3 DESC, user_id DESC LIMIT 10; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 3 | 5 3 | 5 @@ -2190,7 +2190,7 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field + cnt | generated_group_field --------------------------------------------------------------------- 336 | 2 210 | 1 @@ -2218,7 +2218,7 @@ GROUP BY ORDER BY cnt DESC, user_id DESC LIMIT 10; - cnt | user_id + cnt | user_id --------------------------------------------------------------------- 11 | 3 10 | 2 @@ -2267,7 +2267,7 @@ FROM ORDER BY value_2 DESC, user_id DESC LIMIT 10; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 2 | 5 (1 row) @@ -2377,7 +2377,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ -2444,7 +2444,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 425 @@ -2510,7 +2510,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 449 1 | 433 @@ 
-2571,7 +2571,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 4 1 | 8 @@ -2608,7 +2608,7 @@ FROM ( ORDER BY 1,2,3,4 LIMIT 5; - uid | event_type | value_2 | value_3 + uid | event_type | value_2 | value_3 --------------------------------------------------------------------- 1 | 1 | 0 | 2 1 | 1 | 0 | 2 @@ -2645,12 +2645,12 @@ FROM USING (user_id) GROUP BY user_id ORDER BY 1, 2; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 - 5 | + 5 | (4 rows) -- see the comment for the above query @@ -2680,12 +2680,12 @@ FROM USING (user_id) GROUP BY a.user_id ORDER BY 1, 2; - user_id | subquery_avg + user_id | subquery_avg --------------------------------------------------------------------- 1 | 2.3333333333333333 3 | 5.0000000000000000 4 | 1.00000000000000000000 - 5 | + 5 | (4 rows) -- queries where column aliases are used @@ -2697,7 +2697,7 @@ FROM ( FROM (users_table JOIN events_table USING (user_id)) k (k1, k2, k3)) l ORDER BY k1 LIMIT 5; - k1 + k1 --------------------------------------------------------------------- 1 1 @@ -2712,7 +2712,7 @@ FROM ( FROM (users_table JOIN events_table USING (user_id)) k (k1, k2, k3)) l ORDER BY k1 LIMIT 5; - k1 + k1 --------------------------------------------------------------------- 1 2 @@ -2725,7 +2725,7 @@ SELECT x1, x3, value_2 FROM (users_table u FULL JOIN events_table e ON (u.user_id = e.user_id)) k(x1, x2, x3, x4, x5) ORDER BY 1, 2, 3 LIMIT 5; - x1 | x3 | value_2 + x1 | x3 | value_2 --------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 @@ -2738,7 +2738,7 @@ SELECT x1, x3, value_2 FROM (users_table u FULL JOIN events_table e USING (user_id)) k(x1, x2, x3, x4, x5) ORDER BY 1, 2, 3 LIMIT 5; - x1 | x3 | value_2 + x1 | x3 | value_2 --------------------------------------------------------------------- 1 | 1 | 1 1 | 1 | 1 @@ -2752,7 +2752,7 @@ FROM (users_table LEFT OUTER JOIN events_table ON (users_table.user_id = events INNER JOIN users_table as u2 ON (test.c_custkey = u2.user_id) ORDER BY 1 DESC LIMIT 10; - c_custkey + c_custkey --------------------------------------------------------------------- 6 6 @@ -2772,7 +2772,7 @@ FROM (users_table LEFT OUTER JOIN events_table ON (users_table.user_id = events GROUP BY 1 ORDER BY 2, 1 LIMIT 10; - c_custkey | date_trunc + c_custkey | date_trunc --------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 6 | Thu Nov 23 14:43:00 2017 @@ -2789,7 +2789,7 @@ GROUP BY 1 HAVING extract(minute from max(c_nationkey)) >= 45 ORDER BY 2, 1 LIMIT 10; - c_custkey | date_trunc + c_custkey | date_trunc --------------------------------------------------------------------- 2 | Thu Nov 23 13:52:00 2017 5 | Thu Nov 23 16:48:00 2017 @@ -2800,7 +2800,7 @@ FROM (users_table JOIN events_table USING (user_id)) AS test(user_id, c_nationke FULL JOIN users_table AS u2 USING (user_id) ORDER BY 1 DESC LIMIT 10; - user_id + user_id --------------------------------------------------------------------- 6 6 @@ -2824,7 +2824,7 @@ FROM ((users_table GROUP BY 1,2 ORDER BY 2 DESC, 1 DESC LIMIT 10; - bar | value_3 + bar | value_3 --------------------------------------------------------------------- 3 | 5 2 | 5 @@ -2852,7 +2852,7 @@ JOIN LATERAL GROUP BY 1, 2 ORDER BY 2 DESC, 1 DESC LIMIT 10; - bar | value_3 + bar | value_3 
+ bar | value_3
--------------------------------------------------------------------- 3 | 5 2 | 5 @@ -2881,7 +2881,7 @@ SELECT bar, foo.value_3, c_custkey, test_2.time_2 FROM ON (users_table.user_id = deeper_join_2.user_id_deep)) AS test_2(c_custkey, time_2) WHERE foo.bar = test_2.c_custkey ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC LIMIT 10; - bar | value_3 | c_custkey | time_2 + bar | value_3 | c_custkey | time_2 --------------------------------------------------------------------- 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 3 | 5 | 3 | Thu Nov 23 17:18:51.048758 2017 diff --git a/src/test/regress/expected/multi_subquery_complex_reference_clause.out b/src/test/regress/expected/multi_subquery_complex_reference_clause.out index 2e867de71..e4aa19319 100644 --- a/src/test/regress/expected/multi_subquery_complex_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_complex_reference_clause.out @@ -7,9 +7,9 @@ -- SET citus.next_shard_id TO 1400000; CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('user_buy_test_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO user_buy_test_table VALUES(1,2,1); @@ -18,9 +18,9 @@ INSERT INTO user_buy_test_table VALUES(3,4,2); INSERT INTO user_buy_test_table VALUES(7,5,2); CREATE TABLE users_return_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('users_return_test_table', 'user_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO users_return_test_table VALUES(4,1,1); @@ -30,7 +30,7 @@ INSERT INTO users_return_test_table VALUES(3,2,2); SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -39,7 +39,7 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random(), k_no FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1 WHERE k_no = 47; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -49,7 +49,7 @@ SELECT subquery_1.item_id FROM (SELECT user_buy_test_table.item_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1 ORDER BY 1; - item_id + item_id --------------------------------------------------------------------- 2 3 @@ -62,7 +62,7 @@ SELECT subquery_1.user_id FROM (SELECT user_buy_test_table.user_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_1 ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -86,7 +86,7 @@ DETAIL: There exist a reference table in the outer part of the outer join SELECT count(*) FROM (SELECT random() FROM users_ref_test_table RIGHT JOIN user_buy_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -101,7 +101,7 @@ DETAIL: There exist a reference table in the outer part of the outer join SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN 
users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -110,7 +110,7 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -119,7 +119,7 @@ SELECT count(*) FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -131,7 +131,7 @@ SELECT count(*) FROM LEFT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 LEFT JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -144,7 +144,7 @@ SELECT count(*) FROM (SELECT DISTINCT user_buy_test_table.user_id, random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id AND users_ref_test_table.k_no > 44 AND user_buy_test_table.user_id > 44) subquery_2 WHERE subquery_1.user_id = subquery_2.user_id ; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -165,7 +165,7 @@ SELECT subquery_2.id ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id ORDER BY 1 DESC LIMIT 5; - id + id --------------------------------------------------------------------- 3 2 @@ -203,7 +203,7 @@ FROM INNER JOIN events_reference_table ON (events_reference_table.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 31248 3 | 15120 @@ -223,7 +223,7 @@ FROM INNER JOIN (SELECT *, random() FROM events_reference_table) as ref_all ON (ref_all.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 31248 3 | 15120 @@ -236,7 +236,7 @@ FROM SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 10 (1 row) @@ -244,7 +244,7 @@ SELECT count(*) FROM -- table function cannot be used without subquery pushdown SELECT count(*) FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id = users_ref_test_table.id; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -253,14 +253,14 @@ SELECT count(*) FROM user_buy_test_table JOIN generate_series(1,10) AS users_ref SELECT count(*) FROM (SELECT random() FROM user_buy_test_table LEFT JOIN generate_series(1,10) AS users_ref_test_table(id) ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 10 (1 row) SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT * FROM generate_series(1,10) id) users_ref_test_table ON 
user_buy_test_table.item_id = users_ref_test_table.id; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -284,7 +284,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: generating subplan 30_1 for subquery SELECT id FROM random() users_ref_test_table(id) DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT random() AS random FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(id double precision)) users_ref_test_table(id) ON (((user_buy_test_table.item_id)::double precision OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 4 (1 row) @@ -298,7 +298,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: generating subplan 31_1 for subquery SELECT id FROM generate_series((random())::integer, 10) users_ref_test_table(id) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.item_id FROM (public.user_buy_test_table JOIN (SELECT intermediate_result.id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) users_ref_test_table(id) ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1 WHERE (item_id OPERATOR(pg_catalog.=) 6) DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -317,7 +317,7 @@ DEBUG: generating subplan 32_2 for subquery SELECT intermediate_result.user_id DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('32_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 14 (1 row) @@ -327,7 +327,7 @@ RESET client_min_messages; SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN (SELECT 4 AS id) users_ref_test_table ON user_buy_test_table.item_id > users_ref_test_table.id) subquery_1; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -335,7 +335,7 @@ SELECT count(*) FROM -- subquery without FROM triggers subquery pushdown SELECT count(*) FROM user_buy_test_table JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -343,7 +343,7 @@ ON user_buy_test_table.item_id = users_ref_test_table.id; -- subquery without FROM can be the inner relationship in an outer join SELECT count(*) FROM user_buy_test_table LEFT JOIN (SELECT 5 AS id) users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -369,7 +369,7 @@ DEBUG: generating subplan 39_2 for subquery SELECT intermediate_result.user_id DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM 
read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) subquery_1 DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 5 (1 row) @@ -390,7 +390,7 @@ DEBUG: generating subplan 42_2 for subquery SELECT users_ref_test_table.id FROM DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) sub ORDER BY id DESC DEBUG: Creating router plan DEBUG: Plan is router executable - id + id --------------------------------------------------------------------- 7 6 @@ -416,7 +416,7 @@ DEBUG: generating subplan 45_2 for subquery SELECT sub1.id, (random() OPERATOR( DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT id, "?column?" FROM (SELECT intermediate_result.id, intermediate_result."?column?" FROM read_intermediate_result('45_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer, "?column?" double precision)) sub ORDER BY id DESC DEBUG: Creating router plan DEBUG: Plan is router executable - id | ?column? + id | ?column? --------------------------------------------------------------------- 7 | 0 6 | 0 @@ -436,7 +436,7 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id + user_id --------------------------------------------------------------------- 7 3 @@ -452,7 +452,7 @@ SELECT * FROM SELECT user_id, random() * 0 FROM (SELECT user_id FROM user_buy_test_table) sub2) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id | ?column? + user_id | ?column? 
+ user_id | ?column?
--------------------------------------------------------------------- 7 | 0 3 | 0 @@ -476,7 +476,7 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table WHERE user_id in (select id from users_ref_test_table)) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id + user_id --------------------------------------------------------------------- 3 2 @@ -493,7 +493,7 @@ SELECT * FROM SELECT user_id FROM user_buy_test_table) sub ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries - user_id + user_id --------------------------------------------------------------------- 7 3 @@ -526,7 +526,7 @@ FROM LEFT JOIN events_reference_table ON (events_reference_table.value_2 = users_table.user_id) ) as foo GROUP BY user_id ORDER BY 2 DESC LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 31248 3 | 15120 @@ -594,7 +594,7 @@ SELECT * FROM user_id > 2 and value_2 = 1) as foo_in ON (event_val_2 = user_id)) as foo LEFT JOIN (SELECT user_id as user_user_id FROM users_table) as fooo ON (user_id = user_user_id)) as bar ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 3 4 @@ -631,7 +631,7 @@ FROM FROM events_reference_table INNER JOIN users_table ON (users_table.user_id = events_reference_table.user_id) GROUP BY users_table.user_id) AS events_all LEFT JOIN events_table ON (events_all.usr_id = events_table.user_id) GROUP BY 2 ORDER BY 1 DESC, 2 DESC LIMIT 5; - max | usr_id + max | usr_id --------------------------------------------------------------------- 432 | 2 391 | 4 @@ -715,7 +715,7 @@ FROM ORDER BY user_id DESC LIMIT 10; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 1 | Thu Nov 23 21:54:46.924477 2017 1 | Thu Nov 23 21:54:46.924477 2017 @@ -772,7 +772,7 @@ GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; - value | generated_group_field + value | generated_group_field --------------------------------------------------------------------- 2 | 5 1 | 3 @@ -818,7 +818,7 @@ FROM ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; - value_3 | cnt + value_3 | cnt --------------------------------------------------------------------- 0 | 7 10 | 21 @@ -871,7 +871,7 @@ LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 3 | 5 3 | 5 @@ -920,7 +920,7 @@ count(*) AS cnt, "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field + cnt | generated_group_field --------------------------------------------------------------------- 336 | 2 210 | 1 @@ -999,7 +999,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event + user_id | sum | length | hasdone_event --------------------------------------------------------------------- 2 | 72 | 14 | Has done event 3 | 238 | 14 | Has done event @@ -1034,7 +1034,7 @@ FROM ( GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; - user_id | sum | length | hasdone_event + user_id | sum | length | hasdone_event --------------------------------------------------------------------- 1 | 55 | 14 | Has done event 2 | 88 | 14 | Has done event @@ -1080,7 +1080,7 @@ count(*) AS cnt, "generated_group_field" 
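-- The hunks in this file keep exercising one planner rule: in a subquery that Citus pushes down,
-- a reference table may sit on the inner side of an outer join, but not on the outer side, which is
-- when the "cannot pushdown the subquery" error shown in this file is raised. A minimal sketch of
-- both sides of that rule, reusing the distributed user_buy_test_table and the reference
-- users_ref_test_table created earlier in this file (a sketch in the tests' own shape, not a hunk
-- of this diff):
SELECT count(*) FROM
    (SELECT random() FROM user_buy_test_table
     LEFT JOIN users_ref_test_table
     ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1;
-- pushed down: the reference table is only on the inner side of the join
SELECT count(*) FROM
    (SELECT random() FROM users_ref_test_table
     LEFT JOIN user_buy_test_table
     ON users_ref_test_table.id = user_buy_test_table.user_id) subquery_1;
-- rejected: the reference table is on the outer side of the outer join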
ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; - cnt | generated_group_field + cnt | generated_group_field --------------------------------------------------------------------- 737 | 5 679 | 1 @@ -1184,7 +1184,7 @@ INNER JOIN ORDER BY types LIMIT 5; - types + types --------------------------------------------------------------------- 0 0 @@ -1264,7 +1264,7 @@ GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 217 2 | 191 @@ -1324,7 +1324,7 @@ INNER JOIN WHERE value_1 > 2 and value_1 < 4) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 191 1 | 191 @@ -1349,7 +1349,7 @@ SELECT count(*) FROM WHERE subquery_1.user_id != subquery_2.user_id ; DEBUG: generating subplan 84_1 for subquery SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.user_id OPERATOR(pg_catalog.>) users_ref_test_table.id))) DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT user_buy_test_table.user_id, random() AS random FROM (public.user_buy_test_table LEFT JOIN public.users_ref_test_table ON ((user_buy_test_table.item_id OPERATOR(pg_catalog.>) users_ref_test_table.id)))) subquery_1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) subquery_2 WHERE (subquery_1.user_id OPERATOR(pg_catalog.<>) subquery_2.user_id) - count + count --------------------------------------------------------------------- 67 (1 row) @@ -1426,7 +1426,7 @@ SELECT foo.user_id FROM SELECT m.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) WHERE event_type > 100 ) as foo; - user_id + user_id --------------------------------------------------------------------- (0 rows) @@ -1438,7 +1438,7 @@ SELECT foo.user_id FROM GROUP BY r.user_id ) as foo ORDER BY 1 DESC; - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -1456,7 +1456,7 @@ SELECT foo.user_id FROM GROUP BY r.user_id, m.user_id ) as foo ORDER BY 1 LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1471,7 +1471,7 @@ SELECT foo.user_id FROM ) as foo ORDER BY 1 DESC LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 6 6 @@ -1486,7 +1486,7 @@ SELECT foo.user_id FROM ( SELECT DISTINCT ON(r.user_id) r.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) ) as foo; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1502,7 +1502,7 @@ SELECT foo.user_id FROM SELECT DISTINCT ON(r.user_id, m.user_id) r.user_id, random() FROM users_table m JOIN events_reference_table r ON int4eq(m.user_id, r.user_id) ) as foo ORDER BY 1 LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1519,7 +1519,7 @@ FROM ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time + distinct_users | event_type | time --------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu 
Nov 23 18:10:21.338399 2017 @@ -1539,7 +1539,7 @@ ON (events_dist.user_id = users_ref.distinct_users) ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time + distinct_users | event_type | time --------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 @@ -1557,7 +1557,7 @@ FROM ORDER BY time DESC LIMIT 5 OFFSET 0; - distinct_users | event_type | time + distinct_users | event_type | time --------------------------------------------------------------------- 1 | 6 | Thu Nov 23 21:54:46.924477 2017 4 | 1 | Thu Nov 23 18:10:21.338399 2017 @@ -1573,7 +1573,7 @@ SELECT * FROM ( SELECT DISTINCT users_reference_table.user_id FROM users_reference_table, events_table WHERE users_reference_table.user_id = events_table.value_4 ) as foo; - user_id + user_id --------------------------------------------------------------------- (0 rows) @@ -1582,7 +1582,7 @@ SELECT * FROM SELECT users_reference_table.user_id FROM users_reference_table, events_table WHERE users_reference_table.user_id = events_table.value_4 GROUP BY 1 ) as foo; - user_id + user_id --------------------------------------------------------------------- (0 rows) @@ -1593,7 +1593,7 @@ SELECT * FROM SELECT DISTINCT users_reference_table.user_id FROM users_reference_table, (SELECT user_id, random() FROM events_table) as us_events WHERE users_reference_table.user_id = us_events.user_id ) as foo ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1610,7 +1610,7 @@ SELECT * FROM ) as foo ORDER BY 1 DESC LIMIT 4; - user_id | user_id + user_id | user_id --------------------------------------------------------------------- 6 | 6 5 | 5 @@ -1633,18 +1633,18 @@ SELECT * FROM ) as foo ORDER BY 1 DESC LIMIT 4; - user_id | value_4 + user_id | value_4 --------------------------------------------------------------------- - 6 | - 5 | - 4 | - 3 | + 6 | + 5 | + 4 | + 3 | (4 rows) -- test the read_intermediate_result() for GROUP BYs BEGIN; SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,200) s'); - broadcast_intermediate_result + broadcast_intermediate_result --------------------------------------------------------------------- 200 (1 row) @@ -1663,7 +1663,7 @@ GROUP BY res.val_square) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1680,7 +1680,7 @@ JOIN FROM read_intermediate_result('squares', 'binary') AS res (val int, val_square int)) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1704,7 +1704,7 @@ squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1729,7 +1729,7 @@ GROUP BY res2.val_square) squares ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 4 @@ -1753,7 +1753,7 @@ FROM ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -1778,7 +1778,7 @@ JOIN ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 4 @@ -1798,7 +1798,7 @@ FROM ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id 
--------------------------------------------------------------------- 5 6 @@ -1822,7 +1822,7 @@ JOIN ON (mx = user_id) ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/multi_subquery_in_where_clause.out b/src/test/regress/expected/multi_subquery_in_where_clause.out index 413538b36..5bc42953a 100644 --- a/src/test/regress/expected/multi_subquery_in_where_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_clause.out @@ -24,7 +24,7 @@ GROUP BY user_id HAVING count(*) > 2 ORDER BY user_id LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 5 @@ -52,7 +52,7 @@ GROUP BY user_id HAVING count(*) > 1 ORDER BY user_id LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 5 @@ -94,7 +94,7 @@ GROUP BY ORDER BY 1 DESC LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 4 3 @@ -117,7 +117,7 @@ WHERE e1.user_id = e2.user_id ) ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 2 2 @@ -156,7 +156,7 @@ WHERE GROUP BY 1 HAVING count(*) > 2 ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -173,7 +173,7 @@ FROM users_table WHERE user_id =ANY(SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC LIMIT 5; - user_id | count + user_id | count --------------------------------------------------------------------- 5 | 26 4 | 23 @@ -200,7 +200,7 @@ GROUP BY user_id ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -273,7 +273,7 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 2 | 2 2 | 2 @@ -363,7 +363,7 @@ WHERE GROUP BY user_id HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 2 3 @@ -396,7 +396,7 @@ FROM ( GROUP BY user_id ) q ORDER BY 2 DESC, 1; - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 5 | 364 (1 row) @@ -572,7 +572,7 @@ WHERE (SELECT 1) ORDER BY 1 ASC LIMIT 2; - user_id + user_id --------------------------------------------------------------------- 1 1 @@ -589,7 +589,7 @@ WHERE (SELECT random()) AND user_id < 0 ORDER BY 1 ASC LIMIT 2; - user_id + user_id --------------------------------------------------------------------- (0 rows) @@ -640,7 +640,7 @@ WHERE user_id ) as f_outer WHERE f_inner.user_id = f_outer.user_id ) ORDER BY 1 LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 1 (1 row) @@ -655,7 +655,7 @@ WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 1 AND value_1 DEBUG: generating subplan 26_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 5) AND (value_1 OPERATOR(pg_catalog.<=) 6)) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 1) AND (users_table_1.value_1 
OPERATOR(pg_catalog.<=) 2)))) AND (user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table_1.user_id FROM public.users_table users_table_1 WHERE ((users_table_1.value_1 OPERATOR(pg_catalog.>=) 3) AND (users_table_1.value_1 OPERATOR(pg_catalog.<=) 4)))) AND (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id + user_id --------------------------------------------------------------------- 6 6 diff --git a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out index d246aaff0..91b133ad4 100644 --- a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out +++ b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out @@ -1,23 +1,23 @@ -- -- queries to test the subquery pushdown on reference tables -- subqueries in WHERE with IN operator -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 IN - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + value_2 IN + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -26,39 +26,39 @@ LIMIT 3; -- subqueries in WHERE with NOT EXISTS operator, should work since -- reference table in the inner part of the join -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - NOT EXISTS - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + NOT EXISTS + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; - user_id + user_id --------------------------------------------------------------------- (0 rows) --- subqueries in WHERE with NOT EXISTS operator, should not work since +-- subqueries in WHERE with NOT EXISTS operator, should not work since -- there is a correlated subquery in WHERE clause -SELECT +SELECT user_id -FROM +FROM users_reference_table -WHERE - NOT EXISTS - (SELECT - value_2 - FROM - events_table - WHERE +WHERE + NOT EXISTS + (SELECT + value_2 + FROM + events_table + WHERE users_reference_table.user_id = events_table.user_id ) LIMIT 3; @@ -119,7 +119,7 @@ WHERE ) ORDER BY user_id LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 2 3 @@ -142,7 +142,7 @@ WHERE ) ORDER BY user_id LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -168,23 +168,23 @@ LIMIT 3; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- subqueries in WHERE with IN operator without equality -SELECT +SELECT users_table.user_id, count(*) -FROM +FROM users_table -WHERE - value_2 IN - (SELECT - value_2 - FROM - events_reference_table - WHERE +WHERE + value_2 IN + (SELECT + value_2 + FROM + events_reference_table + WHERE users_table.user_id > events_reference_table.user_id ) GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count + user_id | count --------------------------------------------------------------------- 5 | 26 4 | 23 @@ -208,7 +208,7 @@ WHERE GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count + user_id | 
count --------------------------------------------------------------------- 5 | 26 4 | 23 @@ -232,7 +232,7 @@ WHERE GROUP BY users_table.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; - user_id | count + user_id | count --------------------------------------------------------------------- 6 | 10 (1 row) @@ -298,7 +298,7 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 5 | 5 5 | 5 @@ -368,14 +368,14 @@ ORDER BY 1, 2; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- non-partition key equality with reference table - SELECT - user_id, count(*) -FROM - users_table -WHERE - value_3 =ANY(SELECT value_2 FROM users_reference_table WHERE value_1 >= 1 AND value_1 <= 2) + SELECT + user_id, count(*) +FROM + users_table +WHERE + value_3 =ANY(SELECT value_2 FROM users_reference_table WHERE value_1 >= 1 AND value_1 <= 2) GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | count + user_id | count --------------------------------------------------------------------- 5 | 26 4 | 23 @@ -385,25 +385,25 @@ WHERE (5 rows) -- non-partition key comparison with reference table -SELECT +SELECT user_id, count(*) -FROM +FROM events_table as e1 WHERE event_type IN - (SELECT + (SELECT event_type - FROM + FROM events_reference_table as e2 WHERE value_2 = 2 AND value_3 > 3 AND e1.value_2 > e2.value_2 - ) + ) GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | count + user_id | count --------------------------------------------------------------------- 2 | 7 5 | 6 @@ -413,8 +413,8 @@ LIMIT 5; (5 rows) -- subqueries in both WHERE and FROM clauses --- should work since reference table is on the --- inner part of the join +-- should work since reference table is on the +-- inner part of the join SELECT user_id, value_2 FROM users_table WHERE value_1 > 1 AND value_1 < 3 AND value_2 >= 5 @@ -475,7 +475,7 @@ SELECT user_id, value_2 FROM users_table WHERE HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 5 | 5 5 | 5 @@ -485,15 +485,15 @@ ORDER BY 1, 2; SET client_min_messages TO DEBUG1; -- recursively planning subqueries in WHERE clause due to recurring table in FROM SELECT - count(*) -FROM - users_reference_table -WHERE user_id + count(*) +FROM + users_reference_table +WHERE user_id NOT IN (SELECT users_table.value_2 FROM users_table JOIN users_reference_table as u2 ON users_table.value_2 = u2.value_2); DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) - count + count --------------------------------------------------------------------- 10 (1 row) @@ -501,7 +501,7 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT count(*) AS co -- recursively planning subqueries in WHERE clause due to recurring table in FROM SELECT count(*) FROM - (SELECT + (SELECT user_id, random() FROM users_reference_table) AS vals WHERE vals.user_id NOT IN 
(SELECT users_table.value_2 @@ -509,7 +509,7 @@ FROM JOIN users_reference_table AS u2 ON users_table.value_2 = u2.value_2); DEBUG: generating subplan 20_1 for subquery SELECT users_table.value_2 FROM (public.users_table JOIN public.users_reference_table u2 ON ((users_table.value_2 OPERATOR(pg_catalog.=) u2.value_2))) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_reference_table.user_id, random() AS random FROM public.users_reference_table) vals WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) - count + count --------------------------------------------------------------------- 10 (1 row) @@ -543,28 +543,28 @@ WHERE user_id IN ORDER BY 1,2,3 LIMIT 5; DEBUG: push down of limit count: 5 - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | - 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | + 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | + 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | (5 rows) SET client_min_messages TO DEFAULT; -- not supported since GROUP BY references to an upper level query -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 > - (SELECT - max(value_2) - FROM - events_reference_table - WHERE +WHERE + value_2 > + (SELECT + max(value_2) + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id AND event_type = 2 GROUP BY users_table.user_id @@ -577,17 +577,17 @@ ERROR: cannot push down this subquery DETAIL: Group by list without partition column is currently unsupported when a subquery references a column from another query -- similar query with slightly more complex group by -- though the error message is a bit confusing -SELECT +SELECT user_id -FROM +FROM users_table -WHERE - value_2 > - (SELECT - max(value_2) - FROM - events_reference_table - WHERE +WHERE + value_2 > + (SELECT + max(value_2) + FROM + events_reference_table + WHERE users_table.user_id = events_reference_table.user_id AND event_type = 2 GROUP BY (users_table.user_id * 2) diff --git a/src/test/regress/expected/multi_subquery_misc.out b/src/test/regress/expected/multi_subquery_misc.out index bbaa8f047..3c8abc67d 100644 --- a/src/test/regress/expected/multi_subquery_misc.out +++ b/src/test/regress/expected/multi_subquery_misc.out @@ -32,7 +32,7 @@ FROM ( ) AS shard_union ORDER BY user_lastseen DESC, user_id; EXECUTE prepared_subquery_1; - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 @@ -66,49 +66,49 @@ FROM ( ORDER BY user_lastseen DESC, user_id; -- should be fine with more than five executions EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 
23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 (2 rows) EXECUTE prepared_subquery_2(1, 3); - user_id | user_lastseen | array_length + user_id | user_lastseen | array_length --------------------------------------------------------------------- 2 | Thu Nov 23 11:47:26.900284 2017 | 12 3 | Thu Nov 23 11:18:53.114408 2017 | 14 @@ -121,14 +121,14 @@ FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $4 AND value_1 <= $3) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $5 AND value_1 <= $6) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $1 AND value_1 <= $2) -GROUP BY +GROUP BY user_id ORDER BY user_id DESC LIMIT 5; -- enough times (6+) to actually use prepared statements EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -138,7 +138,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -148,7 +148,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -158,7 +158,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -168,7 +168,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -178,7 +178,7 @@ EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); (5 rows) EXECUTE prepared_subquery_3(4, 5, 1, 0, 2, 3); - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -204,51 +204,51 @@ BEGIN short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 3 ) temp ON users_table.user_id = temp.user_id - WHERE + WHERE 
users_table.value_1 < $2; END; $$ LANGUAGE plpgsql; -- enough times (6+) to actually use prepared statements SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) SELECT plpgsql_subquery_test(1, 2); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 539 (1 row) -- this should also work, but should return 0 given that int = NULL always returns false SELECT plpgsql_subquery_test(1, NULL); - plpgsql_subquery_test + plpgsql_subquery_test --------------------------------------------------------------------- 0 (1 row) @@ -267,7 +267,7 @@ CREATE FUNCTION sql_subquery_test(int, int) RETURNS bigint AS $$ short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 3 ) temp ON users_table.user_id = temp.user_id - WHERE + users_table.value_1 < $2; $$ LANGUAGE SQL; -- should error out @@ -277,7 +277,7 @@ DETAIL: Possibly this is caused by the use of parameters in SQL functions, whic HINT: Consider using PL/pgSQL functions instead.
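-- The two functions in the surrounding hunks draw the line this HINT refers to: the same
-- distributed query errors out when its parameter is referenced from a plain SQL function body,
-- yet works when wrapped in PL/pgSQL. A minimal sketch of that contrast with hypothetical
-- function names (the tests' own versions are plpgsql_subquery_test and sql_subquery_test):
CREATE FUNCTION count_below_sql(int) RETURNS bigint AS $$
    -- parameter $1 inside a SQL function body: errors out against a distributed table
    SELECT count(*) FROM users_table WHERE value_1 < $1;
$$ LANGUAGE SQL;
CREATE FUNCTION count_below_plpgsql(p int) RETURNS bigint AS $$
BEGIN
    -- the same predicate in PL/pgSQL, as the HINT suggests: works
    RETURN (SELECT count(*) FROM users_table WHERE value_1 < p);
END;
$$ LANGUAGE plpgsql;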
CONTEXT: SQL function "sql_subquery_test" statement 1 -- the joins are actually removed since they are --- not needed by PostgreSQL planner (e.g., target list +-- not needed by PostgreSQL planner (e.g., target list -- doesn't contain anything from there) -- but Citus can still pushdown this query SELECT @@ -302,7 +302,7 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id GROUP BY 1 ORDER BY 2 DESC; - user_id | count + user_id | count --------------------------------------------------------------------- 5 | 676 4 | 529 @@ -313,7 +313,7 @@ ORDER BY 2 DESC; (6 rows) -- the joins are actually removed since they are --- not needed by PostgreSQL planner (e.g., target list +-- not needed by PostgreSQL planner (e.g., target list -- doesn't contain anything from there) -- but Citus can still plan this query even though the query -- is not safe to pushdown @@ -339,7 +339,7 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id GROUP BY 1 ORDER BY 2 DESC; - user_id | count + user_id | count --------------------------------------------------------------------- 5 | 676 4 | 529 @@ -374,7 +374,7 @@ INNER JOIN ( ) t3 ON t1.user_id = t3.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC, 7 DESC, 8 DESC LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 | user_id | user_id + user_id | time | value_1 | value_2 | value_3 | value_4 | user_id | user_id --------------------------------------------------------------------- 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | | 6 | 6 diff --git a/src/test/regress/expected/multi_subquery_union.out b/src/test/regress/expected/multi_subquery_union.out index 73a7d44b0..2206e5a4a 100644 --- a/src/test/regress/expected/multi_subquery_union.out +++ b/src/test/regress/expected/multi_subquery_union.out @@ -13,7 +13,7 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter + user_id | counter --------------------------------------------------------------------- 2 | 5 3 | 5 @@ -31,7 +31,7 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | max + user_id | max --------------------------------------------------------------------- 5 | 5 1 | 4 @@ -46,7 +46,7 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter + user_id | counter --------------------------------------------------------------------- 2 | 5 3 | 5 @@ -64,7 +64,7 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter + user_id | counter --------------------------------------------------------------------- 2 | 5 2 | 5 @@ -82,7 +82,7 @@ FROM ( ) user_id ORDER BY 2 DESC,1 LIMIT 5; - user_id | counter + user_id | counter --------------------------------------------------------------------- 2 | 5 2 | 5 @@ -101,7 +101,7 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 15 3 | 15 @@ -120,7 +120,7 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 32 3 | 32 @@ -139,7 +139,7 @@ FROM ( GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 15 3 | 15 @@ -159,7 +159,7 @@ GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 1 | 7 2 | 15 @@ -180,7 +180,7 @@ GROUP BY user_id --HAVING sum(counter) > 900 ORDER 
BY 1,2 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 1 | 7 2 | 15 @@ -203,7 +203,7 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum + sum --------------------------------------------------------------------- 141 94 @@ -225,7 +225,7 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum + sum --------------------------------------------------------------------- 135 87 @@ -247,7 +247,7 @@ FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum + sum --------------------------------------------------------------------- 135 87 @@ -297,7 +297,7 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 107 3 | 101 @@ -348,7 +348,7 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 107 3 | 101 @@ -405,7 +405,7 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 43 1 | 42 @@ -456,7 +456,7 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 43 1 | 42 @@ -501,7 +501,7 @@ FROM GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 43 1 | 42 @@ -546,7 +546,7 @@ FROM ) AS t) "q" ORDER BY 1 LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 1 1 @@ -592,7 +592,7 @@ FROM GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 43 1 | 42 @@ -609,7 +609,7 @@ FROM UNION ALL (SELECT user_id FROM events_table) ) b; - count + count --------------------------------------------------------------------- 202 (1 row) @@ -623,7 +623,7 @@ FROM UNION ALL (SELECT user_id FROM events_reference_table) ) b; - count + count --------------------------------------------------------------------- 202 (1 row) @@ -639,7 +639,7 @@ FROM ) b ORDER BY 1 DESC LIMIT 5; - user_id + user_id --------------------------------------------------------------------- 6 6 @@ -659,7 +659,7 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -679,7 +679,7 @@ FROM ) b ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | value_3_sum + user_id | value_3_sum --------------------------------------------------------------------- 4 | 65 4 | 65 @@ -700,7 +700,7 @@ FROM GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 119 4 | 111 @@ -728,7 +728,7 @@ FROM ) b ORDER BY 1 
DESC, 2 DESC LIMIT 5; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -757,7 +757,7 @@ FROM GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - max + max --------------------------------------------------------------------- 5 5 @@ -776,7 +776,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1,2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 0 | 31 1 | 76 @@ -801,7 +801,7 @@ FROM ( SELECT 2 * user_id, sum(value_2) AS counter FROM users_table where value_1 < 5 and value_1 < 6 GROUP BY user_id HAVING sum(value_2) > 25 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - sum + sum --------------------------------------------------------------------- 80 76 @@ -833,7 +833,7 @@ UNION GROUP BY user_id) ) as ftop ORDER BY 1,2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 1 | 20 1 | 62 @@ -891,7 +891,7 @@ UNION ) as ftop ORDER BY 2, 1 LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 6 | 43 1 | 62 @@ -923,7 +923,7 @@ UNION ) ftop ORDER BY 2, 1 LIMIT 10; - sum | user_id + sum | user_id --------------------------------------------------------------------- 300 | 1 1200 | 2 @@ -948,7 +948,7 @@ UNION ) ftop ORDER BY 2, 1 LIMIT 10; - value_2 | user_id + value_2 | user_id --------------------------------------------------------------------- 0 | 1 2 | 1 @@ -980,7 +980,7 @@ UNION ALL ) ftop ORDER BY 2, 1 LIMIT 10; - sum | user_id + sum | user_id --------------------------------------------------------------------- 300 | 1 300 | 1 @@ -1003,7 +1003,7 @@ FROM ( ) user_id GROUP BY user_id ORDER BY 1,2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 3 | 101 4 | 91 @@ -1052,7 +1052,7 @@ FROM ( GROUP BY user_id)) AS ftop ORDER BY 1,2; - user_id | sum + user_id | sum --------------------------------------------------------------------- 1 | 20 1 | 62 @@ -1083,7 +1083,7 @@ FROM UNION ALL (SELECT 2 * user_id FROM events_table) ) b; - count + count --------------------------------------------------------------------- 202 (1 row) @@ -1107,7 +1107,7 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -1125,7 +1125,7 @@ FROM UNION ALL (SELECT users_table.user_id FROM events_table, users_table WHERE events_table.user_id = users_table.user_id) ) b; - count + count --------------------------------------------------------------------- 1850 (1 row) @@ -1139,7 +1139,7 @@ FROM UNION ALL (SELECT 1) ) b; - count + count --------------------------------------------------------------------- 102 (1 row) @@ -1153,7 +1153,7 @@ FROM UNION ALL (SELECT (random() * 100)::int) ) b; - count + count --------------------------------------------------------------------- 102 (1 row) @@ -1177,7 +1177,7 @@ FROM ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; - user_id | value_3 + user_id | value_3 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -1229,7 +1229,7 @@ FROM ) as final_query GROUP BY types ORDER BY types; - types | sumofeventtype + types | sumofeventtype --------------------------------------------------------------------- 0 | 43 1 | 42 diff --git a/src/test/regress/expected/multi_subquery_window_functions.out b/src/test/regress/expected/multi_subquery_window_functions.out index 100c92133..aabfb9068 
100644 --- a/src/test/regress/expected/multi_subquery_window_functions.out +++ b/src/test/regress/expected/multi_subquery_window_functions.out @@ -23,7 +23,7 @@ ORDER BY 3 DESC, 1 DESC, 2 DESC LIMIT 10; - user_id | time | rnk + user_id | time | rnk --------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 @@ -51,7 +51,7 @@ ORDER BY 3 DESC, 1 DESC, 2 DESC LIMIT 10; - user_id | time | rnk + user_id | time | rnk --------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 23 @@ -79,7 +79,7 @@ ORDER BY 4 DESC, 3 DESC NULLS LAST, 1 DESC, 2 DESC LIMIT 10; - user_id | time | lag_event_type | row_no + user_id | time | lag_event_type | row_no --------------------------------------------------------------------- 2 | Wed Nov 22 20:16:16.614779 2017 | 0 | 24 2 | Wed Nov 22 22:06:12.107108 2017 | 3 | 23 @@ -110,7 +110,7 @@ ORDER BY 2 DESC, 1 DESC, 3 DESC LIMIT 10; - user_id | rnk | avg_val_2 + user_id | rnk | avg_val_2 --------------------------------------------------------------------- 6 | 2 | 2.0000000000000000 5 | 2 | 2.0909090909090909 @@ -140,7 +140,7 @@ ORDER BY 3 DESC NULLS LAST, 1 DESC, 2 DESC LIMIT 10; - min | min | lag_event_type | count + min | min | lag_event_type | count --------------------------------------------------------------------- 1 | Thu Nov 23 11:09:38.074595 2017 | 6 | 1 2 | Wed Nov 22 19:00:10.396739 2017 | 5 | 7 @@ -166,7 +166,7 @@ SELECT * FROM ) as foo ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | lag | rank + user_id | lag | rank --------------------------------------------------------------------- 2 | 2 | 109 5 | 5 | 105 @@ -195,7 +195,7 @@ SELECT * FROM ) as foo ORDER BY 3 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | lag | rank + user_id | lag | rank --------------------------------------------------------------------- 2 | 2 | 73 4 | 4 | 70 @@ -239,7 +239,7 @@ JOIN sub_1.user_id ORDER BY 3 DESC, 4 DESC, 1 DESC, 2 DESC NULLS LAST LIMIT 10; - user_id | max | max | max + user_id | max | max | max --------------------------------------------------------------------- 2 | 2 | 73 | 73 4 | 4 | 70 | 70 @@ -270,7 +270,7 @@ ORDER BY 3 DESC, 1 DESC,2 DESC LIMIT 10; - avg | max | my_rank + avg | max | my_rank --------------------------------------------------------------------- 3.5000000000000000 | Wed Nov 22 00:00:00 2017 | 2 (1 row) @@ -296,7 +296,7 @@ ORDER BY 3 DESC, 1 DESC,2 DESC LIMIT 10; - avg | max | my_rank + avg | max | my_rank --------------------------------------------------------------------- 3.7500000000000000 | Wed Nov 22 00:00:00 2017 | 2 3.3750000000000000 | Thu Nov 23 00:00:00 2017 | 1 @@ -322,7 +322,7 @@ ORDER BY 2 DESC, 1 DESC LIMIT 10; - avg | my_rank + avg | my_rank --------------------------------------------------------------------- 3.5000000000000000 | 1 (1 row) @@ -346,7 +346,7 @@ ORDER BY 1, 2, 3 DESC LIMIT 10; - user_id | time | sum + user_id | time | sum --------------------------------------------------------------------- 1 | Wed Nov 22 00:00:00 2017 | 1 1 | Thu Nov 23 00:00:00 2017 | 7 @@ -374,7 +374,7 @@ ORDER BY 1, 2, 3 LIMIT 20; - user_id | it_name | count + user_id | it_name | count --------------------------------------------------------------------- 2 | User_1 | 2 3 | User_1 | 6 @@ -395,7 +395,7 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 3 | 44 5 | 
43 @@ -417,7 +417,7 @@ GROUP BY user_id ORDER BY 2 DESC,1 LIMIT 10; - user_id | max + user_id | max --------------------------------------------------------------------- 3 | 15 4 | 13 @@ -440,7 +440,7 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank + user_id | rank --------------------------------------------------------------------- 5 | 6 2 | 5 @@ -462,7 +462,7 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank + user_id | rank --------------------------------------------------------------------- 5 | 6 2 | 5 @@ -496,7 +496,7 @@ FROM GROUP BY user_id ORDER BY 1 DESC LIMIT 5; - max + max --------------------------------------------------------------------- 5 3.5 @@ -538,7 +538,7 @@ FROM ( user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 2 | 107 3 | 101 @@ -568,7 +568,7 @@ ORDER BY user_id DESC LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 4 3 @@ -592,7 +592,7 @@ GROUP BY ORDER BY difference DESC, rank DESC, user_id LIMIT 20; - user_id | rank | difference | distinct_users + user_id | rank | difference | distinct_users --------------------------------------------------------------------- 4 | 12 | 306 | 9 5 | 12 | 136 | 8 @@ -641,7 +641,7 @@ WHERE ORDER BY abs DESC, user_id LIMIT 10; - user_id | abs + user_id | abs --------------------------------------------------------------------- 6 | 2 1 | 1 @@ -666,7 +666,7 @@ ORDER BY 1 DESC LIMIT 5; - user_id | count + user_id | count --------------------------------------------------------------------- 6 | 1 5 | 1 @@ -709,7 +709,7 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE) user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit Output: remote_scan.user_id, remote_scan.sum diff --git a/src/test/regress/expected/multi_subtransactions.out b/src/test/regress/expected/multi_subtransactions.out index ce867aaac..aaa8f2ef5 100644 --- a/src/test/regress/expected/multi_subtransactions.out +++ b/src/test/regress/expected/multi_subtransactions.out @@ -3,9 +3,9 @@ CREATE TABLE artists ( name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- add some data @@ -21,7 +21,7 @@ DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -33,7 +33,7 @@ DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; - id | name + id | name --------------------------------------------------------------------- 5 | Asher Lev (1 row) @@ -48,7 +48,7 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; - id | name + id | name --------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -63,7 +63,7 @@ DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; - id | name + id | name --------------------------------------------------------------------- 5 | Jacob Kahn (1 row) @@ -81,7 +81,7 @@ ROLLBACK TO SAVEPOINT s0; INSERT INTO artists VALUES (6, 'Emily Carr'); COMMIT; SELECT * FROM artists WHERE id=6; - id | name + id | name 
--------------------------------------------------------------------- 6 | Emily Carr (1 row) @@ -97,7 +97,7 @@ ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; - id | name + id | name --------------------------------------------------------------------- (0 rows) @@ -131,7 +131,7 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (9, 'Mohsen Namjoo'); COMMIT; SELECT * FROM artists WHERE id IN (7, 8, 9) ORDER BY id; - id | name + id | name --------------------------------------------------------------------- 8 | Sogand 9 | Mohsen Namjoo @@ -148,7 +148,7 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (10, 'Mahmoud Farshchian'); COMMIT; SELECT * FROM artists WHERE id IN (9, 10) ORDER BY id; - id | name + id | name --------------------------------------------------------------------- 10 | Mahmoud Farshchian (1 row) @@ -164,7 +164,7 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (11, 'Egon Schiele'); COMMIT; SELECT * FROM artists WHERE id IN (10, 11) ORDER BY id; - id | name + id | name --------------------------------------------------------------------- 11 | Egon Schiele (1 row) @@ -180,7 +180,7 @@ ROLLBACK TO s1; INSERT INTO artists VALUES (12, 'Marc Chagall'); COMMIT; SELECT * FROM artists WHERE id IN (11, 12) ORDER BY id; - id | name + id | name --------------------------------------------------------------------- 12 | Marc Chagall (1 row) @@ -191,9 +191,9 @@ create table t2(a int, b int CHECK(b > 0)); ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000; select create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); - create_distributed_table | create_distributed_table + create_distributed_table | create_distributed_table --------------------------------------------------------------------- - | + | (1 row) begin; @@ -236,7 +236,7 @@ with r AS ( ) insert into t1 select * from r; commit; select * from t2 order by a, b; - a | b + a | b --------------------------------------------------------------------- 1 | 4 2 | 5 @@ -244,7 +244,7 @@ select * from t2 order by a, b; (3 rows) select * from t1 order by a, b; - a | b + a | b --------------------------------------------------------------------- 1 | 3 1 | 4 @@ -265,9 +265,9 @@ CREATE TABLE researchers ( ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers', 'lab_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Basic rollback and release @@ -279,7 +279,7 @@ ROLLBACK TO s1; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM researchers WHERE id in (7, 8); - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 7 | 4 | Jan Plaza (1 row) @@ -294,7 +294,7 @@ ROLLBACK TO SAVEPOINT s1; INSERT INTO researchers VALUES (12, 10, 'Stephen Kleene'); COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -310,7 +310,7 @@ SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -326,7 +326,7 @@ SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id 
| name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -344,7 +344,7 @@ END $$; NOTICE: caught not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -361,7 +361,7 @@ END $$; NOTICE: caught manual plpgsql_error COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -378,7 +378,7 @@ END $$; ERROR: not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene (1 row) @@ -395,7 +395,7 @@ EXCEPTION END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; - id | lab_id | name + id | lab_id | name --------------------------------------------------------------------- 12 | 10 | Stephen Kleene 32 | 10 | Raymond Smullyan diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index ec7f9487a..826b7aad1 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -5,9 +5,9 @@ SET citus.next_shard_id TO 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify that the citus extension can't be dropped while distributed tables exist @@ -30,9 +30,9 @@ COMMIT; -- recreate testtableddl CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify that the table can be dropped @@ -42,13 +42,13 @@ CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -- create table and do create empty shard test here, too SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testtableddl', 'distributecol', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); - ?column? + ?column? 
--------------------------------------------------------------------- 1 (1 row) @@ -58,17 +58,17 @@ DROP TABLE testtableddl; RESET citus.shard_replication_factor; -- ensure no metadata of distributed tables are remaining SELECT * FROM pg_dist_partition; - logicalrelid | partmethod | partkey | colocationid | repmodel + logicalrelid | partmethod | partkey | colocationid | repmodel --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------------------------------------------------------------- (0 rows) SELECT * FROM pg_dist_shard_placement; - shardid | shardstate | shardlength | nodename | nodeport | placementid + shardid | shardstate | shardlength | nodename | nodeport | placementid --------------------------------------------------------------------- (0 rows) @@ -77,13 +77,13 @@ DROP EXTENSION citus; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -93,9 +93,9 @@ CREATE TABLE testserialtable(id serial, group_id integer); SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('testserialtable', 'group_id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- should not be able to add additional serial columns @@ -122,7 +122,7 @@ DROP TABLE testserialtable; \c - - - :worker_1_port \ds List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 7c6f27218..67f29166d 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -29,9 +29,9 @@ SET citus.explain_distributed_queries TO off; -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Create logical shards with shardids 200, 201, and 202 @@ -73,7 +73,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -85,7 +85,7 @@ DEBUG: Router planner does not support append-partitioned tables. 
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -99,7 +99,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -111,7 +111,7 @@ DEBUG: Router planner does not support append-partitioned tables. DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) @@ -121,9 +121,9 @@ DEBUG: assigned task to node localhost:xxxxx COMMIT; CREATE TABLE task_assignment_reference_table (test_id integer); SELECT create_reference_table('task_assignment_reference_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -135,7 +135,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled @@ -145,7 +145,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled @@ -156,7 +156,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled @@ -166,7 +166,7 @@ EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: Plan is router executable - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled @@ -191,7 +191,7 @@ INSERT INTO explain_outputs SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table'); -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -210,7 +210,7 @@ INSERT INTO explain_outputs -- given that we're in 
the same transaction, the count should be 2 -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -223,9 +223,9 @@ TRUNCATE explain_outputs; SET citus.shard_replication_factor TO 2; CREATE TABLE task_assignment_replicated_hash (test_id integer); SELECT create_distributed_table('task_assignment_replicated_hash', 'test_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -237,7 +237,7 @@ INSERT INTO explain_outputs SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash'); -- given that we're in the same transaction, the count should be 1 SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -256,7 +256,7 @@ INSERT INTO explain_outputs -- given that we're in the same transaction, the count should be 2 -- since there are two different worker nodes SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -267,9 +267,9 @@ TRUNCATE explain_outputs; SET citus.shard_replication_factor TO 1; CREATE TABLE task_assignment_nonreplicated_hash (test_id integer, ref_id integer); SELECT create_distributed_table('task_assignment_nonreplicated_hash', 'test_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- run the query two times to make sure that it hits the correct worker every time @@ -289,7 +289,7 @@ FROM (SELECT * FROM task_assignment_nonreplicated_hash WHERE test_id = 3) AS dis $cmd$, 'task_assignment_nonreplicated_hash'); -- The count should be 1 since the shard exists in only one worker node SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -299,9 +299,9 @@ TRUNCATE explain_outputs; -- only contains intermediate results CREATE TABLE task_assignment_test_table_2 (test_id integer); SELECT create_distributed_table('task_assignment_test_table_2', 'test_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.task_assignment_policy TO 'round-robin'; @@ -318,7 +318,7 @@ $cmd$, 'task_assignment_test_table_2'); -- The count should be 2 since the intermediate results are processed on -- different workers SELECT count(DISTINCT value) FROM explain_outputs; - count + count --------------------------------------------------------------------- 2 (1 row) diff --git a/src/test/regress/expected/multi_task_string_size.out b/src/test/regress/expected/multi_task_string_size.out index 97eea8e57..7e4a3c188 100644 --- a/src/test/regress/expected/multi_task_string_size.out +++ b/src/test/regress/expected/multi_task_string_size.out @@ -206,14 +206,14 @@ CREATE TABLE wide_table long_column_200 int ); SELECT create_distributed_table('wide_table', 'long_column_001'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET citus.task_executor_type TO 'task-tracker'; SHOW citus.max_task_string_size; - citus.max_task_string_size + citus.max_task_string_size 
--------------------------------------------------------------------- 12288 (1 row) @@ -232,7 +232,7 @@ ERROR: Task failed to execute CONTEXT: PL/pgSQL function raise_failed_execution(text) line 6 at RAISE -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); - long_column_001 | long_column_002 | long_column_003 + long_column_001 | long_column_002 | long_column_003 --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index dbf97c81f..f99b3d7d7 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -97,8 +97,8 @@ ORDER BY a.attrelid, a.attnum; $desc_views$ ); - run_command_on_master_and_workers + run_command_on_master_and_workers --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 8924f58fe..850f143a1 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -88,7 +88,7 @@ CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 1500 ALTER SYSTEM SET citus.metadata_sync_interval TO 3000; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/multi_tpch_query1.out b/src/test/regress/expected/multi_tpch_query1.out index 1debbf26d..c5dba76df 100644 --- a/src/test/regress/expected/multi_tpch_query1.out +++ b/src/test/regress/expected/multi_tpch_query1.out @@ -23,7 +23,7 @@ GROUP BY ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 diff --git a/src/test/regress/expected/multi_tpch_query10.out b/src/test/regress/expected/multi_tpch_query10.out index 102a54036..e5905b64b 100644 --- a/src/test/regress/expected/multi_tpch_query10.out +++ b/src/test/regress/expected/multi_tpch_query10.out @@ -34,18 +34,18 @@ GROUP BY ORDER BY revenue DESC LIMIT 20; - c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment + c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment --------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi - 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. 
pinto - 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole + 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto + 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu - 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily + 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. 
silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote diff --git a/src/test/regress/expected/multi_tpch_query12.out b/src/test/regress/expected/multi_tpch_query12.out index 608901a60..664609088 100644 --- a/src/test/regress/expected/multi_tpch_query12.out +++ b/src/test/regress/expected/multi_tpch_query12.out @@ -30,7 +30,7 @@ GROUP BY l_shipmode ORDER BY l_shipmode; - l_shipmode | high_line_count | low_line_count + l_shipmode | high_line_count | low_line_count --------------------------------------------------------------------- MAIL | 11 | 15 SHIP | 11 | 19 diff --git a/src/test/regress/expected/multi_tpch_query14.out b/src/test/regress/expected/multi_tpch_query14.out index f3dba6d63..14a8358d7 100644 --- a/src/test/regress/expected/multi_tpch_query14.out +++ b/src/test/regress/expected/multi_tpch_query14.out @@ -15,7 +15,7 @@ WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; - promo_revenue + promo_revenue --------------------------------------------------------------------- 32.1126387112005225 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query19.out b/src/test/regress/expected/multi_tpch_query19.out index 94a26cb17..c00a5a825 100644 --- a/src/test/regress/expected/multi_tpch_query19.out +++ b/src/test/regress/expected/multi_tpch_query19.out @@ -32,7 +32,7 @@ WHERE AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); - revenue + revenue --------------------------------------------------------------------- 144747.0857 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query3.out b/src/test/regress/expected/multi_tpch_query3.out index d09962a32..f136b7227 100644 --- a/src/test/regress/expected/multi_tpch_query3.out +++ b/src/test/regress/expected/multi_tpch_query3.out @@ -24,7 +24,7 @@ GROUP BY ORDER BY revenue DESC, o_orderdate; - l_orderkey | revenue | o_orderdate | o_shippriority + l_orderkey | revenue | o_orderdate | o_shippriority --------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 diff --git a/src/test/regress/expected/multi_tpch_query6.out b/src/test/regress/expected/multi_tpch_query6.out index 40d2b2a4b..a0da69eda 100644 --- a/src/test/regress/expected/multi_tpch_query6.out +++ b/src/test/regress/expected/multi_tpch_query6.out @@ -11,7 +11,7 @@ WHERE and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; - revenue + revenue --------------------------------------------------------------------- 243277.7858 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7.out b/src/test/regress/expected/multi_tpch_query7.out index 816202e81..dfb057d4f 100644 --- a/src/test/regress/expected/multi_tpch_query7.out +++ b/src/test/regress/expected/multi_tpch_query7.out @@ -41,7 +41,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_tpch_query7_nested.out b/src/test/regress/expected/multi_tpch_query7_nested.out index 4184d2aa3..73ff0bf73 100644 --- a/src/test/regress/expected/multi_tpch_query7_nested.out +++ 
b/src/test/regress/expected/multi_tpch_query7_nested.out @@ -20,18 +20,18 @@ FROM orders, customer, ( - SELECT + SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation - FROM + FROM nation n1, nation n2 - WHERE + WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') - OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') + OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE @@ -50,7 +50,7 @@ ORDER BY supp_nation, cust_nation, l_year; - supp_nation | cust_nation | l_year | revenue + supp_nation | cust_nation | l_year | revenue --------------------------------------------------------------------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 8d2ed2d9f..7c5cef8e4 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -5,7 +5,7 @@ SET citus.next_shard_id TO 1220000; -- properly. SET client_min_messages TO ERROR; SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -17,14 +17,14 @@ SET citus.force_max_query_parallelization TO ON; -- Disable auto-recovery for the initial tests ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) -- Ensure pg_dist_transaction is empty SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -57,25 +57,25 @@ INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_commit'), INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_be_forgotten'), (0, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 6 (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -83,13 +83,13 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -105,13 +105,13 @@ SET citus.multi_shard_commit_protocol TO '2pc'; -- there are at least 2 entries CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) >= 2 FROM pg_dist_transaction; - ?column? 
+ ?column? --------------------------------------------------------------------- t (1 row) @@ -119,19 +119,19 @@ SELECT count(*) >= 2 FROM pg_dist_transaction; -- create_reference_table should add another 2 recovery records CREATE TABLE test_recovery_ref (x text); SELECT create_reference_table('test_recovery_ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT count(*) >= 4 FROM pg_dist_transaction; - ?column? + ?column? --------------------------------------------------------------------- t (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -139,7 +139,7 @@ SELECT recover_prepared_transactions(); -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -149,7 +149,7 @@ BEGIN; ALTER TABLE test_recovery ADD COLUMN y text; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -157,19 +157,19 @@ SELECT count(*) FROM pg_dist_transaction; -- Committed DDL commands should write 4 transaction recovery records ALTER TABLE test_recovery ADD COLUMN y text; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -179,7 +179,7 @@ BEGIN; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -187,13 +187,13 @@ SELECT count(*) FROM pg_dist_transaction; -- Committed INSERT..SELECT should write 4 transaction recovery records INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -201,13 +201,13 @@ SELECT recover_prepared_transactions(); -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -215,13 +215,13 @@ SELECT recover_prepared_transactions(); -- Committed COPY should write 4 transaction records COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT 
recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -232,9 +232,9 @@ CREATE TABLE test_recovery_single (LIKE test_recovery); -- creating distributed table should write 2 transaction recovery records -- one connection/transaction per node SELECT create_distributed_table('test_recovery_single', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Multi-statement transactions should write 2 transaction recovery records @@ -245,13 +245,13 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -264,13 +264,13 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -280,13 +280,13 @@ SELECT recover_prepared_transactions(); SET citus.force_max_query_parallelization TO OFF; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -295,13 +295,13 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -311,13 +311,13 @@ SELECT recover_prepared_transactions(); SET citus.force_max_query_parallelization TO ON; BEGIN; SELECT count(*) FROM test_recovery_single WHERE x = 'hello-0'; - count + count --------------------------------------------------------------------- 3 (1 row) SELECT count(*) FROM test_recovery_single WHERE x = 'hello-2'; - count + count --------------------------------------------------------------------- 3 (1 row) @@ -326,7 +326,7 @@ INSERT INTO test_recovery_single VALUES ('hello-0'); INSERT INTO test_recovery_single VALUES ('hello-2'); COMMIT; SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -334,27 +334,27 @@ SELECT count(*) FROM pg_dist_transaction; -- Test whether auto-recovery runs ALTER SYSTEM SET citus.recover_2pc_interval TO 10; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) -- Sleep 1 second to give Valgrind enough time to clear transactions SELECT pg_sleep(1); - pg_sleep + 
pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_dist_transaction; - count + count --------------------------------------------------------------------- 0 (1 row) ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) @@ -363,7 +363,7 @@ DROP TABLE test_recovery_ref; DROP TABLE test_recovery; DROP TABLE test_recovery_single; SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 6b6f8e62c..867e434c0 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -7,9 +7,9 @@ SET citus.shard_count TO 4; -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -17,7 +17,7 @@ DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410000 1410001 @@ -33,7 +33,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410000 | 1 | localhost | 57637 1410000 | 1 | localhost | 57638 @@ -48,7 +48,7 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -57,7 +57,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410000 | table | postgres public | transactional_drop_shards_1410001 | table | postgres @@ -72,7 +72,7 @@ DROP TABLE transactional_drop_shards; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- (0 rows) @@ -84,14 +84,14 @@ WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner 
--------------------------------------------------------------------- (0 rows) @@ -99,7 +99,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -107,20 +107,20 @@ ORDER BY -- test master_delete_protocol in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('transactional_drop_shards'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1410004 (1 row) BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command + master_apply_delete_command --------------------------------------------------------------------- 1 (1 row) @@ -128,7 +128,7 @@ SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410004 (1 row) @@ -141,7 +141,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410004 | 1 | localhost | 57637 1410004 | 1 | localhost | 57638 @@ -151,7 +151,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410004 | table | postgres (1 row) @@ -160,7 +160,7 @@ ORDER BY -- test master_delete_protocol in transaction, then COMMIT BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command + master_apply_delete_command --------------------------------------------------------------------- 1 (1 row) @@ -168,7 +168,7 @@ SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- (0 rows) @@ -180,7 +180,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -188,14 +188,14 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) \c - - - :master_port -- test DROP table in a transaction after insertion SELECT 
master_create_empty_shard('transactional_drop_shards'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1410005 (1 row) @@ -206,7 +206,7 @@ DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410005 (1 row) @@ -219,7 +219,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 @@ -228,7 +228,7 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -237,7 +237,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -247,7 +247,7 @@ ORDER BY BEGIN; INSERT INTO transactional_drop_shards VALUES (1); SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); - master_apply_delete_command + master_apply_delete_command --------------------------------------------------------------------- 1 (1 row) @@ -255,7 +255,7 @@ SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410005 (1 row) @@ -268,7 +268,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 @@ -278,7 +278,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -297,7 +297,7 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410005 (1 row) @@ -310,7 +310,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport 
--------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 @@ -319,7 +319,7 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_shards List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards | table | postgres (1 row) @@ -328,7 +328,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -337,9 +337,9 @@ ORDER BY -- test DROP reference table with failing worker CREATE TABLE transactional_drop_reference(column1 int); SELECT create_reference_table('transactional_drop_reference'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) \set VERBOSITY terse @@ -348,7 +348,7 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410006 (1 row) @@ -361,7 +361,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410006 | 1 | localhost | 57637 1410006 | 1 | localhost | 57638 @@ -370,7 +370,7 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_reference List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_reference | table | postgres (1 row) @@ -379,7 +379,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_reference* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_reference_1410006 | table | postgres (1 row) @@ -392,7 +392,7 @@ ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410005 (1 row) @@ -405,7 +405,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 @@ -415,7 +415,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_shards_1410005 | table | postgres (1 row) @@ -426,9 +426,9 @@ DROP EVENT TRIGGER fail_drop_table; SET citus.shard_count TO 
8; CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL); SELECT create_distributed_table('transactional_drop_serial', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK @@ -437,7 +437,7 @@ DROP TABLE transactional_drop_serial; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410007 1410008 @@ -457,7 +457,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410007 | 1 | localhost | 57637 1410007 | 1 | localhost | 57638 @@ -480,7 +480,7 @@ ORDER BY -- verify table is not dropped \dt transactional_drop_serial List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_serial | table | postgres (1 row) @@ -489,7 +489,7 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | transactional_drop_serial_1410007 | table | postgres public | transactional_drop_serial_1410008 | table | postgres @@ -503,7 +503,7 @@ ORDER BY \ds transactional_drop_serial_column2_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -514,7 +514,7 @@ DROP TABLE transactional_drop_serial; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- (0 rows) @@ -526,14 +526,14 @@ WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- (0 rows) -- verify table is dropped \dt transactional_drop_serial List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -541,13 +541,13 @@ ORDER BY \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) \ds transactional_drop_serial_column2_seq List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- (0 rows) @@ -557,23 +557,23 @@ SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE transactional_drop_mx(column1 int); SELECT create_distributed_table('transactional_drop_mx', 'column1'); - create_distributed_table + 
create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop_mx'::regclass; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- see metadata is propagated to the worker \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410015 1410016 @@ -589,7 +589,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 @@ -604,7 +604,7 @@ ROLLBACK; -- verify metadata is not deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1410015 1410016 @@ -620,7 +620,7 @@ WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 @@ -636,7 +636,7 @@ COMMIT; -- verify metadata is deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- (0 rows) @@ -648,7 +648,7 @@ WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid, nodename, nodeport; - shardid | shardstate | nodename | nodeport + shardid | shardstate | nodename | nodeport --------------------------------------------------------------------- (0 rows) @@ -656,32 +656,32 @@ ORDER BY -- try using the coordinator as a worker and then dropping the table SELECT 1 FROM master_add_node('localhost', :master_port); NOTICE: Replicating reference table "transactional_drop_reference" to the node localhost:xxxxx - ?column? + ?column?
--------------------------------------------------------------------- 1 (1 row) CREATE TABLE citus_local (id serial, k int); SELECT create_distributed_table('citus_local', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO citus_local (k) VALUES (2); DROP TABLE citus_local; SELECT master_remove_node('localhost', :master_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) -- clean the workspace DROP TABLE transactional_drop_shards, transactional_drop_reference; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- test DROP TABLE as a non-superuser in a transaction block @@ -690,14 +690,14 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO try_drop_table; SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE ROLE") (localhost,57638,t,"CREATE ROLE") (2 rows) SELECT run_command_on_workers('GRANT ALL ON SCHEMA public TO try_drop_table'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) @@ -707,9 +707,9 @@ SELECT run_command_on_workers('GRANT ALL ON SCHEMA public TO try_drop_table'); BEGIN; CREATE TABLE temp_dist_table (x int, y int); SELECT create_distributed_table('temp_dist_table','x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE temp_dist_table; diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index 06fd2dfbb..74778c2ff 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -10,9 +10,9 @@ SET search_path TO multi_truncate; -- CREATE TABLE test_truncate_append(a int); SELECT create_distributed_table('test_truncate_append', 'a', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify no error is thrown when no shards are present @@ -21,34 +21,34 @@ SELECT master_create_empty_shard('test_truncate_append') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_append; - count + count --------------------------------------------------------------------- 0 (1 row) INSERT INTO test_truncate_append values (1); SELECT count(*) FROM test_truncate_append; - count + count --------------------------------------------------------------------- 1 (1 row) -- create some more shards SELECT master_create_empty_shard('test_truncate_append'); - master_create_empty_shard + master_create_empty_shard --------------------------------------------------------------------- 1210001 (1 row) SELECT master_create_empty_shard('test_truncate_append'); - master_create_empty_shard + master_create_empty_shard 
--------------------------------------------------------------------- 1210002 (1 row) -- verify 3 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1210000 1210001 @@ -58,14 +58,14 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::r TRUNCATE TABLE test_truncate_append; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_append; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify no shard exists anymore SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass; - shardid + shardid --------------------------------------------------------------------- (0 rows) @@ -78,9 +78,9 @@ DROP TABLE test_truncate_append; -- CREATE TABLE test_truncate_range(a int); SELECT create_distributed_table('test_truncate_range', 'a', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify no error is thrown when no shards are present @@ -95,7 +95,7 @@ SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_range; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -105,14 +105,14 @@ INSERT INTO test_truncate_range values (1001); INSERT INTO test_truncate_range values (2000); INSERT INTO test_truncate_range values (100); SELECT count(*) FROM test_truncate_range; - count + count --------------------------------------------------------------------- 4 (1 row) -- verify 3 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1210003 1210004 @@ -122,14 +122,14 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::re TRUNCATE TABLE test_truncate_range; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_range; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify 3 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1210003 1210004 @@ -140,7 +140,7 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::re INSERT INTO test_truncate_range VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_range; ROLLBACK; SELECT count(*) FROM test_truncate_range; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -152,15 +152,15 @@ DROP TABLE test_truncate_range; -- CREATE TABLE test_truncate_hash(a int); SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_hash; SELECT count(*) FROM test_truncate_hash; - count + count --------------------------------------------------------------------- 0 (1
row) @@ -182,22 +182,22 @@ ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. SELECT count(*) FROM test_truncate_hash; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- (0 rows) TRUNCATE TABLE test_truncate_hash; SELECT master_create_worker_shards('test_truncate_hash', 4, 1); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) INSERT INTO test_truncate_hash values (1); @@ -205,7 +205,7 @@ INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; - count + count --------------------------------------------------------------------- 4 (1 row) @@ -213,14 +213,14 @@ SELECT count(*) FROM test_truncate_hash; TRUNCATE TABLE test_truncate_hash; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_hash; - count + count --------------------------------------------------------------------- 0 (1 row) -- verify 4 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1210006 1210007 @@ -232,7 +232,7 @@ SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::reg INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -242,21 +242,21 @@ DROP TABLE test_truncate_hash; SET citus.shard_replication_factor TO 1; CREATE TABLE "a b hash" (a int, b int); SELECT create_distributed_table('"a b hash"', 'a', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO "a b hash" values (1, 0); SELECT * from "a b hash"; - a | b + a | b --------------------------------------------------------------------- 1 | 0 (1 row) TRUNCATE TABLE "a b hash"; SELECT * from "a b hash"; - a | b + a | b --------------------------------------------------------------------- (0 rows) @@ -264,9 +264,9 @@ DROP TABLE "a b hash"; -- now with append CREATE TABLE "a b append" (a int, b int); SELECT create_distributed_table('"a b append"', 'a', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset @@ -278,7 +278,7 @@ WHERE shardid = :new_shard_id; INSERT INTO "a b append" values (1, 1); INSERT INTO "a b append" values (600, 600); SELECT * FROM "a b append" ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 1 | 1 600 | 600 @@ -287,7 +287,7 @@ SELECT * FROM "a b append" ORDER BY a; TRUNCATE TABLE "a b append"; -- verify all shards are dropped SELECT shardid FROM pg_dist_shard where logicalrelid = '"a b append"'::regclass; - shardid + shardid
--------------------------------------------------------------------- (0 rows) @@ -297,9 +297,9 @@ CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -308,14 +308,14 @@ TRUNCATE test_local_truncate; COMMIT; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; - x | y + x | y --------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate'); - master_drop_all_shards + master_drop_all_shards --------------------------------------------------------------------- 4 (1 row) @@ -323,7 +323,7 @@ SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_trunca DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; - x | y + x | y --------------------------------------------------------------------- (0 rows) @@ -333,9 +333,9 @@ CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -344,14 +344,14 @@ TRUNCATE test_local_truncate; ROLLBACK; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; - x | y + x | y --------------------------------------------------------------------- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate'); - master_drop_all_shards + master_drop_all_shards --------------------------------------------------------------------- 4 (1 row) @@ -359,7 +359,7 @@ SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_trunca DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is not truncated SELECT * FROM test_local_truncate; - x | y + x | y --------------------------------------------------------------------- 1 | 2 (1 row) diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index a224117e2..af0b2421f 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -16,23 +16,23 @@ SET citus.shard_count TO 5; -- Create test tables CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table', 'col_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE mx_table_2 (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table_2', 'col_1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE mx_ref_table (col_1 int, col_2 text); SELECT create_reference_table('mx_ref_table'); - create_reference_table +
create_reference_table --------------------------------------------------------------------- - + (1 row) -- Check that the created tables are colocated MX tables @@ -40,16 +40,16 @@ SELECT logicalrelid, repmodel, colocationid FROM pg_dist_partition WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass) ORDER BY logicalrelid; - logicalrelid | repmodel | colocationid + logicalrelid | repmodel | colocationid --------------------------------------------------------------------- mx_table | s | 150000 mx_table_2 | s | 150000 (2 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) COPY mx_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); @@ -57,7 +57,7 @@ INSERT INTO mx_ref_table VALUES (-37, 'morbi'); INSERT INTO mx_ref_table VALUES (-78, 'sapien'); INSERT INTO mx_ref_table VALUES (-34, 'augue'); SELECT * FROM mx_table ORDER BY col_1; - col_1 | col_2 | col_3 + col_1 | col_2 | col_3 --------------------------------------------------------------------- -37 | 'lorem' | 1 80 | 'dolor' | 3 @@ -82,7 +82,7 @@ SELECT create_reference_table('mx_table_worker'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -96,14 +96,14 @@ SELECT master_create_worker_shards('mx_table', 5, 1); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; - count + count --------------------------------------------------------------------- 0 (1 row) INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -121,10 +121,10 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. -- DDL commands SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -138,10 +138,10 @@ ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERE ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -151,7 +151,7 @@ SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -161,7 +161,7 @@ SELECT master_apply_delete_command('DELETE FROM mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -171,7 +171,7 @@ SELECT 1 FROM master_add_inactive_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -180,7 +180,7 @@ SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; \c - - - :master_port DROP INDEX mx_test_uniq_index; SELECT 1 FROM master_add_inactive_node('localhost', 5432); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) @@ -190,16 +190,16 @@ SELECT master_remove_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; - count + count --------------------------------------------------------------------- 1 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', 5432); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -209,7 +209,7 @@ SELECT mark_tables_colocated('mx_table', ARRAY['mx_table_2']); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT colocationid FROM pg_dist_partition WHERE logicalrelid='mx_table_2'::regclass; - colocationid + colocationid --------------------------------------------------------------------- 0 (1 row) @@ -225,7 +225,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) @@ -233,9 +233,9 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; -- stop_metadata_sync_to_node \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port @@ -244,29 +244,29 @@ ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
\c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata + hasmetadata --------------------------------------------------------------------- f (1 row) \c - - - :worker_2_port SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; - worker_drop_distributed_table + worker_drop_distributed_table --------------------------------------------------------------------- - - + + (2 rows) DELETE FROM pg_dist_node; @@ -278,7 +278,7 @@ DROP TABLE mx_table; ERROR: operation is not allowed on this node \set VERBOSITY default SELECT count(*) FROM mx_table; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -291,7 +291,7 @@ SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_tabl ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; - count + count --------------------------------------------------------------------- 5 (1 row) @@ -312,7 +312,7 @@ SELECT shardid, nodename, nodeport, shardstate FROM pg_dist_shard_placement WHERE shardid = :testshardid ORDER BY nodeport; - shardid | nodename | nodeport | shardstate + shardid | nodename | nodeport | shardstate --------------------------------------------------------------------- 1270000 | localhost | 57637 | 1 1270000 | localhost | 57638 | 3 @@ -331,10 +331,10 @@ DROP SEQUENCE some_sequence; -- Show that dropping the sequence of an MX table with cascade harms the table and shards BEGIN; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) @@ -343,10 +343,10 @@ SET client_min_messages TO 'WARNING'; DROP SEQUENCE mx_table_col_3_seq CASCADE; RESET client_min_messages; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; - Column | Type | Modifiers + Column | Type | Modifiers --------------------------------------------------------------------- - col_1 | integer | - col_2 | text | + col_1 | integer | + col_2 | text | col_3 | bigint | not null (3 rows) @@ -356,15 +356,15 @@ ROLLBACK; DROP TABLE mx_table; DROP TABLE mx_table_2; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) \c - - - :worker_1_port DELETE FROM pg_dist_node; SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition; - worker_drop_distributed_table + worker_drop_distributed_table --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_upgrade_reference_table.out b/src/test/regress/expected/multi_upgrade_reference_table.out index 3414b70c8..010bf690a 100644 --- 
a/src/test/regress/expected/multi_upgrade_reference_table.out +++ b/src/test/regress/expected/multi_upgrade_reference_table.out @@ -22,9 +22,9 @@ DROP TABLE upgrade_reference_table_local; SET citus.shard_count TO 4; CREATE TABLE upgrade_reference_table_multiple_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_multiple_shard', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_multiple_shard'); @@ -34,9 +34,9 @@ DROP TABLE upgrade_reference_table_multiple_shard; -- test with table which has no shard CREATE TABLE upgrade_reference_table_no_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_no_shard', 'column1', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_no_shard'); @@ -48,16 +48,16 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_referenced(column1 int PRIMARY KEY); SELECT create_distributed_table('upgrade_reference_table_referenced', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE upgrade_reference_table_referencing(column1 int REFERENCES upgrade_reference_table_referenced(column1)); SELECT create_distributed_table('upgrade_reference_table_referencing', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables @@ -74,9 +74,9 @@ DROP TABLE upgrade_reference_table_referenced; -- test with no healthy placements CREATE TABLE upgrade_reference_table_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_unhealthy', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_unhealthy'::regclass; @@ -91,16 +91,16 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_composite(column1 int, column2 upgrade_test_composite_type); SELECT create_distributed_table('upgrade_reference_table_composite', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_composite'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_composite'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) DROP TABLE upgrade_reference_table_composite; @@ -108,9 +108,9 @@ DROP TYPE upgrade_test_composite_type; -- test with reference table CREATE TABLE upgrade_reference_table_reference(column1 int); SELECT create_reference_table('upgrade_reference_table_reference'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT 
upgrade_to_reference_table('upgrade_reference_table_reference'); @@ -120,9 +120,9 @@ DROP TABLE upgrade_reference_table_reference; -- test valid cases, append distributed table CREATE TABLE upgrade_reference_table_append(column1 int); SELECT create_distributed_table('upgrade_reference_table_append', 'column1', 'append'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) COPY upgrade_reference_table_append FROM STDIN; @@ -133,7 +133,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- a | f | 0 | c (1 row) @@ -144,7 +144,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360009 | f | f (1 row) @@ -155,7 +155,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- (0 rows) @@ -169,15 +169,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360009 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_append'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- situation after upgrade_reference_table @@ -187,7 +187,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -198,7 +198,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360009 | t | t (1 row) @@ -209,7 +209,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -223,7 +223,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360009 | t (1 row) @@ -232,9 +232,9 @@ DROP TABLE upgrade_reference_table_append; -- test valid cases, shard exists at one worker CREATE TABLE upgrade_reference_table_one_worker(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_worker', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_one_worker'::regclass; @@ -245,7 +245,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360001 | c (1 row) @@ -256,7 +256,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360010 | f | f (1 row) @@ -267,7 +267,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360001 | 1 | 1 | 23 | 0 (1 row) @@ -281,15 +281,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360010 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_worker'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- situation after upgrade_reference_table @@ -299,7 +299,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -310,7 +310,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360010 | t | t (1 row) @@ -321,7 +321,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -335,7 +335,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360010 | t (1 row) @@ -345,9 +345,9 @@ DROP TABLE upgrade_reference_table_one_worker; SET citus.shard_replication_factor TO 2; CREATE TABLE upgrade_reference_table_one_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_unhealthy', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 @@ -359,7 +359,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360002 | c (1 row) @@ -370,7 +370,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360011 | f | f (1 row) @@ -381,7 +381,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360002 | 1 | 2 | 23 | 0 (1 row) @@ -396,15 +396,15 @@ WHERE shardid IN AND shardstate = 1 GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360011 | f (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_unhealthy'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- situation after upgrade_reference_table @@ -414,7 +414,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -425,7 +425,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360011 | t | t (1 row) @@ -436,7 +436,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -451,7 +451,7 @@ WHERE shardid IN AND shardstate = 1 GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360011 | t (1 row) @@ -460,9 +460,9 @@ DROP TABLE upgrade_reference_table_one_unhealthy; -- test valid cases, shard exists at both workers and both are healthy CREATE TABLE upgrade_reference_table_both_healthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_both_healthy', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- situation before upgrade_reference_table @@ -472,7 +472,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360003 | c (1 row) @@ -483,7 +483,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360012 | f | f (1 row) @@ -494,7 +494,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360003 | 1 | 2 | 23 | 0 (1 row) @@ -508,15 +508,15 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) GROUP BY shardid ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1360012 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_both_healthy'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- situation after upgrade_reference_table @@ -526,7 +526,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -537,7 +537,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360012 | t | t (1 row) @@ -548,7 +548,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -562,7 +562,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360012 | t (1 row) @@ -572,9 +572,9 @@ DROP TABLE upgrade_reference_table_both_healthy; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_rollback(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_rollback', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_rollback'::regclass; @@ -585,7 +585,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -596,7 +596,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -607,7 +607,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -621,16 +621,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360013 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_rollback'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) ROLLBACK; @@ -641,7 +641,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -652,7 +652,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360013 | f | f (1 row) @@ -663,7 +663,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -677,7 +677,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360013 | f (1 row) @@ -687,9 +687,9 @@ DROP TABLE upgrade_reference_table_transaction_rollback; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_commit(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_commit', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_commit'::regclass; @@ -700,7 +700,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360004 | c (1 row) @@ -711,7 +711,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360014 | f | f (1 row) @@ -722,7 +722,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360004 | 1 | 1 | 23 | 0 (1 row) @@ -736,16 +736,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | 
?column? + shardid | ?column? --------------------------------------------------------------------- 1360014 | f (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_commit'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) COMMIT; @@ -756,7 +756,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -767,7 +767,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360014 | t | t (1 row) @@ -778,7 +778,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -792,7 +792,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360014 | t (1 row) @@ -801,7 +801,7 @@ ORDER BY shardid; \c - - - :worker_2_port \dt upgrade_reference_table_transaction_commit_* List of relations - Schema | Name | Type | Owner + Schema | Name | Type | Owner --------------------------------------------------------------------- public | upgrade_reference_table_transaction_commit_1360014 | table | postgres (1 row) @@ -814,9 +814,9 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- verify that streaming replicated tables cannot be upgraded to reference tables @@ -826,7 +826,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -837,7 +837,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -848,7 +848,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation 
--------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -862,7 +862,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1360015 (1 row) @@ -877,7 +877,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360005 | s (1 row) @@ -888,7 +888,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360015 | f | f (1 row) @@ -899,7 +899,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360005 | 1 | 1 | 23 | 0 (1 row) @@ -913,7 +913,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360015 | f (1 row) @@ -925,18 +925,18 @@ SET citus.shard_replication_factor TO 2; RESET citus.replication_model; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node + start_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) -- situation before upgrade_reference_table @@ -946,7 +946,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- h | f | 1360006 | c (1 row) @@ -957,7 +957,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360016 | f | f (1 row) @@ -968,7 +968,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 1360006 | 1 | 2 | 23 | 0 (1 row) @@ 
-982,16 +982,16 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid + shardid --------------------------------------------------------------------- 1360016 (1 row) SET client_min_messages TO WARNING; SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); - upgrade_to_reference_table + upgrade_to_reference_table --------------------------------------------------------------------- - + (1 row) -- situation after upgrade_reference_table @@ -1001,7 +1001,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -1012,7 +1012,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1023,7 +1023,7 @@ WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation + colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation --------------------------------------------------------------------- 10004 | 1 | -1 | 0 | 0 (1 row) @@ -1037,7 +1037,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? --------------------------------------------------------------------- 1360016 | t (1 row) @@ -1050,7 +1050,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - partmethod | partkeyisnull | colocationid | repmodel + partmethod | partkeyisnull | colocationid | repmodel --------------------------------------------------------------------- n | t | 10004 | t (1 row) @@ -1061,7 +1061,7 @@ FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; - shardid | shardminvalueisnull | shardmaxvalueisnull + shardid | shardminvalueisnull | shardmaxvalueisnull --------------------------------------------------------------------- 1360016 | t | t (1 row) @@ -1075,7 +1075,7 @@ WHERE shardid IN WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) GROUP BY shardid ORDER BY shardid; - shardid | ?column? + shardid | ?column? 
--------------------------------------------------------------------- 1360016 | t (1 row) @@ -1083,9 +1083,9 @@ ORDER BY shardid; \c - - - :master_port DROP TABLE upgrade_reference_table_mx; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node + stop_metadata_sync_to_node --------------------------------------------------------------------- - + (1 row) RESET client_min_messages; diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index 3e52671e3..444484a10 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -8,9 +8,9 @@ CREATE TABLE upsert_test ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test', 'part_key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- do a regular insert @@ -26,10 +26,10 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 2, third_col = 4; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 2 | 4 - 2 | 2 | + 2 | 2 | (2 rows) -- do a multi-row DO NOTHING insert @@ -41,11 +41,11 @@ ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.other_col WHERE upsert_test.part_key != 1; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 2 | 4 - 2 | 20 | - 3 | 30 | + 2 | 20 | + 3 | 30 | (3 rows) DELETE FROM upsert_test WHERE part_key = 2; @@ -55,7 +55,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 3; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 2 | 4 (1 row) @@ -65,7 +65,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 30 | 4 (1 row) @@ -78,7 +78,7 @@ INSERT INTO upsert_test (part_key, other_col, third_col) VALUES (1, 1, 100) ON C DO UPDATE SET other_col = EXCLUDED.third_col; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 100 | 4 (1 row) @@ -88,7 +88,7 @@ INSERT INTO upsert_test as ups_test (part_key) VALUES (1) ON CONFLICT (part_key) DO UPDATE SET other_col = ups_test.other_col + 50, third_col = 200; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 150 | 200 (1 row) @@ -99,7 +99,7 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_ke third_col = upsert_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col) + 670; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col 
+ part_key | other_col | third_col --------------------------------------------------------------------- 1 | 151 | 872 (1 row) @@ -110,7 +110,7 @@ INSERT INTO upsert_test as ups_test (part_key, other_col) VALUES (1, 1) ON CONFL WHERE ups_test.third_col < 1000 + ups_test.other_col; -- see the results SELECT * FROM upsert_test; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- 1 | 5 | 872 (1 row) @@ -119,17 +119,17 @@ SELECT * FROM upsert_test; INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- - 2 | 2 | + 2 | 2 | (1 row) INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; - part_key | other_col | third_col + part_key | other_col | third_col --------------------------------------------------------------------- - 2 | 3 | + 2 | 3 | (1 row) -- create another table @@ -142,9 +142,9 @@ CREATE TABLE upsert_test_2 ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partiton key @@ -163,9 +163,9 @@ CREATE TABLE upsert_test_3 CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- since there are no unique indexes, error-out @@ -179,9 +179,9 @@ CREATE TABLE upsert_test_4 ); -- distribute the table and create shards SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- a single row insert @@ -195,7 +195,7 @@ INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET coun INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; -- now see the results SELECT * FROM upsert_test_4; - part_key | count + part_key | count --------------------------------------------------------------------- 1 | 6 (1 row) @@ -204,9 +204,9 @@ SELECT * FROM upsert_test_4; SET citus.shard_replication_factor TO 1; CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); SELECT create_distributed_table('dropcol_distributed', 'key', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index 699f9079a..aed692172 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -6,9 +6,9 @@ SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; CREATE TABLE sharded_table ( name text, id bigint ); SELECT 
create_distributed_table('sharded_table', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- COPY out is supported with distributed tables @@ -35,27 +35,27 @@ PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1; PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1; PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1; EXECUTE sharded_query; - name + name --------------------------------------------------------------------- (0 rows) EXECUTE sharded_insert; EXECUTE sharded_query; - name + name --------------------------------------------------------------------- adam (1 row) EXECUTE sharded_update; EXECUTE sharded_query; - name + name --------------------------------------------------------------------- bob (1 row) EXECUTE sharded_delete; EXECUTE sharded_query; - name + name --------------------------------------------------------------------- (0 rows) @@ -72,22 +72,22 @@ HINT: Use the DELETE command instead. -- lock shard metadata: take some share locks and exclusive locks BEGIN; SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]); - lock_shard_metadata + lock_shard_metadata --------------------------------------------------------------------- - + (1 row) SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]); - lock_shard_metadata + lock_shard_metadata --------------------------------------------------------------------- - + (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; - locktype | objid | mode | granted + locktype | objid | mode | granted --------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t @@ -102,9 +102,9 @@ SELECT lock_shard_metadata(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_metadata(5, ARRAY[0]); - lock_shard_metadata + lock_shard_metadata --------------------------------------------------------------------- - + (1 row) -- lock shard metadata: lock nothing @@ -113,22 +113,22 @@ ERROR: no locks specified -- lock shard resources: take some share locks and exclusive locks BEGIN; SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]); - lock_shard_resources + lock_shard_resources --------------------------------------------------------------------- - + (1 row) SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]); - lock_shard_resources + lock_shard_resources --------------------------------------------------------------------- - + (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; - locktype | objid | mode | granted + locktype | objid | mode | granted --------------------------------------------------------------------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t @@ -143,9 +143,9 @@ SELECT lock_shard_resources(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_resources(5, ARRAY[-1]); - lock_shard_resources + lock_shard_resources --------------------------------------------------------------------- - + (1 row) -- lock shard metadata: lock nothing @@ -159,24 +159,24 @@ SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; CREATE TABLE dustbunnies (id integer, name text, age 
integer); SELECT create_distributed_table('dustbunnies', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- add some data to the distributed table \copy dustbunnies (id, name) from stdin with csv CREATE TABLE second_dustbunnies(id integer, name text, age integer); SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT master_create_worker_shards('second_dustbunnies', 1, 2); - master_create_worker_shards + master_create_worker_shards --------------------------------------------------------------------- - + (1 row) -- following approach adapted from PostgreSQL's stats.sql file @@ -266,20 +266,20 @@ ANALYZE dustbunnies; -- verify that the VACUUM and ANALYZE ran \c - - - :worker_1_port SELECT wait_for_stats(); - wait_for_stats + wait_for_stats --------------------------------------------------------------------- - + (1 row) REFRESH MATERIALIZED VIEW prevcounts; SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); - pg_stat_get_vacuum_count + pg_stat_get_vacuum_count --------------------------------------------------------------------- 1 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); - pg_stat_get_analyze_count + pg_stat_get_analyze_count --------------------------------------------------------------------- 1 (1 row) @@ -295,26 +295,26 @@ VACUUM ANALYZE dustbunnies; \c - - - :worker_1_port SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class WHERE oid='dustbunnies_990002'::regclass; - table_rewritten + table_rewritten --------------------------------------------------------------------- t (1 row) -- verify the VACUUM ANALYZE incremented both vacuum and analyze counts SELECT wait_for_stats(); - wait_for_stats + wait_for_stats --------------------------------------------------------------------- - + (1 row) SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); - pg_stat_get_vacuum_count + pg_stat_get_vacuum_count --------------------------------------------------------------------- 2 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); - pg_stat_get_analyze_count + pg_stat_get_analyze_count --------------------------------------------------------------------- 2 (1 row) @@ -331,7 +331,7 @@ VACUUM (FREEZE) dustbunnies; \c - - - :worker_1_port SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class WHERE oid='dustbunnies_990002'::regclass; - frozen_performed + frozen_performed --------------------------------------------------------------------- t (1 row) @@ -339,7 +339,7 @@ WHERE oid='dustbunnies_990002'::regclass; -- check there are no nulls in either column SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; - attname | null_frac + attname | null_frac --------------------------------------------------------------------- age | 1 id | 0 @@ -354,7 +354,7 @@ ANALYZE dustbunnies (name); \c - - - :worker_1_port SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; - attname | null_frac + attname | null_frac --------------------------------------------------------------------- age | 1 id | 0 @@ -370,21 +370,21 @@ HINT: Provide a specific table in order to VACUUM distributed tables. 
VACUUM dustbunnies, second_dustbunnies; -- check the current number of vacuum and analyze run on dustbunnies SELECT run_command_on_workers($$SELECT wait_for_stats()$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) @@ -401,21 +401,21 @@ HINT: Set citus.enable_ddl_propagation to true in order to send targeted ANALYZ SET citus.enable_ddl_propagation to DEFAULT; -- should not propagate the vacuum and analyze SELECT run_command_on_workers($$SELECT wait_for_stats()$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"") (localhost,57638,t,"") (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,4) (localhost,57638,t,4) (2 rows) SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,3) (localhost,57638,t,3) @@ -423,13 +423,13 @@ SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regc -- test worker_hash SELECT worker_hash(123); - worker_hash + worker_hash --------------------------------------------------------------------- -205084363 (1 row) SELECT worker_hash('1997-08-08'::date); - worker_hash + worker_hash --------------------------------------------------------------------- -499701663 (1 row) @@ -439,7 +439,7 @@ SELECT worker_hash('(1, 2)'); ERROR: cannot find a hash function for the input type HINT: Cast input to a data type with a hash function. SELECT worker_hash('(1, 2)'::test_composite_type); - worker_hash + worker_hash --------------------------------------------------------------------- -1895345704 (1 row) @@ -448,7 +448,7 @@ SELECT citus_truncate_trigger(); ERROR: must be called as trigger -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/multi_utility_statements.out b/src/test/regress/expected/multi_utility_statements.out index 2717c304f..1a4daa418 100644 --- a/src/test/regress/expected/multi_utility_statements.out +++ b/src/test/regress/expected/multi_utility_statements.out @@ -5,7 +5,7 @@ -- distributed tables. Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. 
SET citus.next_shard_id TO 1000000; -CREATE TEMP TABLE lineitem_pricing_summary AS +CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT l_returnflag, @@ -30,7 +30,7 @@ CREATE TEMP TABLE lineitem_pricing_summary AS l_linestatus ); SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; - l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------------------------------------------------------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 @@ -39,7 +39,7 @@ SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; (4 rows) -- Test we can handle joins -CREATE TABLE shipping_priority AS +CREATE TABLE shipping_priority AS ( SELECT l_orderkey, @@ -65,7 +65,7 @@ CREATE TABLE shipping_priority AS o_orderdate ); SELECT * FROM shipping_priority; - l_orderkey | revenue | o_orderdate | o_shippriority + l_orderkey | revenue | o_orderdate | o_shippriority --------------------------------------------------------------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 @@ -138,7 +138,7 @@ COPY 25 COPY nation TO STDOUT; 0 ALGERIA 0 haggle. carefully final deposits detect slyly agai 1 ARGENTINA 1 al foxes promise slyly according to the regular accounts. bold requests alon -2 BRAZIL 1 y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +2 BRAZIL 1 y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 CANADA 1 eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 EGYPT 4 y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 ETHIOPIA 0 ven packages wake quickly. regu @@ -146,7 +146,7 @@ COPY nation TO STDOUT; 7 GERMANY 3 l platelets. regular accounts x-ray: unusual, regular acco 8 INDIA 2 ss excuses cajole slyly across the packages. deposits print aroun 9 INDONESIA 2 slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull -10 IRAN 4 efully alongside of the slyly final dependencies. +10 IRAN 4 efully alongside of the slyly final dependencies. 11 IRAQ 4 nic deposits boost atop the quickly final requests? quickly regula 12 JAPAN 2 ously. final, express gifts cajole a 13 JORDAN 4 ic deposits are blithely about the carefully regular pa @@ -157,75 +157,75 @@ COPY nation TO STDOUT; 18 CHINA 2 c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos 19 ROMANIA 3 ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account 20 SAUDI ARABIA 4 ts. silent requests haggle. closely express packages sleep across the blithely -21 VIETNAM 2 hely enticingly express accounts. even, final +21 VIETNAM 2 hely enticingly express accounts. even, final 22 RUSSIA 3 requests against the platelets use never according to the quickly regular pint 23 UNITED KINGDOM 3 eans boost carefully special requests. accounts are. carefull 24 UNITED STATES 1 y final packages. 
slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be -- ensure individual cols can be copied out, too COPY nation(n_name) TO STDOUT; -ALGERIA -ARGENTINA -BRAZIL -CANADA -EGYPT -ETHIOPIA -FRANCE -GERMANY -INDIA -INDONESIA -IRAN -IRAQ -JAPAN -JORDAN -KENYA -MOROCCO -MOZAMBIQUE -PERU -CHINA -ROMANIA -SAUDI ARABIA -VIETNAM -RUSSIA -UNITED KINGDOM -UNITED STATES +ALGERIA +ARGENTINA +BRAZIL +CANADA +EGYPT +ETHIOPIA +FRANCE +GERMANY +INDIA +INDONESIA +IRAN +IRAQ +JAPAN +JORDAN +KENYA +MOROCCO +MOZAMBIQUE +PERU +CHINA +ROMANIA +SAUDI ARABIA +VIETNAM +RUSSIA +UNITED KINGDOM +UNITED STATES -- Test that we can create on-commit drop tables, along with changing column names BEGIN; -CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS +CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS (SELECT * FROM customer WHERE c_nationkey = 1 ORDER BY c_custkey LIMIT 10); -SELECT customer_key, c_name, c_address +SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; - customer_key | c_name | c_address + customer_key | c_name | c_address --------------------------------------------------------------------- 3 | Customer#000000003 | MG9kdTD2WBHm - 14 | Customer#000000014 | KXkletMlL2JQEA + 14 | Customer#000000014 | KXkletMlL2JQEA 30 | Customer#000000030 | nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY 59 | Customer#000000059 | zLOCP0wh92OtBihgspOGl4 106 | Customer#000000106 | xGCOEAUjUNG (5 rows) COMMIT; -SELECT customer_key, c_name, c_address +SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; ERROR: relation "customer_few" does not exist -- Test DECLARE CURSOR .. WITH HOLD without parameters that calls ReScan on the top-level CustomScan CREATE TABLE cursor_me (x int, y int); SELECT create_distributed_table('cursor_me', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO cursor_me SELECT s/10, s FROM generate_series(1, 100) s; DECLARE holdCursor CURSOR WITH HOLD FOR SELECT * FROM cursor_me WHERE x = 1 ORDER BY y; FETCH NEXT FROM holdCursor; - x | y + x | y --------------------------------------------------------------------- 1 | 10 (1 row) FETCH FORWARD 3 FROM holdCursor; - x | y + x | y --------------------------------------------------------------------- 1 | 11 1 | 12 @@ -233,13 +233,13 @@ FETCH FORWARD 3 FROM holdCursor; (3 rows) FETCH LAST FROM holdCursor; - x | y + x | y --------------------------------------------------------------------- 1 | 19 (1 row) FETCH BACKWARD 3 FROM holdCursor; - x | y + x | y --------------------------------------------------------------------- 1 | 18 1 | 17 @@ -247,7 +247,7 @@ FETCH BACKWARD 3 FROM holdCursor; (3 rows) FETCH FORWARD 3 FROM holdCursor; - x | y + x | y --------------------------------------------------------------------- 1 | 17 1 | 18 @@ -263,7 +263,7 @@ $$ LANGUAGE SQL; SELECT declares_cursor(5); ERROR: Cursors for queries on distributed tables with parameters are currently unsupported CREATE OR REPLACE FUNCTION cursor_plpgsql(p int) -RETURNS SETOF int AS $$ +RETURNS SETOF int AS $$ DECLARE val int; my_cursor CURSOR (a INTEGER) FOR SELECT y FROM cursor_me WHERE x = $1 ORDER BY y; @@ -283,7 +283,7 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT cursor_plpgsql(4); - cursor_plpgsql + cursor_plpgsql --------------------------------------------------------------------- 40 41 @@ -303,16 +303,16 @@ DROP TABLE cursor_me; -- Test DECLARE CURSOR statement 
with SCROLL DECLARE holdCursor SCROLL CURSOR WITH HOLD FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount - FROM lineitem + FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH NEXT FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 1 | 1 | 17.00 | 0.04 (1 row) FETCH FORWARD 5 FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 1 | 2 | 36.00 | 0.09 1 | 3 | 8.00 | 0.10 @@ -322,13 +322,13 @@ FETCH FORWARD 5 FROM holdCursor; (5 rows) FETCH LAST FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 14947 | 2 | 29.00 | 0.04 (1 row) FETCH BACKWARD 5 FROM holdCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 14947 | 1 | 14.00 | 0.09 14946 | 2 | 37.00 | 0.01 @@ -344,13 +344,13 @@ DECLARE noHoldCursor SCROLL CURSOR FOR FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH ABSOLUTE 5 FROM noHoldCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 1 | 5 | 24.00 | 0.10 (1 row) FETCH BACKWARD noHoldCursor; - l_orderkey | l_linenumber | l_quantity | l_discount + l_orderkey | l_linenumber | l_quantity | l_discount --------------------------------------------------------------------- 1 | 4 | 28.00 | 0.09 (1 row) diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out index 8919aaf83..ab5df27cb 100644 --- a/src/test/regress/expected/multi_view.out +++ b/src/test/regress/expected/multi_view.out @@ -6,13 +6,13 @@ -- router queries, single row inserts, multi row inserts via insert -- into select, multi row insert via copy commands. 
SELECT count(*) FROM lineitem_hash_part; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT count(*) FROM orders_hash_part; - count + count --------------------------------------------------------------------- 2985 (1 row) @@ -21,14 +21,14 @@ SELECT count(*) FROM orders_hash_part; CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM'; -- aggregate pushdown SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1; - o_orderpriority | count + o_orderpriority | count --------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 (2 rows) SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1; - o_orderpriority | count + o_orderpriority | count --------------------------------------------------------------------- 2-HIGH | 593 1-URGENT | 604 @@ -36,7 +36,7 @@ SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < -- filters SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fullfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1; - o_orderpriority | all | fullfilled + o_orderpriority | all | fullfilled --------------------------------------------------------------------- 2-HIGH | 593 | 271 1-URGENT | 604 | 280 @@ -44,7 +44,7 @@ SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus =' -- having SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; - o_orderdate | count + o_orderdate | count --------------------------------------------------------------------- 08-20-1996 | 5 10-10-1994 | 4 @@ -55,7 +55,7 @@ SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > -- having with filters SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; - o_orderdate | all | count + o_orderdate | all | count --------------------------------------------------------------------- 08-20-1996 | 5 | 0 10-10-1994 | 4 | 4 @@ -66,7 +66,7 @@ SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') -- limit SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ; - o_orderkey | o_totalprice + o_orderkey | o_totalprice --------------------------------------------------------------------- 4421 | 401055.62 10209 | 400191.77 @@ -76,14 +76,14 @@ SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc lim (5 rows) SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ; - o_orderkey | o_totalprice + o_orderkey | o_totalprice --------------------------------------------------------------------- 14179 | 384265.43 (1 row) CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey); SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5; - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 7 | 7 225 | 7 @@ -95,28 +95,28 @@ SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; -- join between view and table SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey 
= l_orderkey); - count + count --------------------------------------------------------------------- 1706 (1 row) -- join between views SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count + count --------------------------------------------------------------------- 700 (1 row) -- count distinct on partition column is supported SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count + count --------------------------------------------------------------------- 551 (1 row) -- count distinct on non-partition column is supported SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); - count + count --------------------------------------------------------------------- 2 (1 row) @@ -125,7 +125,7 @@ SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lin SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -134,35 +134,35 @@ SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitem SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); - o_orderkey + o_orderkey --------------------------------------------------------------------- 231 (1 row) -- left join support depends on flattening of the query SELECT o_orderkey, l_orderkey FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) ORDER BY o_orderkey LIMIT 1; - o_orderkey | l_orderkey + o_orderkey | l_orderkey --------------------------------------------------------------------- - 2 | + 2 | (1 row) -- however, this works SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count + count --------------------------------------------------------------------- 700 (1 row) -- view on the inner side is supported SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count + count --------------------------------------------------------------------- 1706 (1 row) -- view on the outer side is supported SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; - count + count --------------------------------------------------------------------- 700 (1 row) @@ -170,9 +170,9 @@ SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderke -- left join on router query is supported SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE o_orderkey = 2; - o_orderkey | l_linenumber + o_orderkey | l_linenumber --------------------------------------------------------------------- - 2 | + 2 | (1 row) -- repartition query on view join @@ -182,9 +182,9 @@ SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppk DEBUG: generating subplan 22_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, 
lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar) DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5 DEBUG: push down of limit count: 5 - o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment + o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | 
l_comment --------------------------------------------------------------------- - 14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside + 14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside 14790 | 613 | O | 270163.54 | 08-21-1996 | 2-HIGH | Clerk#000000347 | 0 | p. regular deposits wake. final n | 2629 | 123076 | 613 | 2 | 31.00 | 34071.17 | 0.08 | 0.03 | N | O | 05-24-1998 | 05-26-1998 | 06-10-1998 | COLLECT COD | AIR | ate blithely bold, regular deposits. bold 14758 | 1225 | F | 37812.49 | 10-27-1993 | 2-HIGH | Clerk#000000687 | 0 | ages nag about the furio | 9156 | 176190 | 1225 | 2 | 22.00 | 27856.18 | 0.03 | 0.00 | R | F | 02-08-1994 | 04-01-1994 | 02-24-1994 | DELIVER IN PERSON | AIR | equests dete 14725 | 569 | O | 261801.45 | 06-17-1995 | 2-HIGH | Clerk#000000177 | 0 | ng asymptotes. final, ironic accounts cajole after | 14688 | 173017 | 569 | 3 | 10.00 | 10900.10 | 0.02 | 0.08 | N | O | 03-14-1997 | 04-22-1997 | 04-05-1997 | COLLECT COD | AIR | riously even packages sleep a @@ -193,7 +193,7 @@ DEBUG: push down of limit count: 5 RESET client_min_messages; SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); - count + count --------------------------------------------------------------------- 192 (1 row) @@ -211,7 +211,7 @@ SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; - l_suppkey | count + l_suppkey | count --------------------------------------------------------------------- 7680 | 4 160 | 3 @@ -231,7 +231,7 @@ DETAIL: Subqueries without group by clause are not supported yet -- repartition query on view with single table subquery CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; - l_suppkey | count + l_suppkey | count --------------------------------------------------------------------- 6104 | 8 1868 | 6 @@ -251,7 +251,7 @@ CREATE VIEW lineitems_by_shipping_method AS SELECT l_shipmode, count(*) as cnt FROM lineitem_hash_part GROUP BY 1; -- following will be supported via recursive planning SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5; - l_shipmode | cnt + l_shipmode | cnt --------------------------------------------------------------------- AIR | 1706 FOB | 1709 @@ -269,7 +269,7 @@ CREATE VIEW lineitems_by_orderkey AS GROUP BY 1; -- this should work since we're able to push down this query SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 7 | 7 68 | 7 @@ -285,7 +285,7 @@ SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; -- it would also work since it is made router plannable SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100; - l_orderkey | count + l_orderkey | count --------------------------------------------------------------------- 100 | 5 (1 row) @@ -303,7 +303,7 @@ CREATE VIEW recent_users AS GROUP BY 
user_id HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC; SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 @@ -315,14 +315,14 @@ CREATE VIEW recent_events AS SELECT user_id, time FROM events_table WHERE time > '2017-11-23 16:20:33.264457'::timestamp; SELECT count(*) FROM recent_events; - count + count --------------------------------------------------------------------- 6 (1 row) -- count number of events of recent_users SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id); - count + count --------------------------------------------------------------------- 50 (1 row) @@ -334,7 +334,7 @@ SELECT ru.user_id, count(*) ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; - user_id | count + user_id | count --------------------------------------------------------------------- 3 | 21 1 | 15 @@ -348,7 +348,7 @@ SELECT ru.user_id, count(*) ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; - user_id | count + user_id | count --------------------------------------------------------------------- 3 | 21 1 | 15 @@ -364,7 +364,7 @@ SELECT * FROM GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; - user_id | count + user_id | count --------------------------------------------------------------------- 3 | 21 1 | 15 @@ -381,7 +381,7 @@ SELECT * FROM GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; - user_id | count + user_id | count --------------------------------------------------------------------- 1 | 24 3 | 23 @@ -391,7 +391,7 @@ ORDER BY 2 DESC, 1; -- join between views -- recent users who has an event in recent events SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id; - user_id + user_id --------------------------------------------------------------------- 1 3 @@ -403,7 +403,7 @@ SELECT count(*) FROM ( SELECT re.*, ru.user_id AS recent_user FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu WHERE recent_user IS NULL; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -412,7 +412,7 @@ SELECT count(*) FROM ( SELECT count(*) FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id) WHERE ru.user_id IS NULL; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -420,29 +420,29 @@ SELECT count(*) -- join between view and table -- users who has recent activity and they have an entry with value_1 is less than 3 SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 3 ORDER BY 1,2; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | - 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | - 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | - 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | - 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | - 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | - 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | - 3 | Thu Nov 23 11:18:53.114408 2017 | 2 | 2 | 0 | - 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | - 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 5 | Wed 
Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | - 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | - 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | - 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | - 5 | Thu Nov 23 07:47:09.542999 2017 | 1 | 4 | 3 | - 5 | Thu Nov 23 09:05:08.53142 2017 | 2 | 2 | 2 | - 5 | Thu Nov 23 09:17:47.706703 2017 | 2 | 5 | 3 | - 5 | Thu Nov 23 10:15:31.764558 2017 | 2 | 2 | 2 | - 5 | Thu Nov 23 14:29:02.557934 2017 | 2 | 1 | 2 | - 5 | Thu Nov 23 15:55:08.493462 2017 | 0 | 3 | 3 | - 5 | Thu Nov 23 16:28:38.455322 2017 | 2 | 5 | 4 | + 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | + 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | + 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | + 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | + 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | + 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | + 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | + 3 | Thu Nov 23 11:18:53.114408 2017 | 2 | 2 | 0 | + 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | + 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | + 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | + 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | + 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | + 5 | Thu Nov 23 07:47:09.542999 2017 | 1 | 4 | 3 | + 5 | Thu Nov 23 09:05:08.53142 2017 | 2 | 2 | 2 | + 5 | Thu Nov 23 09:17:47.706703 2017 | 2 | 5 | 3 | + 5 | Thu Nov 23 10:15:31.764558 2017 | 2 | 2 | 2 | + 5 | Thu Nov 23 14:29:02.557934 2017 | 2 | 1 | 2 | + 5 | Thu Nov 23 15:55:08.493462 2017 | 0 | 3 | 3 | + 5 | Thu Nov 23 16:28:38.455322 2017 | 2 | 5 | 4 | (21 rows) -- determine if a recent user has done a given event type or not @@ -451,7 +451,7 @@ SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 6) ORDER BY 2 DESC, 1; - user_id | done_event + user_id | done_event --------------------------------------------------------------------- 1 | YES 3 | NO @@ -466,7 +466,7 @@ SELECT * FROM ON(ru.user_id = et.user_id AND et.event_type = 6) ) s1 ORDER BY 2 DESC, 1; - user_id | done_event + user_id | done_event --------------------------------------------------------------------- 1 | YES 3 | NO @@ -488,7 +488,7 @@ DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer jo CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 1 and value_1 <3; CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id); SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 3 @@ -497,7 +497,7 @@ SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; -- this would be supported when we implement where partition_key in (subquery) support SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5; - user_id | time + user_id | time --------------------------------------------------------------------- 5 | Thu Nov 23 16:11:02.929469 2017 5 | Thu Nov 23 14:40:40.467511 2017 @@ -508,7 +508,7 @@ SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user -- it is supported when it is a router query SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 1); - count + count 
--------------------------------------------------------------------- 15 (1 row) @@ -518,7 +518,7 @@ SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM re UNION (SELECT user_id FROM selected_users) ORDER BY 1; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -536,7 +536,7 @@ SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 (1 row) @@ -549,7 +549,7 @@ SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0 ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 1 @@ -561,7 +561,7 @@ SELECT count(*) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -573,7 +573,7 @@ SELECT count(*) UNION ALL (SELECT user_id FROM selected_users) ) u WHERE user_id < 2 AND user_id > 0; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -588,7 +588,7 @@ SELECT count(*) UNION (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -602,7 +602,7 @@ SELECT count(*) UNION ALL (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u WHERE user_id < 2 AND user_id > 0; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -611,7 +611,7 @@ SELECT count(*) -- distinct is supported if it is on a partition key CREATE VIEW distinct_user_with_value_1_3 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 3; SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -625,7 +625,7 @@ SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id; -- but will be supported via recursive planning CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 3; SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5; - value_1 + value_1 --------------------------------------------------------------------- 5 4 @@ -638,52 +638,52 @@ SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5; CREATE VIEW cte_view_1 AS WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 3) SELECT * FROM c1 WHERE value_2 < 4 AND EXISTS (SELECT * FROM c1); SELECT * FROM cte_view_1 ORDER BY 1,2,3,4,5 LIMIT 5; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | - 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | - 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | - 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | + 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | + 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | + 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | (5 rows) -- this is a single shard query and still not supported since it has view + cte -- router planner can't detect
it SELECT * FROM cte_view_1 WHERE user_id = 2 ORDER BY 1,2,3,4,5; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | (1 row) -- if CTE itself prunes down to a single shard then the view is supported (router plannable) CREATE VIEW cte_view_2 AS WITH c1 AS (SELECT * FROM users_table WHERE user_id = 2) SELECT * FROM c1 WHERE value_1 = 3; SELECT * FROM cte_view_2; - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | - 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | - 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | - 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | + 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | + 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 | + 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | + 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | (4 rows) CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2; -- router plannable SELECT user_id FROM router_view GROUP BY 1; - user_id + user_id --------------------------------------------------------------------- 2 (1 row) -- join a router view SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3; - user_id | time + user_id | time --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3; - user_id | time + user_id | time --------------------------------------------------------------------- 2 | Thu Nov 23 17:26:14.563216 2017 (1 row) @@ -696,7 +696,7 @@ CREATE VIEW recent_10_users AS LIMIT 10; -- this is not supported since it has limit in it and subquery_pushdown is not set SELECT * FROM recent_10_users; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 @@ -718,7 +718,7 @@ ERROR: cannot push down this subquery DETAIL: Limit in subquery without limit in the outermost query is unsupported -- now both are supported when there is a limit on the outermost query SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; - user_id | lastseen + user_id | lastseen --------------------------------------------------------------------- 1 | Thu Nov 23 17:30:34.635085 2017 3 | Thu Nov 23 17:18:51.048758 2017 @@ -729,25 +729,25 @@ SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; (6 rows) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - user_id | time | event_type | value_2 | value_3 | value_4 + user_id | time | event_type | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | - 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | - 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | - 2 | Thu Nov 23 17:26:14.563216 2017 | 1 | 5 | 3 | - 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | - 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | - 4 | Thu Nov 23 
16:20:33.264457 2017 | 0 | 0 | 3 | - 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | - 2 | Thu Nov 23 15:58:49.273421 2017 | 5 | 1 | 2 | - 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | + 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | + 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | + 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | + 2 | Thu Nov 23 17:26:14.563216 2017 | 1 | 5 | 3 | + 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | + 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | + 4 | Thu Nov 23 16:20:33.264457 2017 | 0 | 0 | 3 | + 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | + 2 | Thu Nov 23 15:58:49.273421 2017 | 5 | 1 | 2 | + 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | (10 rows) RESET citus.subquery_pushdown; VACUUM ANALYZE users_table; -- explain tests EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id @@ -779,7 +779,7 @@ EXPLAIN (COSTS FALSE) SELECT * (SELECT user_id FROM selected_users) ) u WHERE user_id < 4 AND user_id > 1 ORDER BY user_id; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id @@ -807,7 +807,7 @@ EXPLAIN (COSTS FALSE) SELECT * (23 rows) EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -844,7 +844,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN SET citus.subquery_pushdown to ON; EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -887,15 +887,15 @@ CREATE TABLE large (id int, tenant_id int); -- constraint id to be unique for "insert into on conflict" test CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE VIEW small_view AS SELECT * from small where id < 100; @@ -911,7 +911,7 @@ ERROR: cannot modify views over distributed tables -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 2 | 3 5 | 4 @@ -925,7 +925,7 @@ INSERT INTO small VALUES(14, 14); -- using views in subqueries within modify statements is still valid UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 2 | 3 5 | 4 @@ -939,7 +939,7 @@ INSERT INTO large VALUES(14, 14); -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=27 FROM small_view WHERE 
small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -954,7 +954,7 @@ INSERT INTO large VALUES(14, 14); -- test on a router executable update statement UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -972,14 +972,14 @@ INSERT INTO small VALUES(99, 99); -- run these tests with RETURNING clause to observe the functionality -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; - id | tenant_id | tenant_id + id | tenant_id | tenant_id --------------------------------------------------------------------- 36 | 14 | 14 36 | 78 | 99 (2 rows) SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -994,7 +994,7 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- (0 rows) @@ -1003,7 +1003,7 @@ INSERT INTO large VALUES(14, 14); -- delete statement on large DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1022,7 +1022,7 @@ INSERT INTO large VALUES(14, 14); WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1048,15 +1048,15 @@ CREATE TABLE large_partitioned_p1 PARTITION OF large_partitioned FOR VALUES FROM CREATE TABLE large_partitioned_p2 PARTITION OF large_partitioned FOR VALUES FROM (10) TO (20); CREATE TABLE large_partitioned_p3 PARTITION OF large_partitioned FOR VALUES FROM (20) TO (100); SELECT create_distributed_table('large_partitioned','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE VIEW small_view AS SELECT * from small where id < 100; @@ -1069,7 +1069,7 @@ DELETE FROM small_view; ERROR: cannot modify views over distributed tables UPDATE large_partitioned SET id=27 FROM small_view WHERE small_view.tenant_id=large_partitioned.tenant_id; SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 6 | 5 26 | 32 @@ -1086,7 +1086,7 @@ INSERT INTO large_partitioned VALUES(14, 14); -- test on a router executable update statement UPDATE large_partitioned SET id=28 FROM small_view WHERE small_view.id=large_partitioned.id and 
small_view.tenant_id=14 and large_partitioned.tenant_id=14; SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 6 | 5 26 | 32 @@ -1103,7 +1103,7 @@ INSERT INTO large_partitioned VALUES(14, 14); -- delete statement on large DELETE FROM large_partitioned WHERE tenant_id in (SELECT tenant_id FROM small_view); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 6 | 5 26 | 32 @@ -1117,7 +1117,7 @@ INSERT INTO large_partitioned VALUES(14, 14); WITH all_small_view_tenant_ids AS (SELECT tenant_id FROM small_view) DELETE FROM large_partitioned WHERE tenant_id in (SELECT * FROM all_small_view_tenant_ids); SELECT * FROM large_partitioned ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 6 | 5 26 | 32 @@ -1134,15 +1134,15 @@ CREATE TABLE large (id int, tenant_id int); -- constraint id to be unique for "insert into on conflict" test CREATE TABLE small (id int, tenant_id int, unique(tenant_id)); SELECT create_distributed_table('large','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('small','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE VIEW small_view AS SELECT id, tenant_id FROM (SELECT *, id*2 FROM small WHERE id < 100 ORDER BY 1,2 LIMIT 5) as foo; @@ -1151,7 +1151,7 @@ CREATE VIEW small_view AS SELECT id, tenant_id FROM (SELECT *, id*2 FROM small W -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=20 FROM small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 2 | 3 5 | 4 @@ -1165,7 +1165,7 @@ INSERT INTO small VALUES(14, 14); -- using views in subqueries within modify statements is still valid UPDATE large SET id=23 FROM (SELECT *, id*2 from small_view ORDER BY 1,2 LIMIT 5) as small_view WHERE small_view.id=large.id; SELECT * FROM large order by 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 2 | 3 5 | 4 @@ -1179,7 +1179,7 @@ INSERT INTO large VALUES(14, 14); -- using views in modify statements' FROM / WHERE clauses is still valid UPDATE large SET id=27 FROM small_view WHERE small_view.tenant_id=large.tenant_id; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1194,7 +1194,7 @@ INSERT INTO large VALUES(14, 14); -- test on a router executable update statement UPDATE large SET id=28 FROM small_view WHERE small_view.id=large.id and small_view.tenant_id=14 and large.tenant_id=14; SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1212,13 +1212,13 @@ INSERT INTO small VALUES(99, 99); -- run these tests with RETURNING clause to observe the functionality -- print the columns from the "view" as well to test "rewrite resjunk" behaviour UPDATE large SET id=36 FROM small_view WHERE small_view.id=large.id RETURNING large.id, large.tenant_id, small_view.tenant_id; - id | 
tenant_id | tenant_id + id | tenant_id | tenant_id --------------------------------------------------------------------- 36 | 14 | 14 (1 row) SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1233,7 +1233,7 @@ SELECT * FROM large ORDER BY 1, 2; -- below statement should not update anything. so it should return empty UPDATE large SET id=46 FROM small_view WHERE small_view.id=large.id and large.id=15 RETURNING large.id, large.tenant_id; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- (0 rows) @@ -1242,7 +1242,7 @@ INSERT INTO large VALUES(14, 14); -- delete statement on large DELETE FROM large WHERE id in (SELECT id FROM small_view); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 @@ -1261,7 +1261,7 @@ INSERT INTO large VALUES(14, 14); WITH all_small_view_ids AS (SELECT id FROM small_view) DELETE FROM large WHERE id in (SELECT * FROM all_small_view_ids); SELECT * FROM large ORDER BY 1, 2; - id | tenant_id + id | tenant_id --------------------------------------------------------------------- 27 | 2 27 | 3 diff --git a/src/test/regress/expected/multi_working_columns.out b/src/test/regress/expected/multi_working_columns.out index 01ee93857..70a44b4ef 100644 --- a/src/test/regress/expected/multi_working_columns.out +++ b/src/test/regress/expected/multi_working_columns.out @@ -6,7 +6,7 @@ -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; - l_quantity + l_quantity --------------------------------------------------------------------- 38.00 13.00 @@ -33,7 +33,7 @@ SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; SELECT l_quantity, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count LIMIT 20; - l_quantity | count + l_quantity | count --------------------------------------------------------------------- 1.00 | 1 1.00 | 1 @@ -60,7 +60,7 @@ SELECT l_quantity, count(*) as count FROM lineitem SELECT l_quantity, l_shipdate, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count, l_shipdate LIMIT 20; - l_quantity | l_shipdate | count + l_quantity | l_shipdate | count --------------------------------------------------------------------- 1.00 | 02-07-1992 | 1 1.00 | 02-23-1992 | 1 diff --git a/src/test/regress/expected/mx_foreign_key_to_reference_table.out b/src/test/regress/expected/mx_foreign_key_to_reference_table.out index 7abc9a1ad..d99500a13 100644 --- a/src/test/regress/expected/mx_foreign_key_to_reference_table.out +++ b/src/test/regress/expected/mx_foreign_key_to_reference_table.out @@ -7,12 +7,12 @@ SET citus.next_placement_id TO 7000000; SET citus.replication_model TO streaming; -- Setup the view so that we can check if the foreign keys are created properly CREATE TYPE foreign_details AS (name text, relid text, refd_relid text); -CREATE VIEW table_fkeys_in_workers AS +CREATE VIEW table_fkeys_in_workers AS SELECT -(json_populate_record(NULL::foreign_details, - json_array_elements_text((run_command_on_workers( $$ +(json_populate_record(NULL::foreign_details, + json_array_elements_text((run_command_on_workers( $$ SELECT - COALESCE(json_agg(row_to_json(d)), '[]'::json) + COALESCE(json_agg(row_to_json(d)), '[]'::json) FROM ( 
SELECT @@ -20,7 +20,7 @@ SELECT relid::regclass::text, refd_relid::regclass::text FROM - table_fkey_cols + table_fkey_cols ) d $$ )).RESULT::json )::json )).* ; -- Check if MX can create foreign keys properly on foreign keys from distributed to reference tables @@ -30,26 +30,26 @@ CREATE TABLE referencing_table(id int, ref_id int); ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (id) REFERENCES referenced_table(test_column) ON DELETE CASCADE; ALTER TABLE referencing_table ADD CONSTRAINT foreign_key_2 FOREIGN KEY (id) REFERENCES referenced_table2(test_column2) ON DELETE CASCADE; SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_reference_table('referenced_table2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('referencing_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET search_path TO 'fkey_reference_table'; SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1, 2; - name | relid | refd_relid + name | relid | refd_relid --------------------------------------------------------------------- fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table fkey_ref | fkey_reference_table.referencing_table | fkey_reference_table.referenced_table diff --git a/src/test/regress/expected/non_colocated_join_order.out b/src/test/regress/expected/non_colocated_join_order.out index 2c96af0c8..e8e95f8be 100644 --- a/src/test/regress/expected/non_colocated_join_order.out +++ b/src/test/regress/expected/non_colocated_join_order.out @@ -4,18 +4,18 @@ -- Tests to check placements of shards must be equal to choose local join logic. CREATE TABLE test_table_1(id int, value_1 int); SELECT master_create_distributed_table('test_table_1', 'id', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_table_1 FROM STDIN DELIMITER ',' \copy test_table_1 FROM STDIN DELIMITER ',' CREATE TABLE test_table_2(id int, value_1 int); SELECT master_create_distributed_table('test_table_2', 'id', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy test_table_2 FROM STDIN DELIMITER ',' @@ -26,7 +26,7 @@ SET client_min_messages to DEBUG1; -- local join logic will be triggered. 
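Whether the local join below can be chosen depends on the two append-distributed tables having matching shard placements. As a minimal sketch (illustrative only, not part of the recorded expected output), assuming just the test_table_1 and test_table_2 tables created above and the standard pg_dist_shard metadata catalog, the shard ranges can be compared directly:

-- illustrative query: list each table's shard ranges side by side
SELECT logicalrelid::regclass AS table_name,
       shardid,
       shardminvalue,
       shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('test_table_1'::regclass, 'test_table_2'::regclass)
ORDER BY table_name, shardminvalue;

When the min/max ranges line up one-to-one across the two tables, matching shards can be joined locally on each worker, which is what the join order log line below reports.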
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ local partition join "test_table_2" ] - count + count --------------------------------------------------------------------- 6 (1 row) @@ -42,7 +42,7 @@ SET citus.shard_replication_factor to 1; SET citus.enable_repartition_joins to ON; SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; LOG: join order: [ "test_table_1" ][ single range partition join "test_table_2" ] - count + count --------------------------------------------------------------------- 9 (1 row) diff --git a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out index 4373ea227..0fd005174 100644 --- a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out @@ -23,7 +23,7 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; SHOW log_error_verbosity; - log_error_verbosity + log_error_verbosity --------------------------------------------------------------------- terse (1 row) @@ -38,7 +38,7 @@ WHERE foo.user_id = bar.user_id;$$); DEBUG: generating subplan 1_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -54,7 +54,7 @@ WHERE DEBUG: generating subplan 3_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 3_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -65,34 +65,34 @@ SELECT true AS valid FROM explain_json($$SELECT FROM users_table WHERE - value_1 + value_1 IN - (SELECT - users_table.user_id - FROM - users_table, events_table - WHERE + (SELECT + users_table.user_id 
+ FROM + users_table, events_table + WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); DEBUG: generating subplan 6_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - valid + valid --------------------------------------------------------------------- t (1 row) -- should work fine when used with CTEs SELECT true AS valid FROM explain_json($$ - WITH q1 AS (SELECT user_id FROM users_table) -SELECT count(*) FROM q1, (SELECT - users_table.user_id, random() - FROM - users_table, events_table - WHERE + WITH q1 AS (SELECT user_id FROM users_table) +SELECT count(*) FROM q1, (SELECT + users_table.user_id, random() + FROM + users_table, events_table + WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); DEBUG: generating subplan 8_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: generating subplan 8_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -104,7 +104,7 @@ SELECT true AS valid FROM explain_json($$ DEBUG: generating subplan 11_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 11_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -115,19 +115,19 @@ SELECT event, array_length(events_table, 1) FROM ( SELECT event, array_agg(t.user_id) AS events_table FROM ( - SELECT + SELECT DISTINCT ON(e.event_type::text) e.event_type::text as event, e.time, e.user_id - FROM + FROM users_table AS u, events_table 
AS e, (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6,7,8)) as bar - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) LIMIT 5 @@ -144,7 +144,7 @@ DEBUG: generating subplan 14_3 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 14_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) DEBUG: generating subplan 14_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event - valid + valid --------------------------------------------------------------------- t (1 row) @@ -160,7 +160,7 @@ WHERE foo.user_id = bar.value_1;$$); DEBUG: generating subplan 20_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.value_1) - valid + valid --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index 3382f9013..f0f270678 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -43,7 +43,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 3_1 for 
subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -63,7 +63,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 5_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -82,7 +82,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 7_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -103,7 +103,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 9_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -123,7 +123,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 11_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 11 query after 
replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -144,7 +144,7 @@ $$); DEBUG: generating subplan 13_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -165,7 +165,7 @@ $$); DEBUG: generating subplan 16_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 16_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -191,7 +191,7 @@ DEBUG: generating subplan 19_2 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 19_3 for subquery SELECT event_type FROM 
public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 5) DEBUG: generating subplan 19_4 for subquery SELECT foo.user_id, random() AS random FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT foo_top.user_id, foo_top.random, events_table.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo_top, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) foo_top.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -222,7 +222,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 24_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid + valid --------------------------------------------------------------------- t (1 row) @@ -253,7 +253,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 26_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top - valid + valid --------------------------------------------------------------------- t (1 row) @@ -283,7 +283,7 @@ $$); DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan 28_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type 
OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid + valid --------------------------------------------------------------------- t (1 row) @@ -314,7 +314,7 @@ $$); DEBUG: generating subplan 31_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan 31_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top - valid + valid --------------------------------------------------------------------- t (1 row) @@ -347,7 +347,7 @@ $$); DEBUG: generating subplan 34_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 34_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT 
count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON ((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -380,7 +380,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 37_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -412,7 +412,7 @@ $$); DEBUG: generating subplan 39_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16]))) DEBUG: generating subplan 39_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -432,7 +432,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 42_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE (events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid + valid --------------------------------------------------------------------- t (1 row) @@ -451,7 +451,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 44_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid + valid --------------------------------------------------------------------- t (1 row) @@ -474,7 +474,7 @@ DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS co $$); DEBUG: generating subplan 46_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users 
WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid + valid --------------------------------------------------------------------- t (1 row) @@ -507,7 +507,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 48_1 for subquery SELECT value_2 FROM public.events_table DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar - valid + valid --------------------------------------------------------------------- t (1 row) @@ -528,7 +528,7 @@ WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); DEBUG: generating subplan 50_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -545,7 +545,7 @@ SELECT count(*) FROM q1, (SELECT DEBUG: generating subplan 52_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: generating subplan 52_2 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan 52 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('52_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('52_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -561,7 +561,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); DEBUG: generating subplan 55_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: Plan 55 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('55_1'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -573,7 +573,7 @@ SELECT true AS valid FROM explain_json_2($$ DEBUG: generating subplan 57_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan 57_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -613,7 +613,7 @@ DEBUG: generating subplan 60_3 for subquery SELECT users_table.user_id FROM pub DEBUG: generating subplan 60_4 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e, (SELECT intermediate_result.user_id FROM read_intermediate_result('60_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) DEBUG: generating subplan 60_5 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('60_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('60_5'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event - valid + valid --------------------------------------------------------------------- t (1 row) @@ -641,7 +641,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 68_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT 
intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -660,7 +660,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 70_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan 70 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('70_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -678,7 +678,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 72_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -720,7 +720,7 @@ DEBUG: generating subplan 77_2 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('77_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: generating subplan 76_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table DEBUG: Plan 76 query after replacing subqueries and CTEs: SELECT a.user_id, foo.value_1 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('76_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN (SELECT users_table.value_1 FROM public.users_table) foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -744,7 +744,7 @@ DEBUG: generating subplan 81_2 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM 
read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('81_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: generating subplan 80_1 for subquery SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table DEBUG: Plan 80 query after replacing subqueries and CTEs: SELECT a.user_id, foo.user_id, foo."time", foo.value_1, foo.value_2, foo.value_3, foo.value_4 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('80_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a JOIN public.users_table foo ON ((a.user_id OPERATOR(pg_catalog.=) foo.value_1))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -774,7 +774,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 84_1 for subquery SELECT value_1 FROM public.users_table DEBUG: Plan 84 query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('84_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -818,7 +818,7 @@ DEBUG: generating subplan 86_2 for CTE non_colocated_subquery_2: SELECT count(* DEBUG: generating subplan 89_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) DEBUG: Plan 89 query after replacing subqueries and CTEs: SELECT count(*) AS cnt FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('89_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT non_colocated_subquery.value_2, non_colocated_subquery_2.cnt FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) non_colocated_subquery, (SELECT intermediate_result.cnt FROM read_intermediate_result('86_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) non_colocated_subquery_2 WHERE (non_colocated_subquery.value_2 OPERATOR(pg_catalog.<>) non_colocated_subquery_2.cnt) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -839,7 +839,7 @@ $$); DEBUG: generating subplan 91_1 for subquery SELECT users_table_local.value_2 FROM non_colocated_subquery.users_table_local, non_colocated_subquery.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan 91_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) 
events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -875,7 +875,7 @@ DEBUG: generating subplan 93_1 for subquery SELECT value_1, value_2 FROM public DEBUG: generating subplan 93_2 for subquery SELECT value_1 FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.<) 1) DEBUG: generating subplan 93_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) DEBUG: Plan 93 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('93_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, value_2 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) WHERE ((bar.value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_1 FROM read_intermediate_result('93_2'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer))) AND (bar.value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('93_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2])))))) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -895,7 +895,7 @@ SELECT true AS valid FROM explain_json_2($$ $$); DEBUG: generating subplan 97_1 for subquery SELECT user_id, value_2 FROM public.events_table DEBUG: Plan 97 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('97_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) - valid + valid --------------------------------------------------------------------- t (1 row) @@ -1047,15 +1047,15 @@ CREATE 
TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); -- modifications on the partitions are only allowed with rep=1 SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('table2','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('table1','tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- all of the above queries are non-colocated subquery joins diff --git a/src/test/regress/expected/partitioned_intermediate_results.out b/src/test/regress/expected/partitioned_intermediate_results.out index c3a865d54..12bfb4a9a 100644 --- a/src/test/regress/expected/partitioned_intermediate_results.out +++ b/src/test/regress/expected/partitioned_intermediate_results.out @@ -7,7 +7,7 @@ SELECT * FROM worker_partition_query_result('squares_hash', 'SELECT i, i * i FROM generate_series(1, 10) i', 0, 'hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); - partition_index | rows_written | bytes_written + partition_index | rows_written | bytes_written --------------------------------------------------------------------- 0 | 4 | 21 1 | 3 | 14 @@ -18,7 +18,7 @@ SELECT * FROM worker_partition_query_result('squares_hash', SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_0', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 + hashint4 | x | x2 --------------------------------------------------------------------- -1905060026 | 1 | 1 -1330264708 | 5 | 25 @@ -29,7 +29,7 @@ ORDER BY x; SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_1', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 + hashint4 | x | x2 --------------------------------------------------------------------- -28094569 | 3 | 9 -1011077333 | 4 | 16 @@ -39,7 +39,7 @@ ORDER BY x; SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_2', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 + hashint4 | x | x2 --------------------------------------------------------------------- 566031088 | 6 | 36 (1 row) @@ -47,7 +47,7 @@ ORDER BY x; SELECT hashint4(x), x, x2 FROM read_intermediate_result('squares_hash_3', 'text') AS res (x int, x2 int) ORDER BY x; - hashint4 | x | x2 + hashint4 | x | x2 --------------------------------------------------------------------- 1134484726 | 2 | 4 1672378334 | 9 | 81 @@ -63,7 +63,7 @@ SELECT * FROM worker_partition_query_result('squares_range', '{0,21,41,61}'::text[], '{20,40,60,100}'::text[], true /* binary format */); - partition_index | rows_written | bytes_written + partition_index | rows_written | bytes_written --------------------------------------------------------------------- 0 | 4 | 93 1 | 2 | 57 @@ -74,7 +74,7 @@ SELECT * FROM worker_partition_query_result('squares_range', SELECT x, x2 FROM read_intermediate_result('squares_range_0', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 1 | 1 2 | 4 @@ -85,7 +85,7 @@ ORDER BY x; SELECT x, x2 FROM read_intermediate_result('squares_range_1', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 5 | 25 6 | 36 @@ -94,7 +94,7 @@ ORDER BY x; SELECT x, x2 FROM read_intermediate_result('squares_range_2', 'binary')
AS res (x int, x2 int) ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 7 | 49 (1 row) @@ -102,7 +102,7 @@ ORDER BY x; SELECT x, x2 FROM read_intermediate_result('squares_range_3', 'binary') AS res (x int, x2 int) ORDER BY x; - x | x2 + x | x2 --------------------------------------------------------------------- 8 | 64 9 | 81 @@ -116,7 +116,7 @@ SELECT * FROM worker_partition_query_result('doubles_hash', 'SELECT i, i * 2 FROM generate_series(1, 1000000) i', 0, 'hash', '{-2147483648,-1073741824,0,1073741824}'::text[], '{-1073741825,-1,1073741823,2147483647}'::text[], false); - partition_index | rows_written | bytes_written + partition_index | rows_written | bytes_written --------------------------------------------------------------------- 0 | 250199 | 3586179 1 | 249872 | 3581280 @@ -126,9 +126,9 @@ SELECT * FROM worker_partition_query_result('doubles_hash', SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_hash_0', 'doubles_hash_1', - 'doubles_hash_2', + 'doubles_hash_2', 'doubles_hash_3'], 'text') AS res (x int, x2 int); - count + count --------------------------------------------------------------------- 1000000 (1 row) @@ -140,7 +140,7 @@ SELECT * FROM worker_partition_query_result('doubles_range', 'SELECT i, i * 2 FROM generate_series(1, 1000000) i', 0, 'range', '{0,250001,500001,750001}'::text[], '{250000,500000,750000,1000000}'::text[], true); - partition_index | rows_written | bytes_written + partition_index | rows_written | bytes_written --------------------------------------------------------------------- 0 | 250000 | 4500021 1 | 250000 | 4500021 @@ -150,9 +150,9 @@ SELECT * FROM worker_partition_query_result('doubles_range', SELECT count(*) FROM read_intermediate_results(ARRAY['doubles_range_0', 'doubles_range_1', - 'doubles_range_2', + 'doubles_range_2', 'doubles_range_3'], 'binary') AS res (x int, x2 int); - count + count --------------------------------------------------------------------- 1000000 (1 row) @@ -362,9 +362,9 @@ $$ LANGUAGE plpgsql; SET citus.shard_count TO 32; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -376,9 +376,9 @@ DROP TABLE t; SET citus.shard_count TO 1; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -390,9 +390,9 @@ DROP TABLE t; SET citus.shard_count TO 17; CREATE TABLE t(a int, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL test_partition_query_results('t', 'SELECT x, x * x FROM generate_series(1, 100) x'); @@ -404,9 +404,9 @@ DROP TABLE t; SET citus.shard_count TO 8; CREATE TABLE t(a DATE, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL test_partition_query_results('t', 'SELECT (''1985-05-18''::date + (x::text || '' days'')::interval)::date, x * x FROM generate_series(1, 100) x'); @@ -418,9 +418,9 @@ 
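Every worker_partition_query_result()/read_intermediate_results() pair above follows the same round trip: partition a query's result into named fragments on the worker, then read the fragments back as one set. A minimal sketch of that round trip using the signature shown in these hunks; the fragment prefix and range bounds below are illustrative, not taken from the test:

SELECT * FROM worker_partition_query_result('sketch_range',
    'SELECT i, i * i FROM generate_series(1, 20) i',
    0, 'range', '{1,11}'::text[], '{10,20}'::text[], true /* binary format */);
SELECT count(*) FROM read_intermediate_results(ARRAY['sketch_range_0', 'sketch_range_1'],
    'binary') AS res (x int, x2 int);  -- all 20 rows come back across the two fragments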
DROP TABLE t; SET citus.shard_count TO 8; CREATE TABLE t(a int4range, b int); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL test_partition_query_results('t', 'SELECT int4range(x,2*x+10), x * x FROM generate_series(1, 100) x'); @@ -431,9 +431,9 @@ DROP TABLE t; -- range partitioning, int partition column CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,76}', @@ -446,9 +446,9 @@ DROP TABLE t; -- not covering ranges, should ERROR CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,100}', @@ -459,9 +459,9 @@ DROP TABLE t; -- overlapping ranges, we allow this in range partitioned distributed tables, should be fine CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL create_range_partitioned_shards('t', '{0,25,50,76}', @@ -476,9 +476,9 @@ CREATE TYPE composite_key_type AS (f1 int, f2 text); SET citus.shard_count TO 8; CREATE TABLE t(key composite_key_type, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL create_range_partitioned_shards('t', '{"(0,a)","(25,a)","(50,a)","(75,a)"}', @@ -492,9 +492,9 @@ DROP TYPE composite_key_type; -- unsorted ranges CREATE TABLE t(key int, value int); SELECT create_distributed_table('t', 'key', 'range'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CALL create_range_partitioned_shards('t', '{50,25,76,0}', diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 18394b27c..8d48bd454 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -35,9 +35,9 @@ insert into gen1 (id, val1) values (1,4),(3,6),(5,2),(7,2); insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2); select create_distributed_table('gen1', 'id'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) select create_distributed_table('gen2', 'val2'); @@ -46,7 +46,7 @@ DETAIL: Distribution column must not use GENERATED ALWAYS AS (...) STORED. 
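The CREATE TABLE statements for gen1 and gen2 sit outside these hunks; the visible rows (val1 = 4 yielding val2 = 6) imply a stored generated column. A hypothetical definition that would reproduce the ERROR/DETAIL above; the table name and expression are assumptions, not the test's DDL:

CREATE TABLE gen_sketch (id int, val1 int, val2 int GENERATED ALWAYS AS (val1 + 2) STORED);
SELECT create_distributed_table('gen_sketch', 'id');    -- fine: ordinary distribution column
SELECT create_distributed_table('gen_sketch', 'val2');  -- fails: GENERATED ALWAYS AS (...) STORED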
insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2); insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2); select * from gen1 order by 1,2,3; - id | val1 | val2 + id | val1 | val2 --------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 @@ -59,7 +59,7 @@ select * from gen1 order by 1,2,3; (8 rows) select * from gen2 order by 1,2,3; - id | val1 | val2 + id | val1 | val2 --------------------------------------------------------------------- 1 | 4 | 6 2 | 4 | 6 @@ -79,9 +79,9 @@ vacuum (index_cleanup 1) gen1; -- COPY FROM create table cptest (id int, val int); select create_distributed_table('cptest', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) copy cptest from STDIN with csv where val < 4; @@ -99,15 +99,15 @@ ERROR: syntax error at or near "1" CREATE TABLE single_hash_repartition_first (id int, sum int, avg float); CREATE TABLE single_hash_repartition_second (id int primary key, sum int, avg float); SELECT create_distributed_table('single_hash_repartition_first', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('single_hash_repartition_second', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO single_hash_repartition_first @@ -129,7 +129,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -148,7 +148,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -168,7 +168,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) -> Distributed Subplan 7_1 @@ -191,7 +191,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -212,7 +212,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -233,7 +233,7 @@ SELECT count(*) FROM cte1, single_hash_repartition_second WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) @@ -268,16 +268,16 @@ ERROR: insert or update on 
table "collection_users" violates foreign key constr DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list". SELECT create_distributed_table('collections_list', 'key'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT create_distributed_table('collection_users', 'key'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- should still fail because of fkey @@ -293,9 +293,9 @@ CREATE TABLE test (x int, y int); INSERT INTO test (x,y) SELECT i,i*3 from generate_series(1, 100) i; SELECT create_distributed_table('test', 'x'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- single shard queries with CHAIN @@ -303,7 +303,7 @@ BEGIN; UPDATE test SET y = 15 WHERE x = 1; COMMIT AND CHAIN; SELECT * FROM test WHERE x = 1; - x | y + x | y --------------------------------------------------------------------- 1 | 15 (1 row) @@ -313,7 +313,7 @@ BEGIN; UPDATE test SET y = 20 WHERE x = 1; ROLLBACK AND CHAIN; SELECT * FROM test WHERE x = 1; - x | y + x | y --------------------------------------------------------------------- 1 | 15 (1 row) @@ -324,7 +324,7 @@ BEGIN; UPDATE test SET y = 25; COMMIT AND CHAIN; SELECT DISTINCT y FROM test; - y + y --------------------------------------------------------------------- 25 (1 row) @@ -334,7 +334,7 @@ BEGIN; UPDATE test SET y = 30; ROLLBACK AND CHAIN; SELECT DISTINCT y FROM test; - y + y --------------------------------------------------------------------- 25 (1 row) @@ -347,7 +347,7 @@ UPDATE test SET y = 35; ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; - y + y --------------------------------------------------------------------- 25 (1 row) @@ -358,7 +358,7 @@ UPDATE test SET y = 40; ERROR: cannot execute UPDATE in a read-only transaction COMMIT; SELECT DISTINCT y FROM test; - y + y --------------------------------------------------------------------- 25 (1 row) @@ -380,9 +380,9 @@ select create_distributed_table('col_test', 'val'); ERROR: Hash distributed partition columns may not use a non deterministic collation select create_distributed_table('col_test', 'id'); NOTICE: Copying data from local table... 
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) insert into col_test values @@ -390,7 +390,7 @@ insert into col_test values select count(*) from col_test where val = 'asdf'; - count + count --------------------------------------------------------------------- 3 (1 row) diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 90f2a01c7..64166ac65 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -6,27 +6,27 @@ SET search_path TO "extension'test"; CREATE EXTENSION seg; -- make sure that both the schema and the extension are distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'); - count + count --------------------------------------------------------------------- 1 (1 row) CREATE TABLE test_table (key int, value seg); SELECT create_distributed_table('test_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- make sure that the table is also distributed now SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -34,7 +34,7 @@ SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_ CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg); -- verify that the type that depends on the extension is also marked as distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test')); - count + count --------------------------------------------------------------------- 1 (1 row) @@ -46,20 +46,20 @@ BEGIN; -- this should not succeed as we do not distribute extension commands within transaction blocks CREATE TABLE dist_table (key int, value public.issn); SELECT create_distributed_table('dist_table', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- we can even run queries (sequentially) over the distributed table SELECT * FROM dist_table; - key | value + key | value --------------------------------------------------------------------- (0 rows) INSERT INTO dist_table VALUES (1, public.issn('1436-4522')); INSERT INTO dist_table SELECT * FROM dist_table RETURNING *; - key | value + key | value --------------------------------------------------------------------- 1 | 1436-4522 (1 row) @@ -67,13 +67,13 @@ BEGIN; COMMIT; -- make sure that the extension is distributed even if we run create extension in a transaction block SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count + count --------------------------------------------------------------------- 1 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers + run_command_on_workers
--------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) @@ -82,9 +82,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname CREATE TABLE ref_table (a public.issn); -- now, create a reference table relying on the data types SELECT create_reference_table('ref_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- now, drop the extension, recreate it with an older version and update it to the latest version @@ -94,7 +94,7 @@ CREATE EXTENSION isn WITH VERSION "1.1"; RESET client_min_messages; -- before updating the version, ensure the current version SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1.1) (localhost,57638,t,1.1) @@ -104,7 +104,7 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam ALTER EXTENSION isn UPDATE TO '1.2'; -- show that ALTER EXTENSION is propagated SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1.2) (localhost,57638,t,1.2) @@ -112,7 +112,7 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- before changing the schema, ensure the current schema SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,extension'test) (localhost,57638,t,extension'test) @@ -124,14 +124,14 @@ ALTER EXTENSION isn SET SCHEMA public; SET search_path TO public; -- make sure that the extension is distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count + count --------------------------------------------------------------------- 1 (1 row) -- show that the ALTER EXTENSION command is propagated SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,public) (localhost,57638,t,public) @@ -154,7 +154,7 @@ DROP EXTENSION seg CASCADE; DROP SCHEMA "extension'test" CASCADE; RESET client_min_messages; SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? + ?column?
--------------------------------------------------------------------- 1 (1 row) @@ -163,13 +163,13 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port); CREATE EXTENSION seg; -- show that the extension is created on existing worker SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1) (1 row) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1.3) (1 row) @@ -177,29 +177,29 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- now create the reference table CREATE TABLE ref_table_2 (x seg); SELECT create_reference_table('ref_table_2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) -- and add the other node SELECT 1 from master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "ref_table_2" to the node localhost:xxxxx - ?column? + ?column? --------------------------------------------------------------------- 1 (1 row) -- show that the extension is created on both the existing and the new node SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1) (localhost,57638,t,1) (2 rows) SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) @@ -207,13 +207,13 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- and similarly check for the reference table select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -226,14 +226,14 @@ ROLLBACK; -- at the end of the transaction block, we did not create the isn extension on the coordinator or the worker nodes as we rolled back -- make sure that the extension is not distributed SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn'); - count + count --------------------------------------------------------------------- 0 (1 row) -- and the extension does not exist on workers SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) @@ -261,7 +261,7 @@ set citus.enable_ddl_propagation to 'on'; SET client_min_messages TO WARNING; DROP EXTENSION pg_buffercache, isn CASCADE; SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn'); - count + count --------------------------------------------------------------------- 0 (1 row) @@ -273,13 +273,13 @@ SET
client_min_messages TO WARNING; -- drop extension should just work DROP EXTENSION seg CASCADE; SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); - count + count --------------------------------------------------------------------- 0 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,0) (localhost,57638,t,0) @@ -289,7 +289,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname RESET client_min_messages; -- make sure that the extension is not available anymore as a distributed object SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); - count + count --------------------------------------------------------------------- 0 (1 row) @@ -301,24 +301,24 @@ SET search_path TO "extension'test"; BEGIN; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE EXTENSION seg; CREATE TABLE some_random_table_2 (a int, b seg); SELECT create_distributed_table('some_random_table_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ROLLBACK; -- show that the CREATE EXTENSION command propagated even if the transaction -- block is rolled back; that's a shortcoming of the dependency creation logic SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,1.3) (localhost,57638,t,1.3) @@ -334,7 +334,7 @@ SET search_path TO "extension'test"; RESET client_min_messages; -- remove the node, we'll add it back again SELECT 1 from master_remove_node('localhost', :worker_2_port); - ?column? + ?column?
--------------------------------------------------------------------- 1 (1 row) -- make sure that both extensions are created on both nodes SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); - count + count --------------------------------------------------------------------- 2 (1 row) SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,2) (localhost,57638,t,2) diff --git a/src/test/regress/expected/propagate_set_commands.out b/src/test/regress/expected/propagate_set_commands.out index 4cb711c51..2a906162e 100644 --- a/src/test/regress/expected/propagate_set_commands.out +++ b/src/test/regress/expected/propagate_set_commands.out @@ -2,9 +2,9 @@ CREATE SCHEMA propagate_set_commands; SET search_path TO propagate_set_commands; CREATE TABLE test (id int, value int); SELECT create_distributed_table('test', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test VALUES (1,1), (3,3); @@ -15,7 +15,7 @@ SET citus.task_executor_type TO 'adaptive'; SET citus.select_opens_transaction_block TO on; BEGIN; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) @@ -28,7 +28,7 @@ BEGIN; -- set session commands are not propagated SET enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) @@ -38,7 +38,7 @@ BEGIN; -- should not propagate exit_on_error SET LOCAL exit_on_error TO on; SELECT current_setting('exit_on_error') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -48,14 +48,14 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) -- expand to new node, set should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -65,7 +65,7 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -73,14 +73,14 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; -- should be back on after set to default SET LOCAL enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) -- expand to new node, set to default should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) @@ -90,7 +90,7 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; 
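These propagate_set_commands hunks all exercise one rule: with citus.propagate_set_commands set to 'local' (the GUC the test resets near the end), SET LOCAL is forwarded to the worker that executes the query, while plain SET, SET ... TO DEFAULT without LOCAL, and anything after RESET ALL stay coordinator-only. A minimal sketch of the propagated case, assuming the 'local' setting from the test setup:

SET citus.propagate_set_commands TO 'local';
BEGIN;
SET LOCAL enable_hashagg TO off;
SELECT current_setting('enable_hashagg') FROM test WHERE id = 1;  -- 'off', evaluated on the worker
COMMIT;
SHOW enable_hashagg;  -- back to 'on'; SET LOCAL ends with the transaction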
SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -98,7 +98,7 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; -- does not have the LOCAL keyword, not propagated SET enable_hashagg TO DEFAULT; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -108,7 +108,7 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -116,14 +116,14 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; -- should be back on after reset RESET enable_hashagg; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) -- expand to new node, reset should still apply SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) @@ -133,7 +133,7 @@ BEGIN; -- should be off on worker SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- off (1 row) @@ -142,7 +142,7 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; RESET ALL; SET search_path = 'propagate_set_commands'; SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) @@ -150,7 +150,7 @@ SELECT current_setting('enable_hashagg') FROM test WHERE id = 1; -- funky case, we reset citus.propagate_set_commands, so not set again SET LOCAL enable_hashagg TO false; SELECT current_setting('enable_hashagg') FROM test WHERE id = 3; - current_setting + current_setting --------------------------------------------------------------------- on (1 row) diff --git a/src/test/regress/expected/recursive_dml_queries_mx.out b/src/test/regress/expected/recursive_dml_queries_mx.out index d627a5ac1..58b513ced 100644 --- a/src/test/regress/expected/recursive_dml_queries_mx.out +++ b/src/test/regress/expected/recursive_dml_queries_mx.out @@ -4,23 +4,23 @@ SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE recursive_dml_queries_mx.distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE recursive_dml_queries_mx.second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE recursive_dml_queries_mx.reference_table (id text, name text); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table 
--------------------------------------------------------------------- - + (1 row) INSERT INTO distributed_table SELECT i::text, i % 10, row_to_json(row(i, i*i)) FROM generate_series (0, 100) i; @@ -28,15 +28,15 @@ INSERT INTO second_distributed_table SELECT i::text, i % 10, row_to_json(row(i, INSERT INTO reference_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; SET client_min_messages TO DEBUG1; -- the subquery foo is recursively planned -UPDATE - reference_table -SET - name = 'new_' || name -FROM +UPDATE + reference_table +SET + name = 'new_' || name +FROM ( - SELECT + SELECT avg(second_distributed_table.tenant_id::int) as avg_tenant_id - FROM + FROM second_distributed_table ) as foo WHERE @@ -45,27 +45,27 @@ DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) -- the subquery foo is recursively planned -- but note that the subquery foo itself is pushdownable -UPDATE - second_distributed_table -SET +UPDATE + second_distributed_table +SET dept = foo.max_dept * 2 -FROM +FROM ( - SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) as max_dept FROM + SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) as max_dept FROM ( - SELECT + SELECT second_distributed_table.dept, second_distributed_table.tenant_id - FROM + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id ) foo_inner GROUP BY - tenant_id + tenant_id ORDER BY 1 DESC ) as foo WHERE - foo.tenant_id != second_distributed_table.tenant_id + foo.tenant_id != second_distributed_table.tenant_id AND second_distributed_table.dept IN (2); DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries_mx.second_distributed_table, recursive_dml_queries_mx.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries_mx.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) @@ -74,29 +74,29 @@ DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_q SET search_path TO recursive_dml_queries_mx, public; -- the subquery foo is recursively planned -- and foo itself is a non colocated subquery and recursively planned -UPDATE - second_distributed_table -SET +UPDATE + second_distributed_table +SET dept = foo.tenant_id::int / 4 -FROM +FROM ( - SELECT DISTINCT foo_inner_1.tenant_id FROM + SELECT DISTINCT foo_inner_1.tenant_id FROM ( - SELECT + SELECT second_distributed_table.dept, 
second_distributed_table.tenant_id - FROM + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id AND second_distributed_table.dept IN (3,4) ) foo_inner_1, ( - SELECT - second_distributed_table.tenant_id - FROM + SELECT + second_distributed_table.tenant_id + FROM second_distributed_table, distributed_table - WHERE + WHERE distributed_table.tenant_id = second_distributed_table.tenant_id AND second_distributed_table.dept IN (4,5) @@ -104,59 +104,59 @@ FROM WHERE foo_inner_1.tenant_id != foo_inner_2.tenant_id ) as foo WHERE - foo.tenant_id != second_distributed_table.tenant_id + foo.tenant_id != second_distributed_table.tenant_id AND second_distributed_table.dept IN (3); -- use the second worker \c - - - :worker_2_port SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; -CREATE VIEW tenant_ids AS - SELECT - tenant_id, name - FROM +CREATE VIEW tenant_ids AS + SELECT + tenant_id, name + FROM distributed_table, reference_table - WHERE + WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; -- we currently do not allow local tables in modification queries -UPDATE - distributed_table -SET +UPDATE + distributed_table +SET dept = avg_tenant_id::int -FROM +FROM ( - SELECT + SELECT avg(local_table.id::int) as avg_tenant_id - FROM + FROM local_table ) as foo WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; - tenant_id | dept | info + tenant_id | dept | info --------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) -- we currently do not allow views in modification queries -UPDATE - distributed_table -SET +UPDATE + distributed_table +SET dept = avg_tenant_id::int -FROM +FROM ( - SELECT + SELECT avg(tenant_id::int) as avg_tenant_id - FROM + FROM tenant_ids ) as foo WHERE foo.avg_tenant_id::int::text = distributed_table.tenant_id RETURNING distributed_table.*; - tenant_id | dept | info + tenant_id | dept | info --------------------------------------------------------------------- 50 | 50 | {"f1": 50, "f2": 2500} (1 row) diff --git a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out index 35f79d40d..389053abf 100644 --- a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out +++ b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out @@ -2,23 +2,23 @@ CREATE SCHEMA recursive_dml_with_different_planner_executors; SET search_path TO recursive_dml_with_different_planner_executors, public; CREATE TABLE distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('distributed_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE second_distributed_table (tenant_id text, dept int, info jsonb); SELECT create_distributed_table('second_distributed_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE reference_table (id text, name text); SELECT create_reference_table('reference_table'); - create_reference_table + create_reference_table 
--------------------------------------------------------------------- - + (1 row) INSERT INTO distributed_table SELECT i::text, i % 10, row_to_json(row(i, i*i)) FROM generate_series (0, 100) i; @@ -26,23 +26,23 @@ INSERT INTO second_distributed_table SELECT i::text, i % 10, row_to_json(row(i, SET client_min_messages TO DEBUG1; -- subquery with router planner -- joined with a real-time query -UPDATE - distributed_table -SET dept = foo.dept FROM - (SELECT tenant_id, dept FROM second_distributed_table WHERE dept = 1 ) as foo, +UPDATE + distributed_table +SET dept = foo.dept FROM + (SELECT tenant_id, dept FROM second_distributed_table WHERE dept = 1 ) as foo, (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4) OFFSET 0) as bar WHERE foo.tenant_id = bar.tenant_id - AND distributed_table.tenant_id = bar.tenant_id; + AND distributed_table.tenant_id = bar.tenant_id; DEBUG: generating subplan 3_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) OFFSET 0 DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.dept FROM (SELECT second_distributed_table.tenant_id, second_distributed_table.dept FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.tenant_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) bar WHERE ((foo.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) bar.tenant_id)) -- a non colocated subquery inside the UPDATE -UPDATE distributed_table SET dept = foo.max_dept FROM +UPDATE distributed_table SET dept = foo.max_dept FROM ( - SELECT + SELECT max(dept) as max_dept - FROM + FROM (SELECT DISTINCT tenant_id, dept FROM distributed_table) as distributed_table - WHERE tenant_id NOT IN + WHERE tenant_id NOT IN (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4)) ) as foo WHERE foo.max_dept > dept * 3; DEBUG: generating subplan 5_1 for subquery SELECT tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) @@ -50,7 +50,7 @@ DEBUG: generating subplan 5_2 for subquery SELECT max(dept) AS max_dept FROM (S DEBUG: Plan 5 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE (foo.max_dept OPERATOR(pg_catalog.>) (distributed_table.dept OPERATOR(pg_catalog.*) 3)) -- subquery with repartition query SET citus.enable_repartition_joins to ON; -UPDATE distributed_table SET dept = foo.some_tenants::int FROM +UPDATE distributed_table SET dept = foo.some_tenants::int FROM ( SELECT DISTINCT second_distributed_table.tenant_id as some_tenants @@ -59,14 +59,14 @@ UPDATE distributed_table SET dept = foo.some_tenants::int FROM DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT second_distributed_table.tenant_id AS some_tenants FROM recursive_dml_with_different_planner_executors.second_distributed_table, recursive_dml_with_different_planner_executors.distributed_table WHERE 
(second_distributed_table.dept OPERATOR(pg_catalog.=) distributed_table.dept) DEBUG: Plan 8 query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = (foo.some_tenants)::integer FROM (SELECT intermediate_result.some_tenants FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(some_tenants text)) foo SET citus.enable_repartition_joins to OFF; --- final query is router -UPDATE distributed_table SET dept = foo.max_dept FROM +-- final query is router +UPDATE distributed_table SET dept = foo.max_dept FROM ( - SELECT + SELECT max(dept) as max_dept - FROM + FROM (SELECT DISTINCT tenant_id, dept FROM distributed_table) as distributed_table - WHERE tenant_id IN + WHERE tenant_id IN (SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4)) ) as foo WHERE foo.max_dept >= dept and tenant_id = '8'; DEBUG: generating subplan 10_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) diff --git a/src/test/regress/expected/relation_access_tracking.out b/src/test/regress/expected/relation_access_tracking.out index 89f47cd8e..de8b50c8e 100644 --- a/src/test/regress/expected/relation_access_tracking.out +++ b/src/test/regress/expected/relation_access_tracking.out @@ -54,44 +54,44 @@ CREATE VIEW relation_acesses AS SET citus.shard_replication_factor TO 1; CREATE TABLE table_1 (key int, value int); SELECT create_distributed_table('table_1', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_2 (key int, value int); SELECT create_distributed_table('table_2', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_3 (key int, value int); SELECT create_distributed_table('table_3', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_4 (key int, value int); SELECT create_distributed_table('table_4', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_5 (key int, value int); SELECT create_distributed_table('table_5', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE table_6 (key int, value int); SELECT create_reference_Table('table_6'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i; @@ -104,13 +104,13 @@ INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i; BEGIN; CREATE TABLE table_7 (key int, value int); SELECT create_distributed_table('table_7', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 
row) SELECT * FROM relation_acesses WHERE table_name IN ('table_7') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_7 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -118,13 +118,13 @@ BEGIN; COMMIT; -- outside the transaction blocks, the function always returns zero SELECT count(*) FROM table_1; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -133,45 +133,45 @@ SELECT * FROM relation_acesses WHERE table_name = 'table_1'; -- and parallel SELECTs, DMLs, and DDLs BEGIN; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; - count + count --------------------------------------------------------------------- 2 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1), (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -179,7 +179,7 @@ BEGIN; ALTER TABLE table_1 ADD COLUMN test_col INT; -- now see that the other tables are not accessed at all SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | parallel_access (1 row) @@ -189,39 +189,39 @@ ROLLBACK; -- commands executed, we can treat the transaction
as sequential BEGIN; SELECT count(*) FROM table_1 WHERE key = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) SELECT count(*) FROM table_1 WHERE key = 2; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (1,1); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) INSERT INTO table_1 VALUES (2,2); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -231,7 +231,7 @@ ROLLBACK; BEGIN; ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -247,13 +247,13 @@ BEGIN; table_1.key = table_2.key AND table_2.key = table_3.key AND table_3.key = table_4.key AND table_4.key = table_5.key AND table_1.key = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -273,13 +273,13 @@ BEGIN; table_1, table_2 WHERE table_1.key = table_2.key; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -296,13 +296,13 @@ BEGIN; table_1, table_2 WHERE table_1.key = table_2.key; - count + count --------------------------------------------------------------------- 101 (1 row) 
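-- the join above has no filter on the distribution key, so it reads every
-- shard of table_1 and table_2 at once; the check below therefore reports
-- parallel_access for both tables, unlike the earlier single-key (router)
-- queries, which leave them not_parallel_accessed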
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -323,13 +323,13 @@ BEGIN; table_1.key = table_2.key AND table_2.key = table_3.key AND table_3.key = table_4.key AND table_4.key = table_5.key ) as foo; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -347,7 +347,7 @@ ROLLBACK; BEGIN; UPDATE table_1 SET value = 15; SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -355,7 +355,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; UPDATE table_2 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -368,7 +368,7 @@ BEGIN; table_1 SET value = 15 WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -380,7 +380,7 @@ ROLLBACK; BEGIN; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed @@ -392,7 +392,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; INSERT INTO table_2 SELECT * FROM table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | 
not_parallel_accessed | not_parallel_accessed @@ -403,7 +403,7 @@ ROLLBACK; BEGIN; INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed @@ -424,13 +424,13 @@ BEGIN; table_1.key = table_2.key OFFSET 0 ) as foo; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -453,7 +453,7 @@ BEGIN; OFFSET 0 ) as foo; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -479,7 +479,7 @@ BEGIN; OFFSET 0 ) as foo; SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -505,7 +505,7 @@ BEGIN; ) as foo ) AND value IN (SELECT key FROM table_4); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -521,7 +521,7 @@ BEGIN; 2 2 3 3 SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -531,7 +531,7 @@ ROLLBACK; BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | parallel_access | not_parallel_accessed (1 row) @@ -541,7 +541,7 @@ ROLLBACK; BEGIN; COPY table_1 FROM STDIN WITH CSV; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | 
select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -550,27 +550,27 @@ ROLLBACK; -- reference table accesses should always be sequential BEGIN; SELECT count(*) FROM table_6; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_6 | reference_table_access | not_accessed | not_accessed (1 row) UPDATE table_6 SET value = 15; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | not_accessed (1 row) ALTER TABLE table_6 ADD COLUMN x INT; SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_6 | reference_table_access | reference_table_access | reference_table_access (1 row) @@ -579,13 +579,13 @@ ROLLBACK; -- reference table join with a distributed table BEGIN; SELECT count(*) FROM table_1 JOIN table_6 USING(key); - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed table_6 | parallel_access | not_accessed | not_accessed @@ -596,7 +596,7 @@ ROLLBACK; BEGIN; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access (1 row) @@ -607,7 +607,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; TRUNCATE table_1; SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -617,7 +617,7 @@ ROLLBACK; BEGIN; TRUNCATE table_6; SELECT * FROM relation_acesses WHERE table_name IN ('table_6') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_6 | not_accessed | not_accessed | reference_table_access (1 row) @@ -628,7 +628,7 @@ ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); BEGIN; ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key);
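-- adding the foreign key runs DDL on the shards of both relations, so the
-- check below reports ddl_access = parallel_access for table_1 (referenced)
-- and table_2 (referencing); the following block repeats the same command
-- under citus.multi_shard_modify_mode = 'sequential', where both tables
-- stay not_parallel_accessed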
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -641,7 +641,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -650,16 +650,16 @@ BEGIN; ROLLBACK; CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_test', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- Adding partition tables via CREATE TABLE should have DDL access the partitioned table as well BEGIN; CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -671,7 +671,7 @@ CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test; BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -681,15 +681,15 @@ COMMIT; -- Adding partition tables via ATTACH PARTITION on distributed tables should have DDL access the partitioned table as well CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; SELECT create_distributed_table('partitioning_test_2010', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) BEGIN; ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | 
not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -699,13 +699,13 @@ COMMIT; -- reading from partitioned table marks all of its partitions BEGIN; SELECT count(*) FROM partitioning_test; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -717,13 +717,13 @@ COMMIT; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -735,7 +735,7 @@ COMMIT; BEGIN; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed @@ -748,7 +748,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; UPDATE partitioning_test SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -760,7 +760,7 @@ COMMIT; BEGIN; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -773,7 +773,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; ALTER TABLE partitioning_test ADD COLUMN X INT; SELECT * FROM relation_acesses WHERE table_name IN 
('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -784,13 +784,13 @@ ROLLBACK; -- reading from partition table marks its parent BEGIN; SELECT count(*) FROM partitioning_test_2009; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed @@ -802,13 +802,13 @@ COMMIT; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT count(*) FROM partitioning_test_2009; - count + count --------------------------------------------------------------------- 0 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -820,7 +820,7 @@ COMMIT; BEGIN; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | parallel_access | parallel_access | not_parallel_accessed partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed @@ -833,7 +833,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; UPDATE partitioning_test_2009 SET time = now(); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -845,7 +845,7 @@ COMMIT; BEGIN; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | 
not_parallel_accessed | parallel_access partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -858,7 +858,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE INDEX i1000000 ON partitioning_test_2009 (id); SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed @@ -872,7 +872,7 @@ BEGIN; TRUNCATE table_1 CASCADE; NOTICE: truncate cascades to table "table_2" SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access @@ -883,13 +883,13 @@ ROLLBACK; BEGIN; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed (1 row) @@ -900,13 +900,13 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte AS (SELECT count(*) FROM table_1) SELECT * FROM cte; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -916,7 +916,7 @@ COMMIT; BEGIN; WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) SELECT * FROM cte_1 ORDER BY 1; - key | value + key | value --------------------------------------------------------------------- 1000 | 1000 1001 | 1001 @@ -924,7 +924,7 @@ BEGIN; (3 rows) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed (1 row) @@ -934,13 +934,13 @@ ROLLBACK; BEGIN; WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access 
--------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -950,13 +950,13 @@ ROLLBACK; BEGIN; WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) SELECT count(*) FROM cte_1 ORDER BY 1; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_1 | parallel_access | parallel_access | not_parallel_accessed (1 row) @@ -970,13 +970,13 @@ INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; BEGIN; SELECT create_distributed_table('table_3', 'key'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access + table_name | select_access | dml_access | ddl_access --------------------------------------------------------------------- table_3 | not_parallel_accessed | parallel_access | parallel_access (1 row) diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out index a863c6530..e59a1f89e 100644 --- a/src/test/regress/expected/remove_coordinator.out +++ b/src/test/regress/expected/remove_coordinator.out @@ -1,7 +1,7 @@ -- removing coordinator from pg_dist_node should update pg_dist_colocation SELECT master_remove_node('localhost', :master_port); - master_remove_node + master_remove_node --------------------------------------------------------------------- - + (1 row) diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index de27df624..1d361d2fd 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -12,16 +12,16 @@ SET client_min_messages TO LOG; SET citus.log_local_commands TO ON; CREATE TABLE squares(a int, b int); SELECT create_reference_table('squares'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO squares SELECT i, i * i FROM generate_series(1, 10) i; -- should be executed locally SELECT count(*) FROM squares; LOG: executing the command locally: SELECT count(*) AS count FROM replicate_ref_to_coordinator.squares_8000000 squares - count + count --------------------------------------------------------------------- 10 (1 row) @@ -29,9 +29,9 @@ LOG: executing the command locally: SELECT count(*) AS count FROM replicate_ref -- create a second reference table CREATE TABLE numbers(a int); SELECT create_reference_table('numbers'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO numbers VALUES (20), (21); @@ -39,7 +39,7 @@ LOG: executing the command locally: INSERT INTO replicate_ref_to_coordinator.nu -- INSERT ... 
SELECT between reference tables BEGIN; EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 @@ -52,7 +52,7 @@ EXPLAIN INSERT INTO squares SELECT a, a*a FROM numbers; INSERT INTO squares SELECT a, a*a FROM numbers; SELECT * FROM squares WHERE a >= 20 ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 20 | 400 21 | 441 @@ -61,7 +61,7 @@ SELECT * FROM squares WHERE a >= 20 ORDER BY a; ROLLBACK; BEGIN; EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 @@ -75,7 +75,7 @@ EXPLAIN INSERT INTO numbers SELECT a FROM squares WHERE a < 3; INSERT INTO numbers SELECT a FROM squares WHERE a < 3; SELECT * FROM numbers ORDER BY a; - a + a --------------------------------------------------------------------- 1 2 @@ -86,7 +86,7 @@ SELECT * FROM numbers ORDER BY a; ROLLBACK; -- Make sure we hide shard tables ... SELECT citus_table_is_visible('numbers_8000001'::regclass::oid); - citus_table_is_visible + citus_table_is_visible --------------------------------------------------------------------- f (1 row) @@ -95,7 +95,7 @@ SELECT citus_table_is_visible('numbers_8000001'::regclass::oid); CREATE TABLE local_table(a int); INSERT INTO local_table VALUES (2), (4), (7), (20); EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Merge Join (cost=359.57..860.00 rows=32512 width=8) Merge Cond: (local_table.a = numbers_8000001.a) @@ -108,7 +108,7 @@ EXPLAIN SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers; (8 rows) SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1; - a | a + a | a --------------------------------------------------------------------- 20 | 20 (1 row) @@ -118,7 +118,7 @@ SELECT lt.a, sq.a, sq.b FROM local_table lt JOIN squares sq ON sq.a > lt.a and sq.b > 90 ORDER BY 1,2,3; - a | a | b + a | a | b --------------------------------------------------------------------- 2 | 10 | 100 4 | 10 | 100 @@ -158,14 +158,14 @@ ERROR: cannot join local tables and reference tables in a transaction block, ud CONTEXT: SQL statement "SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers ORDER BY 1" PL/pgSQL function test_reference_local_join_plpgsql_func() line 5 at PERFORM SELECT sum(a) FROM local_table; - sum + sum --------------------------------------------------------------------- 33 (1 row) SELECT sum(a) FROM numbers; LOG: executing the command locally: SELECT sum(a) AS sum FROM replicate_ref_to_coordinator.numbers_8000001 numbers - sum + sum --------------------------------------------------------------------- 41 (1 row) @@ -181,9 +181,9 @@ CONTEXT: SQL function "test_reference_local_join_proc" statement 1 CREATE SCHEMA s1; CREATE TABLE s1.ref(a int); SELECT create_reference_table('s1.ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) BEGIN; @@ -238,16 +238,16 @@ ERROR: relation local_table is not distributed WITH t AS (SELECT *, random() x FROM numbers) SELECT * FROM numbers, local_table WHERE EXISTS (SELECT * FROM t WHERE t.x = numbers.a); - a | 
a + a --------------------------------------------------------------------- (0 rows) -- shouldn't plan locally even if distributed table is in CTE or subquery CREATE TABLE dist(a int); SELECT create_distributed_table('dist', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO dist VALUES (20),(30); @@ -274,7 +274,7 @@ SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -285,7 +285,7 @@ SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) -> Distributed Subplan 24_1 @@ -306,7 +306,7 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; $Q$); - plan_is_distributed + plan_is_distributed --------------------------------------------------------------------- f (1 row) @@ -328,7 +328,7 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT abs(a.a) FROM local_table a, numbers b WHERE a.a = b.a; $Q$); - plan_is_distributed + plan_is_distributed --------------------------------------------------------------------- f (1 row) @@ -337,7 +337,7 @@ SELECT public.plan_is_distributed($Q$ EXPLAIN (COSTS FALSE) SELECT a.a FROM local_table a, numbers b WHERE a.a = b.a ORDER BY abs(a.a); $Q$); - plan_is_distributed + plan_is_distributed --------------------------------------------------------------------- f (1 row) diff --git a/src/test/regress/expected/replicated_partitioned_table.out b/src/test/regress/expected/replicated_partitioned_table.out index 5df222dc8..5ca5dc80b 100644 --- a/src/test/regress/expected/replicated_partitioned_table.out +++ b/src/test/regress/expected/replicated_partitioned_table.out @@ -28,9 +28,9 @@ INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01', SELECT create_distributed_table('collections', 'key'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- now create partition of an already distributed table @@ -44,9 +44,9 @@ NOTICE: Copying data from local table... -- finally attach a distributed table to a distributed table CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0; SELECT create_distributed_table('collections_5', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- load some data @@ -64,7 +64,7 @@ GROUP BY logicalrelid ORDER BY 1,2; - logicalrelid | placement_count + logicalrelid | placement_count --------------------------------------------------------------------- collections | 8 collections_1 | 8 @@ -81,7 +81,7 @@ FROM pg_dist_partition WHERE logicalrelid::text LIKE '%collections%'; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -145,9 +145,9 @@ HINT: Run the query on the parent table "collections" instead.
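-- a minimal illustrative sketch (not part of the original test file; the
-- events tables below are hypothetical): distributing a partitioned table
-- also distributes each of its partitions, which is why collections and
-- every collections_* partition show 8 placements above
CREATE TABLE events (id int, ts date) PARTITION BY RANGE (ts);
CREATE TABLE events_2009 PARTITION OF events FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
SELECT create_distributed_table('events', 'id');
-- expect one row per relation (events and events_2009), each with
-- shard_count * replication_factor placements
SELECT logicalrelid, count(*) AS placement_count
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid::text LIKE 'events%'
GROUP BY logicalrelid
ORDER BY 1;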
-- foreign key creation is disallowed due to replication factor > 1 CREATE TABLE fkey_test (key bigint PRIMARY KEY); SELECT create_distributed_table('fkey_test', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) ALTER TABLE @@ -168,13 +168,13 @@ ALTER TABLE collections DETACH PARTITION collections_6; ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 ); -- read queries work just fine SELECT count(*) FROM collections_1 WHERE key = 1; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT count(*) FROM collections_1 WHERE key != 1; - count + count --------------------------------------------------------------------- 1 (1 row) @@ -185,9 +185,9 @@ CREATE TABLE collections_agg ( sum_value numeric ); SELECT create_distributed_table('collections_agg', 'key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- pushdown roll-up @@ -212,9 +212,9 @@ CREATE INDEX ON customer_engagements (id, event_id); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('customer_engagements', 'id', 'hash'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- ingest some data for the tests @@ -245,14 +245,14 @@ ROLLBACK; -- modifications after repairing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0; SELECT * FROM customer_engagements ORDER BY 1,2,3; - id | event_id | value + id | event_id | value --------------------------------------------------------------------- 1 | 1 | 1 1 | 2 | 1 @@ -263,14 +263,14 @@ SELECT * FROM customer_engagements ORDER BY 1,2,3; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement + master_copy_shard_placement --------------------------------------------------------------------- - + (1 row) INSERT INTO customer_engagements VALUES (1, 1); SELECT count(*) FROM customer_engagements; - count + count --------------------------------------------------------------------- 5 (1 row) diff --git a/src/test/regress/expected/row_types.out b/src/test/regress/expected/row_types.out index 51a23c9ac..c82b33b48 100644 --- a/src/test/regress/expected/row_types.out +++ b/src/test/regress/expected/row_types.out @@ -3,9 +3,9 @@ CREATE SCHEMA row_types; SET search_path TO row_types; CREATE TABLE test (x int, y int); SELECT create_distributed_table('test','x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION table_returner(INT) RETURNS TABLE(name text, id INT) @@ -15,9 +15,9 @@ BEGIN END; $$ language plpgsql; SELECT create_distributed_function('table_returner(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION record_returner(INOUT id int, OUT name text) @@ -29,9 +29,9 @@ BEGIN END; $$
language plpgsql; SELECT create_distributed_function('record_returner(int)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) CREATE OR REPLACE FUNCTION identity_returner(x anyelement) @@ -42,15 +42,15 @@ BEGIN END; $$ language plpgsql; SELECT create_distributed_function('identity_returner(anyelement)'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) INSERT INTO test VALUES (1,2), (1,3), (2,2), (2,3); -- multi-shard queries support row types SELECT (x,y) FROM test ORDER BY x, y; - row + row --------------------------------------------------------------------- (1,2) (1,3) @@ -59,7 +59,7 @@ SELECT (x,y) FROM test ORDER BY x, y; (4 rows) SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; - row + row --------------------------------------------------------------------- (1,2) (1,3) @@ -68,7 +68,7 @@ SELECT (x,y) FROM test GROUP BY x, y ORDER BY x, y; (4 rows) SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; - array + array --------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} @@ -77,7 +77,7 @@ SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test ORDER BY x, y; (4 rows) SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; - array + array --------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} @@ -86,7 +86,7 @@ SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test ORDER BY x, y; (4 rows) select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; - foo | x | y + foo | x | y --------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 @@ -95,7 +95,7 @@ select distinct (x,y) AS foo, x, y FROM test ORDER BY x, y; (4 rows) SELECT table_returner(x) FROM test ORDER BY x, y; - table_returner + table_returner --------------------------------------------------------------------- (1,1) (1,1) @@ -104,7 +104,7 @@ SELECT table_returner(x) FROM test ORDER BY x, y; (4 rows) SELECT record_returner(x) FROM test ORDER BY x, y; - record_returner + record_returner --------------------------------------------------------------------- (2,returned) (2,returned) @@ -113,16 +113,16 @@ SELECT record_returner(x) FROM test ORDER BY x, y; (4 rows) SELECT NULLIF((x, y), (y, x)) FROM test ORDER BY x, y; - nullif + nullif --------------------------------------------------------------------- (1,2) (1,3) - + (2,3) (4 rows) SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, y; - least + least --------------------------------------------------------------------- (1,2) (1,3) @@ -131,7 +131,7 @@ SELECT LEAST((x, y), (y, x)) FROM test ORDER BY x, y; (4 rows) SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; - greatest + greatest --------------------------------------------------------------------- (2,1) (3,1) @@ -140,7 +140,7 @@ SELECT GREATEST((x, y), (y, x)) FROM test ORDER BY x, y; (4 rows) SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; - coalesce + coalesce --------------------------------------------------------------------- (1,2) (1,3) @@ -149,7 +149,7 @@ SELECT COALESCE(NULL, (x, y), (y, x)) FROM test ORDER BY x, y; (4 rows) SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; - row + row 
--------------------------------------------------------------------- (2,1) (3,1) @@ -158,10 +158,10 @@ SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test ORDER BY x, y; (4 rows) SELECT CASE x WHEN 2 THEN (x, y) END FROM test ORDER BY x, y; - case + case --------------------------------------------------------------------- - - + + (2,2) (2,3) (4 rows) @@ -180,94 +180,94 @@ SELECT array_agg((x, y)) FROM test; ERROR: input of anonymous composite types is not implemented -- router queries support row types SELECT (x,y) FROM test WHERE x = 1 ORDER BY x, y; - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT (x,y) AS foo FROM test WHERE x = 1 ORDER BY x, y; - foo + foo --------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT ARRAY[NULL,(x,(y,x)),NULL,(y,(x,y))] FROM test WHERE x = 1 ORDER BY x, y; - array + array --------------------------------------------------------------------- {NULL,"(1,\"(2,1)\")",NULL,"(2,\"(1,2)\")"} {NULL,"(1,\"(3,1)\")",NULL,"(3,\"(1,3)\")"} (2 rows) SELECT ARRAY[[(x,(y,x))],[(x,(x,y))]] FROM test WHERE x = 1 ORDER BY x, y; - array + array --------------------------------------------------------------------- {{"(1,\"(2,1)\")"},{"(1,\"(1,2)\")"}} {{"(1,\"(3,1)\")"},{"(1,\"(1,3)\")"}} (2 rows) select distinct (x,y) AS foo, x, y FROM test WHERE x = 1 ORDER BY x, y; - foo | x | y + foo | x | y --------------------------------------------------------------------- (1,2) | 1 | 2 (1,3) | 1 | 3 (2 rows) SELECT table_returner(x) FROM test WHERE x = 1 ORDER BY x, y; - table_returner + table_returner --------------------------------------------------------------------- (1,1) (1,1) (2 rows) SELECT record_returner(x) FROM test WHERE x = 1 ORDER BY x, y; - record_returner + record_returner --------------------------------------------------------------------- (2,returned) (2,returned) (2 rows) SELECT NULLIF((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - nullif + nullif --------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT LEAST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - least + least --------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT GREATEST((x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - greatest + greatest --------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT COALESCE(NULL, (x, y), (y, x)) FROM test WHERE x = 1 ORDER BY x, y; - coalesce + coalesce --------------------------------------------------------------------- (1,2) (1,3) (2 rows) SELECT CASE x WHEN 2 THEN (x, y) ELSE (y, x) END FROM test WHERE x = 1 ORDER BY x, y; - row + row --------------------------------------------------------------------- (2,1) (3,1) (2 rows) SELECT CASE x WHEN 2 THEN (x, y) END FROM test WHERE x = 1 ORDER BY x, y; - case + case --------------------------------------------------------------------- - - + + (2 rows) -- varying shape unsupported @@ -284,14 +284,14 @@ SELECT array_agg((x, y)) FROM test WHERE x = 1; ERROR: input of anonymous composite types is not implemented -- nested row expressions SELECT (x,(x,y)) AS foo FROM test WHERE x = 1 ORDER BY x, y; - foo + foo --------------------------------------------------------------------- (1,"(1,2)") (1,"(1,3)") (2 rows) SELECT (x,record_returner(x)) FROM test WHERE x = 1 ORDER BY x, y; - row + row --------------------------------------------------------------------- (1,"(2,returned)") 
(1,"(2,returned)") @@ -303,42 +303,42 @@ ERROR: input of anonymous composite types is not implemented -- try prepared statements PREPARE rec(int) AS SELECT (x,y*$1) FROM test WHERE x = $1 ORDER BY x, y; EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) (2 rows) EXECUTE rec(1); - row + row --------------------------------------------------------------------- (1,2) (1,3) diff --git a/src/test/regress/expected/sequential_modifications.out b/src/test/regress/expected/sequential_modifications.out index 53b47f9d7..a2a4d8b43 100644 --- a/src/test/regress/expected/sequential_modifications.out +++ b/src/test/regress/expected/sequential_modifications.out @@ -64,37 +64,37 @@ CREATE OR REPLACE FUNCTION set_local_multi_shard_modify_mode_to_sequential() -- disable 2PC recovery since our tests will check that ALTER SYSTEM SET citus.recover_2pc_interval TO -1; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) CREATE TABLE test_table(a int, b int); SELECT create_distributed_table('test_table', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- not useful if not in transaction SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential + set_local_multi_shard_modify_mode_to_sequential --------------------------------------------------------------------- - + (1 row) -- we should see #worker transactions -- when sequential mode is used SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT a_check CHECK(a > 0); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -103,14 +103,14 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); -- when parallel mode is used SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT b_check CHECK(b > 0); SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -119,14 +119,14 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions 
--------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT c_check CHECK(a > 0); SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs --------------------------------------------------------------------- t (1 row) @@ -134,37 +134,37 @@ SELECT no_distributed_2PCs(); SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) ALTER TABLE test_table ADD CONSTRAINT d_check CHECK(a > 0); SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs --------------------------------------------------------------------- t (1 row) CREATE TABLE ref_test(a int); SELECT create_reference_table('ref_test'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) SET citus.multi_shard_commit_protocol TO '1pc'; -- reference tables should always use 2PC SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -172,14 +172,14 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); -- reference tables should always use 2PC SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX ref_test_seq_index_2 ON ref_test(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -189,37 +189,37 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); SET citus.shard_replication_factor TO 2; CREATE TABLE test_table_rep_2 (a int); SELECT create_distributed_table('test_table_rep_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- 1PC should never use 2PC with rep > 1 SET citus.multi_shard_commit_protocol TO '1pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_1 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs --------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_2 ON test_table_rep_2(a); SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs 
--------------------------------------------------------------------- t (1 row) @@ -228,28 +228,28 @@ SELECT no_distributed_2PCs(); SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_3 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) CREATE INDEX test_table_rep_2_i_4 ON test_table_rep_2(a); SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -259,7 +259,7 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -267,7 +267,7 @@ SELECT recover_prepared_transactions(); CREATE INDEX CONCURRENTLY test_table_rep_2_i_5 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs --------------------------------------------------------------------- t (1 row) @@ -275,7 +275,7 @@ SELECT no_distributed_2PCs(); SET citus.multi_shard_commit_protocol TO '2pc'; SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -283,7 +283,7 @@ SELECT recover_prepared_transactions(); CREATE INDEX CONCURRENTLY test_table_rep_2_i_6 ON test_table_rep_2(a); -- we shouldn't see any distributed transactions SELECT no_distributed_2PCs(); - no_distributed_2pcs + no_distributed_2pcs --------------------------------------------------------------------- t (1 row) @@ -293,22 +293,22 @@ CREATE TABLE test_seq_truncate (a int); INSERT INTO test_seq_truncate SELECT i FROM generate_series(0, 100) i; SELECT create_distributed_table('test_seq_truncate', 'a'); NOTICE: Copying data from local table... 
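-- A minimal sketch of the check behind the distributed_2PCs_* helpers used in
-- this file, assuming only the standard Citus catalogs; the helpers' exact
-- definitions differ, but the "equal to worker count" variant reduces to
-- comparing pg_dist_transaction against the primary node count, roughly:
SELECT count(*) = (SELECT count(*) FROM pg_dist_node WHERE noderole = 'primary')
FROM pg_dist_transaction;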
- create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- with parallel modification mode, we should see #shards records SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -316,14 +316,14 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -332,36 +332,36 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); CREATE TABLE test_seq_truncate_rep_2 (a int); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_seq_truncate_rep_2', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_seq_truncate_rep_2 SELECT i FROM generate_series(0, 100) i; SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) TRUNCATE test_seq_truncate_rep_2; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -371,22 +371,22 @@ CREATE TABLE multi_shard_modify_test ( t_name varchar(25) not null, t_value integer not null); SELECT create_distributed_table('multi_shard_modify_test', 't_key'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- with parallel modification mode, we should see #shards records SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + 
distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -394,14 +394,14 @@ SELECT distributed_2PCs_are_equal_to_placement_count(); -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) DELETE FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -412,16 +412,16 @@ BEGIN; INSERT INTO multi_shard_modify_test VALUES (1,'1',1), (2,'2',2), (3,'3',3), (4,'4',4); -- now switch to sequential mode to enable a successful TRUNCATE SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential + set_local_multi_shard_modify_mode_to_sequential --------------------------------------------------------------------- - + (1 row) TRUNCATE multi_shard_modify_test; COMMIT; -- see that all the data was successfully removed SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -430,28 +430,28 @@ SELECT count(*) FROM multi_shard_modify_test; -- with sequential modification mode, we should see #primary worker records SET citus.multi_shard_modify_mode TO 'sequential'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) SET citus.multi_shard_modify_mode TO 'parallel'; SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; SELECT distributed_2PCs_are_equal_to_placement_count(); - distributed_2pcs_are_equal_to_placement_count + distributed_2pcs_are_equal_to_placement_count --------------------------------------------------------------------- t (1 row) @@ -462,16 +462,16 @@ BEGIN; INSERT INTO multi_shard_modify_test VALUES (1,'1',1), (2,'2',2), (3,'3',3), (4,'4',4); -- now switch to sequential mode to enable a successful INSERT .. 
SELECT SELECT set_local_multi_shard_modify_mode_to_sequential(); - set_local_multi_shard_modify_mode_to_sequential + set_local_multi_shard_modify_mode_to_sequential --------------------------------------------------------------------- - + (1 row) INSERT INTO multi_shard_modify_test SELECT * FROM multi_shard_modify_test; COMMIT; -- see that all the data was successfully inserted SELECT count(*) FROM multi_shard_modify_test; - count + count --------------------------------------------------------------------- 210 (1 row) @@ -479,14 +479,14 @@ SELECT count(*) FROM multi_shard_modify_test; ALTER SYSTEM SET citus.recover_2pc_interval TO DEFAULT; SET citus.shard_replication_factor TO DEFAULT; SELECT pg_reload_conf(); - pg_reload_conf + pg_reload_conf --------------------------------------------------------------------- t (1 row) -- The following tests are added to test if create_distributed_table honors sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -496,16 +496,16 @@ CREATE TABLE test_seq_multi_shard_update(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_multi_shard_update', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_seq_multi_shard_update VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); DELETE FROM test_seq_multi_shard_update WHERE b < 2; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -513,7 +513,7 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); DROP TABLE test_seq_multi_shard_update; -- Check if truncate works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -522,16 +522,16 @@ CREATE TABLE test_seq_truncate_after_create(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_truncate_after_create', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_seq_truncate_after_create VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); TRUNCATE test_seq_truncate_after_create; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) @@ -539,7 +539,7 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); DROP TABLE test_seq_truncate_after_create; -- Check if drop table works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -548,22 +548,22 @@ CREATE TABLE test_seq_drop_table(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_drop_table', 'a'); - create_distributed_table + 
create_distributed_table --------------------------------------------------------------------- - + (1 row) DROP TABLE test_seq_drop_table; COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) -- Check if copy errors out properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -572,15 +572,15 @@ CREATE TABLE test_seq_copy(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_copy', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) \COPY test_seq_copy FROM STDIN DELIMITER AS ','; ROLLBACK; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- f (1 row) @@ -588,7 +588,7 @@ SELECT distributed_2PCs_are_equal_to_worker_count(); DROP TABLE test_seq_copy; -- Check if DDL + CREATE INDEX works properly after create_distributed_table in sequential mode SELECT recover_prepared_transactions(); - recover_prepared_transactions + recover_prepared_transactions --------------------------------------------------------------------- 0 (1 row) @@ -597,9 +597,9 @@ CREATE TABLE test_seq_ddl_index(a int, b int); BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT create_distributed_table('test_seq_ddl_index', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test_seq_ddl_index VALUES (0, 0), (1, 0), (2, 0), (3, 0), (4, 0); @@ -607,7 +607,7 @@ BEGIN; CREATE INDEX idx ON test_seq_ddl_index(c); COMMIT; SELECT distributed_2PCs_are_equal_to_worker_count(); - distributed_2pcs_are_equal_to_worker_count + distributed_2pcs_are_equal_to_worker_count --------------------------------------------------------------------- t (1 row) diff --git a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out index ebe7eb458..531e98a16 100644 --- a/src/test/regress/expected/set_operation_and_local_tables.out +++ b/src/test/regress/expected/set_operation_and_local_tables.out @@ -2,16 +2,16 @@ CREATE SCHEMA recursive_set_local; SET search_path TO recursive_set_local, public; CREATE TABLE recursive_set_local.test (x int, y int); SELECT create_distributed_table('test', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE recursive_set_local.ref (a int, b int); SELECT create_reference_table('ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE recursive_set_local.local_test (x int, y int); @@ -28,7 +28,7 @@ DEBUG: generating subplan 3_2 for subquery SELECT x FROM recursive_set_local.te DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT 
SELECT intermediate_result.x FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- (0 rows) @@ -40,13 +40,13 @@ DEBUG: generating subplan 5_1 for subquery SELECT x FROM recursive_set_local.te DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 2 1 (2 rows) --- we'd first recursively plan the query with "test", thus don't need to recursively +-- we'd first recursively plan the query with "test", thus don't need to recursively -- plan other query (SELECT x FROM test LIMIT 5) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Router planner cannot handle multi-shard select queries @@ -56,7 +56,7 @@ DEBUG: generating subplan 7_1 for subquery SELECT x FROM recursive_set_local.te DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 2 1 @@ -66,7 +66,7 @@ DEBUG: Plan is router executable (SELECT a FROM ref) INTERSECT (SELECT i FROM generate_series(0, 100) i) ORDER BY 1 DESC; DEBUG: Creating router plan DEBUG: Plan is router executable - a + a --------------------------------------------------------------------- 3 2 @@ -91,7 +91,7 @@ DEBUG: generating subplan 12_2 for subquery SELECT x FROM recursive_set_local.t DEBUG: Plan 12 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('12_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 4 3 @@ -109,14 +109,14 @@ DEBUG: generating subplan 14_3 for subquery SELECT x FROM recursive_set_local.t DEBUG: Plan 14 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 4 3 (2 rows) -- use ctes inside unions along with local tables on the top level -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) ((SELECT * FROM cte_1) UNION (SELECT * FROM 
cte_2) UNION (SELECT x FROM local_test)) INTERSECT (SELECT i FROM generate_series(0, 100) i) @@ -130,7 +130,7 @@ DEBUG: generating subplan 16_3 for subquery SELECT x FROM recursive_set_local.l DEBUG: Plan 16 query after replacing subqueries and CTEs: (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 UNION SELECT intermediate_result.x FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -146,7 +146,7 @@ SELECT count(*) FROM ( - ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION + ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT a FROM ref) SELECT * FROM cte_1)) INTERSECT (SELECT x FROM local_test) ) as foo, @@ -165,7 +165,7 @@ DEBUG: Plan is router executable DEBUG: generating subplan 19_4 for subquery (SELECT cte_1.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte_1 UNION SELECT cte_1.a FROM (SELECT intermediate_result.a FROM read_intermediate_result('19_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) cte_1) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('19_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('19_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.test WHERE (test.y OPERATOR(pg_catalog.=) foo.x) DEBUG: Router planner cannot handle multi-shard select queries - count + count --------------------------------------------------------------------- 0 (1 row) @@ -176,7 +176,7 @@ SELECT count(*) FROM ( - ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION + ((WITH cte_1 AS (SELECT x FROM test) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT a FROM ref) SELECT * FROM cte_1)) INTERSECT (SELECT x FROM local_test) ) as foo, @@ -196,7 +196,7 @@ DEBUG: generating subplan 23_4 for subquery (SELECT cte_1.x FROM (SELECT interm DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('23_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) foo, recursive_set_local.ref WHERE (ref.a OPERATOR(pg_catalog.=) foo.x) DEBUG: Creating router plan DEBUG: Plan is router executable - count + count --------------------------------------------------------------------- 1 (1 row) @@ -214,7 +214,7 @@ DEBUG: Plan is router executable DEBUG: generating subplan 27_4 for subquery SELECT intermediate_result.x FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('27_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer) UNION SELECT intermediate_result.y FROM 
read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer) DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('27_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -235,7 +235,7 @@ DEBUG: Creating router plan DEBUG: Plan is router executable DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_set_local.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT cte.x FROM (SELECT intermediate_result.x FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) cte)) ORDER BY x, y DEBUG: Router planner cannot handle multi-shard select queries - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -255,7 +255,7 @@ DEBUG: generating subplan 35_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT u.x, u.y, local_test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('35_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.local_test USING (x)) ORDER BY u.x, u.y DEBUG: Local tables cannot be used in distributed queries. ERROR: relation local_test is not distributed --- though we replace some queries including the local query, the intermediate result is on the outer part of an outer join +-- though we replace some queries including the local query, the intermediate result is on the outer part of an outer join SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u LEFT JOIN test USING (x) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. DEBUG: generating subplan 39_1 for subquery SELECT x, y FROM recursive_set_local.local_test @@ -269,7 +269,7 @@ DEBUG: Plan 39 query after replacing subqueries and CTEs: SELECT u.x, u.y, test DEBUG: Router planner cannot handle multi-shard select queries ERROR: cannot pushdown the subquery DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join --- we replace some queries including the local query, the intermediate result is on the inner part of an outer join +-- we replace some queries including the local query, the intermediate result is on the inner part of an outer join SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test ORDER BY x LIMIT 1)) u RIGHT JOIN test USING (x) ORDER BY 1,2; DEBUG: Local tables cannot be used in distributed queries. 
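-- A hedged sketch of the planner rule these two outer-join cases exercise,
-- reusing the tables above: a recursively planned subquery (materialized as an
-- intermediate result) may only appear on the inner side of an outer join
-- with a distributed table.
SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test)) u
LEFT JOIN test USING (x);   -- errors: intermediate result on the outer side
SELECT * FROM ((SELECT * FROM local_test) INTERSECT (SELECT * FROM test)) u
RIGHT JOIN test USING (x);  -- works: intermediate result on the inner side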
DEBUG: generating subplan 42_1 for subquery SELECT x, y FROM recursive_set_local.local_test @@ -281,7 +281,7 @@ DEBUG: Plan is router executable DEBUG: generating subplan 42_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT test.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u RIGHT JOIN recursive_set_local.test USING (x)) ORDER BY test.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y + x | y | y --------------------------------------------------------------------- 1 | | 1 2 | | 2 @@ -299,7 +299,7 @@ DEBUG: Plan is router executable DEBUG: generating subplan 45_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) DEBUG: Plan 45 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('45_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_set_local.test USING (x)) ORDER BY u.x, u.y DEBUG: Router planner cannot handle multi-shard select queries - x | y | y + x | y | y --------------------------------------------------------------------- (0 rows) @@ -320,7 +320,7 @@ DEBUG: generating subplan 48_5 for subquery SELECT intermediate_result.x FROM r DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT x FROM (SELECT intermediate_result.x FROM read_intermediate_result('48_5'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) u ORDER BY x DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 1 2 @@ -328,7 +328,7 @@ DEBUG: Plan is router executable SET citus.task_executor_type TO 'task-tracker'; -- repartition is recursively planned before the set operation -(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC; +(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC; DEBUG: Local tables cannot be used in distributed queries. 
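-- A hedged sketch of the repartition case above, assuming the same test table
-- distributed on x: t1.x = t2.y does not equate the distribution columns of
-- both sides, so the join only runs when repartition joins are enabled; inside
-- a set operation, that join is planned recursively before the INTERSECT.
SET citus.enable_repartition_joins TO true;
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;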
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 2 @@ -369,7 +369,7 @@ DEBUG: generating subplan 53_4 for subquery SELECT x FROM recursive_set_local.t DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('53_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT ((SELECT intermediate_result.x FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION ALL SELECT intermediate_result.x FROM read_intermediate_result('53_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) INTERSECT SELECT i.i FROM generate_series(0, 100) i(i)) ORDER BY 1 DESC DEBUG: Creating router plan DEBUG: Plan is router executable - x + x --------------------------------------------------------------------- 2 1 diff --git a/src/test/regress/expected/set_operations.out b/src/test/regress/expected/set_operations.out index a42e1ebf1..a10b71c55 100644 --- a/src/test/regress/expected/set_operations.out +++ b/src/test/regress/expected/set_operations.out @@ -2,23 +2,23 @@ CREATE SCHEMA recursive_union; SET search_path TO recursive_union, public; CREATE TABLE recursive_union.test (x int, y int); SELECT create_distributed_table('test', 'x'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE recursive_union.ref (a int, b int); SELECT create_reference_table('ref'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE test_not_colocated (LIKE test); SELECT create_distributed_table('test_not_colocated', 'x', colocate_with := 'none'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO test VALUES (1,1), (2,2); @@ -34,7 +34,7 @@ DEBUG: generating subplan 3_2 for subquery SELECT x, y FROM recursive_union.tes DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -47,7 +47,7 @@ DEBUG: generating subplan 6_1 for subquery SELECT x, y FROM recursive_union.tes DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -57,7 +57,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) UNION (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b 
--------------------------------------------------------------------- 2 | 2 3 | 3 @@ -72,7 +72,7 @@ DEBUG: generating subplan 9_2 for subquery SELECT x, y FROM recursive_union.tes DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 1 | 1 @@ -87,7 +87,7 @@ DEBUG: generating subplan 12_1 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -98,7 +98,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) UNION ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- 2 | 2 2 | 2 @@ -115,7 +115,7 @@ DEBUG: generating subplan 15_2 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 15 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('15_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -128,7 +128,7 @@ DEBUG: generating subplan 18_1 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 2 | 2 (1 row) @@ -136,7 +136,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) INTERSECT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -151,7 +151,7 @@ DEBUG: generating subplan 21_2 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) 
intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -164,7 +164,7 @@ DEBUG: generating subplan 24_1 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT ALL SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 2 | 2 (1 row) @@ -172,7 +172,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) INTERSECT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -187,7 +187,7 @@ DEBUG: generating subplan 27_2 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- (0 rows) @@ -198,7 +198,7 @@ DEBUG: generating subplan 30_1 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 (1 row) @@ -206,7 +206,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) EXCEPT (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- (0 rows) @@ -219,7 +219,7 @@ DEBUG: generating subplan 33_2 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- (0 rows) @@ -230,7 +230,7 @@ DEBUG: generating subplan 36_1 for subquery SELECT x, y FROM recursive_union.te DEBUG: Plan 36 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('36_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT ALL 
SELECT ref.a, ref.b FROM recursive_union.ref ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 (1 row) @@ -238,7 +238,7 @@ DEBUG: Plan is router executable (SELECT * FROM ref) EXCEPT ALL (SELECT * FROM ref) ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- (0 rows) @@ -261,9 +261,9 @@ DEBUG: generating subplan 39_2 for subquery SELECT test.x, ref.a FROM (recursiv DEBUG: Plan 39 query after replacing subqueries and CTEs: (((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('39_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) INTERSECT SELECT ref.a, ref.b FROM recursive_union.ref) UNION ALL SELECT s.s, s.s FROM generate_series(1, 10) s(s)) EXCEPT SELECT 1, 1) UNION SELECT intermediate_result.x, intermediate_result.a FROM read_intermediate_result('39_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, a integer) ORDER BY 1, 2 DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- - 1 | + 1 | 2 | 2 3 | 3 4 | 4 @@ -278,7 +278,7 @@ DEBUG: Plan is router executable -- within a subquery, some unions can be pushed down SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -296,7 +296,7 @@ DEBUG: generating subplan 43_3 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('43_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -312,7 +312,7 @@ DEBUG: generating subplan 47_2 for subquery SELECT intermediate_result.x, inter DEBUG: Plan 47 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('47_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y DEBUG: Creating router plan DEBUG: Plan is router executable - x | y + x | y --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -322,7 +322,7 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM ref) UNION (SELECT * FROM ref)) u ORDER BY 1,2; DEBUG: Creating router plan DEBUG: Plan is router executable - a | b + a | b --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -330,7 +330,7 @@ DEBUG: Plan is router executable SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y + x | y --------------------------------------------------------------------- 1 | 1 1 | 1 @@ -340,7 +340,7 @@ DEBUG: Router planner cannot handle multi-shard select queries SELECT * FROM ((SELECT x, y FROM test) UNION ALL (SELECT y, x FROM test)) u ORDER BY 1,2; DEBUG: Router planner cannot handle multi-shard select queries - x | y + x | y 
 ---------------------------------------------------------------------
 1 | 1
 1 | 1
@@ -358,7 +358,7 @@ DEBUG: generating subplan 53_2 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -369,7 +369,7 @@ DEBUG: Plan is router executable
 SELECT * FROM ((SELECT * FROM ref) UNION ALL (SELECT * FROM ref)) u ORDER BY 1,2;
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- a | b 
+ a | b
 ---------------------------------------------------------------------
 2 | 2
 2 | 2
@@ -389,7 +389,7 @@ DEBUG: generating subplan 57_3 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('57_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -407,7 +407,7 @@ DEBUG: generating subplan 61_3 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('61_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -423,7 +423,7 @@ DEBUG: generating subplan 65_2 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('65_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 2 | 2
 (1 row)
@@ -431,7 +431,7 @@ DEBUG: Plan is router executable
 SELECT * FROM ((SELECT * FROM ref) INTERSECT (SELECT * FROM ref)) u ORDER BY 1,2;
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- a | b 
+ a | b
 ---------------------------------------------------------------------
 2 | 2
 3 | 3
@@ -449,7 +449,7 @@ DEBUG: generating subplan 69_3 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('69_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -465,7 +465,7 @@ DEBUG: generating subplan 73_3 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('73_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -479,7 +479,7 @@ DEBUG: generating subplan 77_2 for subquery SELECT intermediate_result.x, inter
 DEBUG: Plan 77 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('77_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 (1 row)
@@ -487,14 +487,14 @@ DEBUG: Plan is router executable
 SELECT * FROM ((SELECT * FROM ref) EXCEPT (SELECT * FROM ref)) u ORDER BY 1,2;
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- a | b 
+ a | b
 ---------------------------------------------------------------------
 (0 rows)
 
 -- unions can even be pushed down within a join
 SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test)) u JOIN test USING (x) ORDER BY 1,2;
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 1 | 1 | 1
 2 | 2 | 2
@@ -502,7 +502,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 SELECT * FROM ((SELECT * FROM test) UNION ALL (SELECT * FROM test)) u LEFT JOIN test USING (x) ORDER BY 1,2;
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 1 | 1 | 1
 1 | 1 | 1
@@ -523,7 +523,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 83_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
 DEBUG: Plan 83 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('83_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 1 | 1 | 1
 2 | 2 | 2
@@ -555,7 +555,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 91_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result.y, intermediate_result.x FROM read_intermediate_result('91_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer, x integer)
 DEBUG: Plan 91 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('91_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 1 | 1 | 1
 2 | 2 | 2
@@ -572,7 +572,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 95_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('95_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
 DEBUG: Plan 95 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('95_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 1 | 1 | 1
 2 | 2 | 2
@@ -581,7 +581,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
 -- a join between a set operation and a generate_series which is pushdownable
 SELECT * FROM ((SELECT * FROM test) UNION (SELECT * FROM test ORDER BY x)) u JOIN generate_series(1,10) x USING (x) ORDER BY 1,2;
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -600,7 +600,7 @@ DEBUG: generating subplan 100_3 for subquery SELECT intermediate_result.x, inte
 DEBUG: Plan 100 query after replacing subqueries and CTEs: SELECT u.x, u.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('100_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN (SELECT x_1.x FROM generate_series(1, 10) x_1(x)) x USING (x)) ORDER BY u.x, u.y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -617,7 +617,7 @@ DEBUG: push down of limit count: 4
 DEBUG: generating subplan 105_1 for subquery SELECT y FROM recursive_union.test ORDER BY y LIMIT 4
 DEBUG: Plan 105 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT test.x, test.y FROM recursive_union.test UNION SELECT test.x, test.y FROM recursive_union.test) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('105_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -639,7 +639,7 @@ DEBUG: generating subplan 107_4 for subquery SELECT intermediate_result.x, inte
 DEBUG: Plan 107 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('107_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('107_1'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -660,7 +660,7 @@ DEBUG: generating subplan 112_4 for subquery SELECT y FROM recursive_union.test
 DEBUG: Plan 112 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('112_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.y FROM read_intermediate_result('112_4'::text, 'binary'::citus_copy_format) intermediate_result(y integer))) ORDER BY x
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -679,7 +679,7 @@ DEBUG: generating subplan 117_2 for subquery SELECT x, y, rnk FROM (SELECT test
 DEBUG: Plan 117 query after replacing subqueries and CTEs: SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) UNION SELECT intermediate_result.x, intermediate_result.y, intermediate_result.rnk FROM read_intermediate_result('117_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer, rnk bigint) ORDER BY 1 DESC, 2 DESC, 3 DESC
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y | rnk 
+ x | y | rnk
 ---------------------------------------------------------------------
 2 | 2 | 1
 1 | 1 | 1
@@ -706,7 +706,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 122_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) EXCEPT SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)
 DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT u.x, u.y, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('122_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, u.y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y | y 
+ x | y | y
 ---------------------------------------------------------------------
 2 | 2 | 2
 (1 row)
@@ -741,7 +741,7 @@ DEBUG: generating subplan 130_4 for subquery SELECT intermediate_result.x, inte
 DEBUG: Plan 130 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('130_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -750,7 +750,7 @@ DEBUG: Plan is router executable
 -- subquery union in WHERE clause with partition column equality and implicit join is pushed down
 SELECT * FROM test a WHERE x IN (SELECT x FROM test b WHERE y = 1 UNION SELECT x FROM test c WHERE y = 2) ORDER BY 1,2;
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -770,7 +770,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 136_1 for subquery SELECT b.x FROM recursive_union.test b WHERE (b.y OPERATOR(pg_catalog.=) 1) UNION SELECT c.x FROM recursive_union.test c WHERE (c.y OPERATOR(pg_catalog.=) 2)
 DEBUG: Plan 136 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (NOT (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('136_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer)))) ORDER BY x, y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -786,7 +786,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 140_3 for subquery SELECT intermediate_result.x FROM read_intermediate_result('140_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) UNION SELECT intermediate_result.y FROM read_intermediate_result('140_2'::text, 'binary'::citus_copy_format) intermediate_result(y integer)
 DEBUG: Plan 140 query after replacing subqueries and CTEs: SELECT x, y FROM recursive_union.test a WHERE (x OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.x FROM read_intermediate_result('140_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer))) ORDER BY x, y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -820,7 +820,7 @@ DEBUG: generating subplan 146_1 for subquery SELECT test.x, test.y FROM recursi
 DEBUG: Plan 146 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('146_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) foo ORDER BY x DESC LIMIT 3
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 2 | 2
 1 | 1
@@ -839,7 +839,7 @@ DEBUG: generating subplan 150_3 for subquery SELECT intermediate_result.x FROM
 DEBUG: Plan 150 query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.x FROM read_intermediate_result('150_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) t(x) ORDER BY (count(DISTINCT x))
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- count 
+ count
 ---------------------------------------------------------------------
 2
 (1 row)
@@ -856,7 +856,7 @@ DEBUG: generating subplan 154_3 for subquery SELECT intermediate_result.count F
 DEBUG: Plan 154 query after replacing subqueries and CTEs: SELECT count(DISTINCT x) AS count FROM (SELECT intermediate_result.count FROM read_intermediate_result('154_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) t(x) ORDER BY (count(DISTINCT x))
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -874,7 +874,7 @@ DEBUG: generating subplan 158_3 for subquery SELECT intermediate_result.avg FRO
 DEBUG: Plan 158 query after replacing subqueries and CTEs: SELECT avg(DISTINCT x) AS avg FROM (SELECT intermediate_result.avg FROM read_intermediate_result('158_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) t(x) ORDER BY (avg(DISTINCT x))
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- avg 
+ avg
 ---------------------------------------------------------------------
 1.50000000000000000000
 (1 row)
@@ -926,7 +926,7 @@ DEBUG: generating subplan 164_2 for subquery SELECT x FROM recursive_union.test
 DEBUG: Plan 164 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('164_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('164_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x 
+ x
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -968,7 +968,7 @@ DEBUG: generating subplan 167_2 for subquery SELECT x FROM recursive_union.test
 DEBUG: Plan 167 query after replacing subqueries and CTEs: SELECT intermediate_result.x FROM read_intermediate_result('167_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT intermediate_result.x FROM read_intermediate_result('167_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer) ORDER BY 1 DESC
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x 
+ x
 ---------------------------------------------------------------------
 2
 1
@@ -989,7 +989,7 @@ DEBUG: generating subplan 170_3 for subquery SELECT intermediate_result.y FROM
 DEBUG: Plan 170 query after replacing subqueries and CTEs: SELECT y FROM (SELECT intermediate_result.y FROM read_intermediate_result('170_3'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive ORDER BY y DESC
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- y 
+ y
 ---------------------------------------------------------------------
 2
 1
@@ -999,7 +999,7 @@ DEBUG: Plan is router executable
 CREATE VIEW set_view_pushdown AS (SELECT x FROM test) UNION (SELECT x FROM test);
 SELECT * FROM set_view_pushdown ORDER BY 1 DESC;
 DEBUG: Router planner cannot handle multi-shard select queries
- x 
+ x
 ---------------------------------------------------------------------
 2
 1
@@ -1018,7 +1018,7 @@ DEBUG: Plan is router executable
 DEBUG: generating subplan 175_3 for subquery SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) UNION SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('175_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)
 DEBUG: Plan 175 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT u.x, test.y FROM ((SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('175_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u JOIN recursive_union.test USING (x)) ORDER BY u.x, test.y) set_view_recursive_second ORDER BY x, y
 DEBUG: Router planner cannot handle multi-shard select queries
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -1048,7 +1048,7 @@ DEBUG: generating subplan 179_8 for subquery SELECT x FROM (SELECT test.x FROM
 DEBUG: Plan 179 query after replacing subqueries and CTEs: (SELECT intermediate_result.x FROM read_intermediate_result('179_7'::text, 'binary'::citus_copy_format) intermediate_result(x integer) INTERSECT SELECT set_view_recursive.y FROM (SELECT intermediate_result.y FROM read_intermediate_result('179_6'::text, 'binary'::citus_copy_format) intermediate_result(y integer)) set_view_recursive) EXCEPT SELECT intermediate_result.x FROM read_intermediate_result('179_8'::text, 'binary'::citus_copy_format) intermediate_result(x integer)
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x 
+ x
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -1065,7 +1065,7 @@ DEBUG: generating subplan 188_3 for subquery SELECT intermediate_result.x, inte
 DEBUG: Plan 188 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('188_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -1083,7 +1083,7 @@ DEBUG: generating subplan 192_3 for subquery SELECT intermediate_result.x, inte
 DEBUG: Plan 192 query after replacing subqueries and CTEs: SELECT x, y FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('192_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) u ORDER BY x, y
 DEBUG: Creating router plan
 DEBUG: Plan is router executable
- x | y 
+ x | y
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
diff --git a/src/test/regress/expected/single_hash_repartition_join.out b/src/test/regress/expected/single_hash_repartition_join.out
index 2f239e381..04355880e 100644
--- a/src/test/regress/expected/single_hash_repartition_join.out
+++ b/src/test/regress/expected/single_hash_repartition_join.out
@@ -9,28 +9,28 @@ CREATE TABLE single_hash_repartition_second (id int, sum int, avg float);
 CREATE TABLE ref_table (id int, sum int, avg float);
 SET citus.shard_count TO 4;
 SELECT create_distributed_table('single_hash_repartition_first', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_distributed_table('single_hash_repartition_second', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SELECT create_reference_table('ref_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 SET citus.log_multi_join_order TO ON;
 SET client_min_messages TO DEBUG2;
 -- a very basic single hash re-partitioning example
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2
 WHERE
@@ -60,8 +60,8 @@ DETAIL: Creating dependency on merge taskId 20
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- the same query with the orders of the tables have changed
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_second t1, single_hash_repartition_first t2
 WHERE
@@ -122,8 +122,8 @@ DETAIL: Creating dependency on merge taskId 20
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- a more complicated join order, first colocated join, later single hash repartition join
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
 WHERE
@@ -165,8 +165,8 @@ DETAIL: Creating dependency on merge taskId 20
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- a more complicated join order, first hash-repartition join, later single hash repartition join
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
 WHERE
@@ -224,8 +224,8 @@ DETAIL: Creating dependency on merge taskId 20
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- single hash repartitioning is not supported between different column types
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
 WHERE
@@ -248,9 +248,9 @@ ERROR: cannot perform distributed planning on this query
 DETAIL: Cartesian products are currently unsupported
 -- single repartition query in CTE
 -- should work fine
-EXPLAIN WITH cte1 AS 
+EXPLAIN WITH cte1 AS
 (
- SELECT 
+ SELECT
 t1.id * t2.avg as data
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2
@@ -294,7 +294,7 @@ DETAIL: Creating dependency on merge taskId 20
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- two single repartitions
-EXPLAIN SELECT 
+EXPLAIN SELECT
 count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2, single_hash_repartition_second t3
@@ -344,9 +344,9 @@ DEBUG: pruning merge fetch taskId 7
 DETAIL: Creating dependency on merge taskId 24
 ERROR: the query contains a join that requires repartitioning
 HINT: Set citus.enable_repartition_joins to on to enable repartitioning
--- two single repartitions again, but this 
+-- two single repartitions again, but this
 -- time the columns of the second join is reverted
-EXPLAIN SELECT 
+EXPLAIN SELECT
 avg(t1.avg + t2.avg)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2, single_hash_repartition_second t3
@@ -404,8 +404,8 @@ HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- the following queries should also be a single hash repartition queries
 -- note that since we've manually updated the metadata without changing the
 -- the corresponding data, the results of the query would be wrong
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2
 WHERE
@@ -437,8 +437,8 @@ HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 -- the following queries should also be a single hash repartition queries
 -- note that since we've manually updated the metadata without changing the
 -- the corresponding data, the results of the query would be wrong
-EXPLAIN SELECT 
- count(*) 
+EXPLAIN SELECT
+ count(*)
 FROM
 single_hash_repartition_first t1, single_hash_repartition_second t2
 WHERE
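Each ERROR/HINT pair in single_hash_repartition_join.out comes from running the EXPLAIN with repartition joins disabled. A minimal sketch of the opt-in the HINT points to, reusing the tables defined at the top of this file (the join clause is illustrative, not taken from the test):

SET citus.enable_repartition_joins TO on;
SELECT count(*)
FROM single_hash_repartition_first t1, single_hash_repartition_second t2
WHERE t1.id = t2.sum;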
diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out
index 513bbaa14..ac7f878eb 100644
--- a/src/test/regress/expected/sql_procedure.out
+++ b/src/test/regress/expected/sql_procedure.out
@@ -9,9 +9,9 @@ SET SEARCH_PATH = procedure_schema;
 CREATE TABLE test_table(id integer , org_id integer);
 CREATE UNIQUE INDEX idx_table ON test_table(id, org_id);
 SELECT create_distributed_table('test_table','id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO test_table VALUES(1, 1);
@@ -22,7 +22,7 @@ CREATE PROCEDURE test_procedure_delete_insert(id int, org_id int) LANGUAGE SQL A
 $$;
 CALL test_procedure_delete_insert(2,3);
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 3
 (1 row)
@@ -40,7 +40,7 @@ CALL test_procedure_commit(2,5);
 ERROR: COMMIT is not allowed in a SQL function
 CONTEXT: SQL function "test_procedure_commit" during startup
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 3
 (1 row)
@@ -55,7 +55,7 @@ CALL test_procedure_rollback(2,15);
 ERROR: ROLLBACK is not allowed in a SQL function
 CONTEXT: SQL function "test_procedure_rollback" during startup
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 3
 (1 row)
@@ -73,7 +73,7 @@ END;
 $$;
 CALL test_procedure_delete_insert(2,3);
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 3
 (1 row)
@@ -94,7 +94,7 @@ CONTEXT: while executing command on localhost:xxxxx
 SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
 PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 12
 (1 row)
@@ -114,7 +114,7 @@ CONTEXT: while executing command on localhost:xxxxx
 SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
 PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 2 | 30
 (1 row)
@@ -130,7 +130,7 @@ END;
 $$;
 CALL test_procedure_rollback(2,5);
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -145,7 +145,7 @@ END;
 $$;
 CALL test_procedure_rollback_2(12, 15);
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 (0 rows)
 
@@ -161,7 +161,7 @@ $$;
 INSERT INTO test_table VALUES (1, 1), (2, 2);
 CALL test_procedure_rollback_3(2,15);
 SELECT * FROM test_table ORDER BY 1, 2;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 1 | 1
 2 | 15
@@ -190,14 +190,14 @@ BEGIN
 END;
 $$;
 SELECT * from test_table;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 (0 rows)
 
 call test_procedure(1,1);
 call test_procedure(20, 20);
 SELECT * from test_table;
- id | org_id 
+ id | org_id
 ---------------------------------------------------------------------
 (0 rows)
 
diff --git a/src/test/regress/expected/ssl_by_default.out b/src/test/regress/expected/ssl_by_default.out
index 50b7348ce..d75bc1b28 100644
--- a/src/test/regress/expected/ssl_by_default.out
+++ b/src/test/regress/expected/ssl_by_default.out
@@ -6,13 +6,13 @@
 -- ssl can only be enabled by default on installations that are OpenSSL-enabled.
 SHOW ssl_ciphers \gset
 SELECT :'ssl_ciphers' != 'none' AS hasssl;
- hasssl 
+ hasssl
 ---------------------------------------------------------------------
 t
 (1 row)
 
 SHOW ssl;
- ssl 
+ ssl
 ---------------------------------------------------------------------
 on
 (1 row)
@@ -20,14 +20,14 @@ SHOW ssl;
 SELECT run_command_on_workers($$
 SHOW ssl;
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,on)
 (localhost,57638,t,on)
 (2 rows)
 
 SHOW citus.node_conninfo;
- citus.node_conninfo 
+ citus.node_conninfo
 ---------------------------------------------------------------------
 sslmode=require
 (1 row)
@@ -35,7 +35,7 @@ SHOW citus.node_conninfo;
 SELECT run_command_on_workers($$
 SHOW citus.node_conninfo;
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,sslmode=require)
 (localhost,57638,t,sslmode=require)
@@ -44,14 +44,14 @@ $$);
 SELECT run_command_on_workers($$
 SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid();
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,t)
 (localhost,57638,t,t)
 (2 rows)
 
 SHOW ssl_ciphers;
- ssl_ciphers 
+ ssl_ciphers
 ---------------------------------------------------------------------
 TLSv1.2+HIGH:!aNULL:!eNULL
 (1 row)
@@ -59,7 +59,7 @@ SHOW ssl_ciphers;
 SELECT run_command_on_workers($$
 SHOW ssl_ciphers;
 $$);
- run_command_on_workers 
+ run_command_on_workers
 ---------------------------------------------------------------------
 (localhost,57637,t,TLSv1.2+HIGH:!aNULL:!eNULL)
 (localhost,57638,t,TLSv1.2+HIGH:!aNULL:!eNULL)
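sql_procedure.out above draws the line between SQL-language and PL/pgSQL procedures: transaction control fails at CALL time in the former ('COMMIT is not allowed in a SQL function') and is accepted in the latter. A minimal sketch with hypothetical procedure names, against the same test_table:

-- defined fine, but fails when called: SQL functions cannot control transactions
CREATE PROCEDURE commit_fails(i int) LANGUAGE SQL AS $$
 INSERT INTO test_table VALUES (i, i);
 COMMIT;
$$;
-- PL/pgSQL procedures may COMMIT between statements
CREATE PROCEDURE commit_works(i int) LANGUAGE plpgsql AS $$
BEGIN
 INSERT INTO test_table VALUES (i, i);
 COMMIT;
END;
$$;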
diff --git a/src/test/regress/expected/subqueries_deep.out b/src/test/regress/expected/subqueries_deep.out
index ef6c09251..77cd491ac 100644
--- a/src/test/regress/expected/subqueries_deep.out
+++ b/src/test/regress/expected/subqueries_deep.out
@@ -7,27 +7,27 @@ SET client_min_messages TO DEBUG1;
 -- subquery in FROM -> FROM -> FROM should be replaced due to OFFSET
 -- one level up subquery should be replaced due to GROUP BY on non partition key
 -- one level up subquery should be replaced due to LIMUT
-SELECT 
- DISTINCT user_id 
-FROM 
+SELECT
+ DISTINCT user_id
+FROM
 (
- SELECT users_table.user_id FROM users_table, 
+ SELECT users_table.user_id FROM users_table,
 (
- SELECT 
+ SELECT
 avg(event_type) as avg_val
 FROM
 (SELECT
- event_type, users_table.user_id 
- FROM 
+ event_type, users_table.user_id
+ FROM
 users_table, (SELECT user_id, event_type FROM events_table WHERE value_2 < 3 ORDER BY 1, 2 OFFSET 3) as foo
- WHERE 
- foo.user_id = users_table.user_id) bar, users_table 
- WHERE 
- bar.user_id = users_table.user_id 
- GROUP BY 
+ WHERE
+ foo.user_id = users_table.user_id) bar, users_table
+ WHERE
+ bar.user_id = users_table.user_id
+ GROUP BY
 users_table.value_1
 ) as baz
- WHERE 
+ WHERE
 baz.avg_val < users_table.user_id
 ORDER BY 1 LIMIT 3
@@ -38,7 +38,7 @@ DEBUG: generating subplan 1_2 for subquery SELECT avg(bar.event_type) AS avg_va
 DEBUG: push down of limit count: 3
 DEBUG: generating subplan 1_3 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT intermediate_result.avg_val FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(avg_val numeric)) baz WHERE (baz.avg_val OPERATOR(pg_catalog.<) (users_table.user_id)::numeric) ORDER BY users_table.user_id LIMIT 3
 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub1 ORDER BY user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 3
 (1 row)
@@ -51,18 +51,18 @@ SELECT event, array_length(events_table, 1)
 FROM (
 SELECT event, array_agg(t.user_id) AS events_table
 FROM (
- SELECT 
+ SELECT
 DISTINCT ON(e.event_type::text) e.event_type::text as event, e.time, e.user_id
- FROM 
+ FROM
 users_table AS u,
 events_table AS e
- WHERE u.user_id = e.user_id AND 
- u.user_id IN 
+ WHERE u.user_id = e.user_id AND
+ u.user_id IN
 (
- SELECT 
- user_id 
- FROM 
- users_table 
+ SELECT
+ user_id
+ FROM
+ users_table
 WHERE value_2 >= 5
 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id)
 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
@@ -79,7 +79,7 @@ DEBUG: generating subplan 5_2 for subquery SELECT user_id FROM public.users_tab
 DEBUG: generating subplan 5_3 for subquery SELECT DISTINCT ON ((e.event_type)::text) (e.event_type)::text AS event, e."time", e.user_id FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))
 DEBUG: generating subplan 5_4 for subquery SELECT t.event, array_agg(t.user_id) AS events_table FROM (SELECT intermediate_result.event, intermediate_result."time", intermediate_result.user_id FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(event text, "time" timestamp without time zone, user_id integer)) t, public.users_table WHERE (users_table.value_1 OPERATOR(pg_catalog.=) (t.event)::integer) GROUP BY t.event
 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT event, array_length(events_table, 1) AS array_length FROM (SELECT intermediate_result.event, intermediate_result.events_table FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(event text, events_table integer[])) q ORDER BY (array_length(events_table, 1)) DESC, event
- event | array_length 
+ event | array_length
 ---------------------------------------------------------------------
 3 | 26
 4 | 21
@@ -89,32 +89,32 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT event, array_le
 5 | 9
 (6 rows)
 
--- this test probably doesn't add too much value, 
+-- this test probably doesn't add too much value,
 -- but recurse 6 times for fun
 SELECT count(*)
 FROM
 (
- SELECT avg(min) FROM 
+ SELECT avg(min) FROM
 (
 SELECT min(users_table.value_1) FROM
 (
- SELECT avg(event_type) as avg_ev_type FROM 
+ SELECT avg(event_type) as avg_ev_type FROM
 (
- SELECT 
- max(value_1) as mx_val_1 
+ SELECT
+ max(value_1) as mx_val_1
 FROM (
- SELECT 
+ SELECT
 avg(event_type) as avg
 FROM
 (
- SELECT 
- cnt 
- FROM 
+ SELECT
+ cnt
+ FROM
 (SELECT count(*) as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table
- WHERE 
+ WHERE
 users_table.user_id = level_1.cnt
 ) as level_2, events_table
- WHERE events_table.user_id = level_2.cnt 
+ WHERE events_table.user_id = level_2.cnt
 GROUP BY level_2.cnt
 ) as level_3, users_table
 WHERE user_id = level_3.avg
@@ -123,9 +123,9 @@ FROM
 WHERE level_4.mx_val_1 = events_table.user_id
 GROUP BY level_4.mx_val_1
 ) as level_5, users_table
- WHERE 
+ WHERE
 level_5.avg_ev_type = users_table.user_id
- GROUP BY 
+ GROUP BY
 level_5.avg_ev_type
 ) as level_6, users_table WHERE users_table.user_id = level_6.min
 GROUP BY users_table.value_1
@@ -137,43 +137,43 @@ DEBUG: generating subplan 10_4 for subquery SELECT avg(events_table.event_type)
 DEBUG: generating subplan 10_5 for subquery SELECT min(users_table.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('10_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, public.users_table WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (users_table.user_id)::numeric) GROUP BY level_5.avg_ev_type
 DEBUG: generating subplan 10_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('10_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1
 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('10_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
 
 -- same query happening in the subqueries in WHERE
--- this test probably doesn't add too much value, 
+-- this test probably doesn't add too much value,
 -- but recurse 6 times for fun
-SELECT 
- * 
-FROM 
- users_table 
+SELECT
+ *
+FROM
+ users_table
 WHERE user_id IN (
 SELECT count(*)
 FROM
 (
- SELECT avg(min) FROM 
+ SELECT avg(min) FROM
 (
 SELECT min(users_table.value_1) FROM
 (
- SELECT avg(event_type) as avg_ev_type FROM 
+ SELECT avg(event_type) as avg_ev_type FROM
 (
- SELECT 
- max(value_1) as mx_val_1 
+ SELECT
+ max(value_1) as mx_val_1
 FROM (
- SELECT 
+ SELECT
 avg(event_type) as avg
 FROM
 (
- SELECT 
- cnt 
- FROM 
+ SELECT
+ cnt
+ FROM
 (SELECT count(*) as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table
- WHERE 
+ WHERE
 users_table.user_id = level_1.cnt
 ) as level_2, events_table
- WHERE events_table.user_id = level_2.cnt 
+ WHERE events_table.user_id = level_2.cnt
 GROUP BY level_2.cnt
 ) as level_3, users_table
 WHERE user_id = level_3.avg
@@ -182,9 +182,9 @@ WHERE user_id IN (
 WHERE level_4.mx_val_1 = events_table.user_id
 GROUP BY level_4.mx_val_1
 ) as level_5, users_table
- WHERE 
+ WHERE
 level_5.avg_ev_type = users_table.user_id
- GROUP BY 
+ GROUP BY
 level_5.avg_ev_type
 ) as level_6, users_table WHERE users_table.user_id = level_6.min
 GROUP BY users_table.value_1
@@ -197,7 +197,7 @@ DEBUG: generating subplan 17_5 for subquery SELECT min(users_table.value_1) AS
 DEBUG: generating subplan 17_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('17_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1
 DEBUG: generating subplan 17_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('17_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('17_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)))
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
 (0 rows)
 
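The subplan identifiers in the DEBUG lines above (10_1 through 10_6, then 17_1 through 17_7) show subqueries_deep.out replacing each nesting level bottom-up with a read_intermediate_result() scan. The trace depends only on the message level the file already sets:

SET client_min_messages TO DEBUG1; -- each replaced level emits a 'generating subplan N_M' line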
diff --git a/src/test/regress/expected/subqueries_not_supported.out b/src/test/regress/expected/subqueries_not_supported.out
index 3ce95e7bc..63c7ab417 100644
--- a/src/test/regress/expected/subqueries_not_supported.out
+++ b/src/test/regress/expected/subqueries_not_supported.out
@@ -6,33 +6,33 @@ SET search_path TO not_supported, public;
 SET client_min_messages TO DEBUG1;
 CREATE TABLE users_table_local AS SELECT * FROM users_table;
 -- we don't support subqueries with local tables when they are not leaf queries
-SELECT 
- * 
+SELECT
+ *
 FROM
 (
- SELECT 
- users_table_local.user_id 
- FROM 
+ SELECT
+ users_table_local.user_id
+ FROM
 users_table_local, (SELECT user_id FROM events_table) as evs
 WHERE users_table_local.user_id = evs.user_id
 ) as foo;
 ERROR: relation users_table_local is not distributed
 RESET client_min_messages;
 -- we don't support subqueries with local tables when they are not leaf queries
-SELECT user_id FROM users_table WHERE user_id IN 
- (SELECT 
- user_id 
- FROM 
- users_table_local JOIN (SELECT user_id FROM events_table_local) as foo 
+SELECT user_id FROM users_table WHERE user_id IN
+ (SELECT
+ user_id
+ FROM
+ users_table_local JOIN (SELECT user_id FROM events_table_local) as foo
 USING (user_id)
 );
 ERROR: relation "events_table_local" does not exist
 SET client_min_messages TO DEBUG1;
 -- we don't support aggregate distinct if the group by is not on partition key, expect for count distinct
 -- thus baz and bar are recursively planned but not foo
-SELECT 
- * 
-FROM 
+SELECT
+ *
+FROM
 (
 SELECT avg(DISTINCT value_1), random() FROM users_table GROUP BY user_id OFFSET 3
 ) as baz,
@@ -47,16 +47,16 @@ DEBUG: generating subplan 4_2 for subquery SELECT count(DISTINCT value_1) AS co
 ERROR: cannot compute aggregate (distinct)
 DETAIL: table partitioning is unsuitable for aggregate (distinct)
 -- we don't support array_aggs with ORDER BYs
-SELECT 
- * 
+SELECT
+ *
 FROM
 (
- SELECT 
- array_agg(users_table.value_2 ORDER BY users_table.time) 
- FROM 
+ SELECT
+ array_agg(users_table.value_2 ORDER BY users_table.time)
+ FROM
 users_table, (SELECT user_id FROM events_table) as evs
 WHERE users_table.user_id = evs.user_id
- GROUP BY users_table.value_2 
+ GROUP BY users_table.value_2
 LIMIT 5
 ) as foo;
 ERROR: array_agg with order by is unsupported
@@ -65,12 +65,12 @@ SET citus.enable_router_execution TO false;
 SELECT user_id
 FROM
- (SELECT 
- DISTINCT users_table.user_id 
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ (SELECT
+ DISTINCT users_table.user_id
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (1,2,3,4)
 ORDER BY 1 DESC LIMIT 5
 ) as foo
@@ -81,9 +81,9 @@ DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT user_id FROM (
 ERROR: cannot handle complex subqueries when the router executor is disabled
 SET citus.enable_router_execution TO true;
 -- window functions are not allowed if they're not partitioned on the distribution column
-SELECT 
- * 
-FROM 
+SELECT
+ *
+FROM
 (
 SELECT
 user_id, time, rnk
@@ -101,12 +101,12 @@ LIMIT 10) as foo;
 ERROR: could not run distributed query because the window function that is used cannot be pushed down
 HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
--- OUTER JOINs where the outer part is recursively planned and not the other way 
+-- OUTER JOINs where the outer part is recursively planned and not the other way
 -- around is not supported
 SELECT
 foo.value_2
 FROM
- (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) LIMIT 5) as foo 
+ (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) LIMIT 5) as foo
 LEFT JOIN
 (SELECT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar
 ON(foo.value_2 = bar.value_2);
@@ -118,18 +118,18 @@ DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer jo
 -- Aggregates in subquery without partition column can be planned recursively
 -- unless there is a reference to an outer query
 SELECT
- * 
+ *
 FROM
- users_table 
+ users_table
 WHERE
- user_id IN 
+ user_id IN
 (
 SELECT
- SUM(events_table.user_id) 
+ SUM(events_table.user_id)
 FROM
- events_table 
+ events_table
 WHERE
- users_table.user_id = events_table.user_id 
+ users_table.user_id = events_table.user_id
 )
 ;
 ERROR: cannot push down this subquery
@@ -137,20 +137,20 @@ DETAIL: Aggregates without group by are currently unsupported when a subquery r
 -- Having qual without group by on partition column can be planned recursively
 -- unless there is a reference to an outer query
 SELECT
- * 
+ *
 FROM
- users_table 
+ users_table
 WHERE
- user_id IN 
+ user_id IN
 (
 SELECT
- SUM(events_table.user_id) 
+ SUM(events_table.user_id)
 FROM
- events_table 
+ events_table
 WHERE
- events_table.user_id = users_table.user_id 
+ events_table.user_id = users_table.user_id
 HAVING
- MIN(value_2) > 2 
+ MIN(value_2) > 2
 )
 ;
 ERROR: cannot push down this subquery
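Of the rejections in subqueries_not_supported.out, the array_agg case is specifically about the ORDER BY inside the aggregate. A hedged sketch of the contrast (the second statement is my assumption of the supported form, not part of the test):

-- rejected: 'array_agg with order by is unsupported'
SELECT array_agg(value_2 ORDER BY time) FROM users_table GROUP BY user_id;
-- plain array_agg, grouped by the distribution column, can be pushed down
SELECT array_agg(value_2) FROM users_table GROUP BY user_id;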
diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out
index 64d48aa19..4c13f9bf8 100644
--- a/src/test/regress/expected/subquery_and_cte.out
+++ b/src/test/regress/expected/subquery_and_cte.out
@@ -5,9 +5,9 @@ SET search_path TO subquery_and_ctes;
 CREATE TABLE users_table_local AS SELECT * FROM users_table;
 CREATE TABLE dist_table (id int, value int);
 SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 INSERT INTO dist_table (id, value) VALUES(1, 2),(2, 3),(3,4);
@@ -47,7 +47,7 @@ DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT dist_cte.user_i
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 1644
 (1 row)
@@ -62,7 +62,7 @@ SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
 FROM cte1, cte2
 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type
 LIMIT 5;
- user_id | value_1 | user_id | event_type 
+ user_id | value_1 | user_id | event_type
 ---------------------------------------------------------------------
 1 | 1 | 1 | 0
 1 | 1 | 1 | 0
@@ -84,7 +84,7 @@ LIMIT 5;
 DEBUG: generating subplan 8_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_and_ctes.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1)
 DEBUG: generating subplan 8_2 for CTE cte2: SELECT user_id, "time", event_type, value_2, value_3, value_4 FROM subquery_and_ctes.events_table WHERE (user_id OPERATOR(pg_catalog.=) 6)
 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT cte1.user_id, cte1.value_1, cte2.user_id, cte2.user_id FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte1, (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) cte2 ORDER BY cte1.user_id, cte1.value_1, cte2.user_id, cte2.event_type LIMIT 5
- user_id | value_1 | user_id | user_id 
+ user_id | value_1 | user_id | user_id
 ---------------------------------------------------------------------
 1 | 1 | 6 | 6
 1 | 1 | 6 | 6
@@ -157,7 +157,7 @@ DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT dist_cte.user_
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 17_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE ((foo.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) cte.user_id))
- count 
+ count
 ---------------------------------------------------------------------
 30608
 (1 row)
@@ -187,7 +187,7 @@ DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT dist_cte.user_
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 21_2 for subquery SELECT DISTINCT value_2 FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
 DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) ORDER BY cte.user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 4
 3
@@ -217,7 +217,7 @@ DEBUG: generating subplan 26_2 for CTE dist_cte: SELECT user_id FROM subquery_a
 DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT dist_cte.user_id FROM ((SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) local_cte JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id)))
 DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT user_id FROM subquery_and_ctes.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20))
 DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 5
@@ -246,7 +246,7 @@ FROM
 ORDER BY 1 DESC;
 DEBUG: generating subplan 29_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
 DEBUG: Plan 29 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('29_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo ORDER BY user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 5
@@ -285,7 +285,7 @@ WHERE foo.user_id = bar.user_id
 ORDER BY 1 DESC;
 DEBUG: generating subplan 31_1 for CTE cte: SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))
 DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 5
@@ -340,7 +340,7 @@ DEBUG: generating subplan 33_2 for CTE cte: SELECT events_table.event_type, use
 DEBUG: push down of limit count: 2
 DEBUG: generating subplan 33_3 for subquery SELECT users_table.user_id, some_events.event_type FROM subquery_and_ctes.users_table, (SELECT cte.event_type, cte.user_id FROM (SELECT intermediate_result.event_type, intermediate_result.user_id FROM read_intermediate_result('33_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer, user_id integer)) cte ORDER BY cte.event_type DESC) some_events WHERE ((users_table.user_id OPERATOR(pg_catalog.=) some_events.user_id) AND (some_events.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY some_events.event_type, users_table.user_id LIMIT 2
 DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT DISTINCT bar.user_id FROM (SELECT cte.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte ORDER BY cte.user_id DESC) foo, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('33_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC LIMIT 5
- user_id 
+ user_id
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -381,13 +381,13 @@ DEBUG: push down of limit count: 5
 DEBUG: generating subplan 37_3 for subquery SELECT DISTINCT cte.user_id FROM subquery_and_ctes.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('37_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('37_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC
 DEBUG: Plan 37 query after replacing subqueries and CTEs: SELECT foo.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('37_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, subquery_and_ctes.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) ORDER BY events_table."time" DESC, events_table.user_id DESC, foo.user_id DESC LIMIT 5
 DEBUG: push down of limit count: 5
- user_id | user_id | time | event_type | value_2 | value_3 | value_4 
+ user_id | user_id | time | event_type | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 4 | 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 | 
- 2 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 | 
- 4 | 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 | 
- 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | 
- 1 | 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 | 
+ 4 | 1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 |
+ 2 | 4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 |
+ 4 | 3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 |
+ 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 |
+ 1 | 3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 |
 (5 rows)
 
 -- now recursively plan subqueries inside the CTEs that contains LIMIT and OFFSET
@@ -432,7 +432,7 @@ DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT dist_cte.user_
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 42_2 for subquery SELECT DISTINCT users_table.user_id FROM subquery_and_ctes.users_table, subquery_and_ctes.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 432
 (1 row)
@@ -488,13 +488,13 @@ DEBUG: generating subplan 48_2 for subquery SELECT DISTINCT users_table.user_id
 DEBUG: generating subplan 48_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id)
 DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, subquery_and_ctes.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2) ORDER BY users_table."time" DESC, foo.cnt DESC, users_table.user_id DESC, users_table.value_1 DESC LIMIT 5
 DEBUG: push down of limit count: 5
- cnt | user_id | time | value_1 | value_2 | value_3 | value_4 
+ cnt | user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | 
- 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | 
- 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | 
+ 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 |
+ 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 |
+ 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 |
 (5 rows)
 
 -- recursive CTES are not supported inside subqueries as well
- (SELECT 
+ (SELECT
 users_table.value_2, avg(value_1)
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (1,2,3,4)
 GROUP BY users_table.value_2
 ORDER BY 1 DESC
@@ -73,7 +73,7 @@ FROM
 ORDER BY 2 DESC, 1;
 DEBUG: generating subplan 5_1 for subquery SELECT users_table.value_2, avg(users_table.value_1) AS avg FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC
 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT value_2, avg FROM (SELECT intermediate_result.value_2, intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, avg numeric)) foo ORDER BY avg DESC, value_2
- value_2 | avg 
+ value_2 | avg
 ---------------------------------------------------------------------
 4 | 2.8453608247422680
 2 | 2.6833855799373041
@@ -87,10 +87,10 @@
 SELECT
 *
 FROM
- (SELECT 
+ (SELECT
 events_table.value_2
- FROM 
- events_table 
+ FROM
+ events_table
 WHERE
 event_type IN (1,2,3,4)
 ORDER BY 1 DESC
@@ -104,7 +104,7 @@ FROM
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 7_1 for subquery SELECT value_2 FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) ORDER BY value_2 DESC LIMIT 5
 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.i FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT i.i FROM generate_series(0, 100) i(i)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.i) ORDER BY bar.i DESC, foo.value_2
- value_2 | i 
+ value_2 | i
 ---------------------------------------------------------------------
 5 | 5
 5 | 5
@@ -117,16 +117,16 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT foo.value_2, ba
 SELECT
 *
 FROM
- (SELECT 
+ (SELECT
 count(*)
- FROM 
- events_table 
+ FROM
+ events_table
 WHERE
 event_type IN (1,2,3,4)
 ) as foo;
 DEBUG: generating subplan 9_1 for subquery SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))
 DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) foo
- count 
+ count
 ---------------------------------------------------------------------
 87
 (1 row)
@@ -135,18 +135,18 @@
 SELECT
 *
 FROM
- (SELECT 
- SUM(events_table.user_id) 
- FROM 
- events_table 
+ (SELECT
+ SUM(events_table.user_id)
+ FROM
+ events_table
 WHERE
 event_type IN (1,2,3,4)
- HAVING 
+ HAVING
 MIN(value_2) > 2
 ) as foo;
 DEBUG: generating subplan 11_1 for subquery SELECT sum(user_id) AS sum FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) HAVING (min(value_2) OPERATOR(pg_catalog.>) 2)
 DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT sum FROM (SELECT intermediate_result.sum FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint)) foo
- sum 
+ sum
 ---------------------------------------------------------------------
 (0 rows)
@@ -155,22 +155,22 @@ DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT sum FROM (SELE
 SELECT
 *
 FROM
- (SELECT 
+ (SELECT
 users_table.value_2
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (1,2,3,4)
 GROUP BY users_table.value_2
 ORDER BY 1 DESC
 ) as foo,
- (SELECT 
+ (SELECT
 users_table.value_3
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (5,6,7,8)
 GROUP BY users_table.value_3
 ORDER BY 1 DESC
@@ -180,7 +180,7 @@ FROM
 DEBUG: generating subplan 13_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC
 DEBUG: generating subplan 13_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC
 DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2
- value_2 | value_3 
+ value_2 | value_3
 ---------------------------------------------------------------------
 5 | 5
 4 | 4
@@ -194,22 +194,22 @@
 SELECT
 DISTINCT ON (citus) citus, postgres, citus + 1 as c1, postgres-1 as p1
 FROM
- (SELECT 
+ (SELECT
 users_table.value_2
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (1,2,3,4)
 GROUP BY users_table.value_2
 ORDER BY 1 DESC
 ) as foo(postgres),
- (SELECT 
+ (SELECT
 users_table.user_id
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (5,6,7,8)
 ORDER BY 1 DESC
 ) as bar (citus)
@@ -219,7 +219,7 @@ FROM
 DEBUG: generating subplan 16_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC
 DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT ON (bar.citus) bar.citus, foo.postgres, (bar.citus OPERATOR(pg_catalog.+) 1) AS c1, (foo.postgres OPERATOR(pg_catalog.-) 1) AS p1 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo(postgres), (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar(citus) WHERE (foo.postgres OPERATOR(pg_catalog.=) bar.citus) ORDER BY bar.citus DESC, foo.postgres DESC LIMIT 3
 DEBUG: push down of limit count: 3
- citus | postgres | c1 | p1 
+ citus | postgres | c1 | p1
 ---------------------------------------------------------------------
 5 | 5 | 6 | 4
 4 | 4 | 5 | 3
@@ -231,22 +231,22 @@
 SELECT
 *
 FROM
- (SELECT 
+ (SELECT
 users_table.value_2
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (1,2,3,4)
 GROUP BY users_table.value_2
 ORDER BY 1 DESC
 ) as foo,
- (SELECT 
+ (SELECT
 users_table.user_id
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (5,6,7,8)
 ORDER BY 1 DESC
 ) as bar
@@ -256,7 +256,7 @@ FROM
 DEBUG: generating subplan 18_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC
 DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('18_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3
 DEBUG: push down of limit count: 3
- value_2 | user_id 
+ value_2 | user_id
 ---------------------------------------------------------------------
 5 | 5
 5 | 5
@@ -266,13 +266,13 @@ DEBUG: push down of limit count: 3
 -- subqueries in WHERE should be replaced
 SELECT DISTINCT user_id
 FROM users_table
-WHERE 
+WHERE
 user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5)
 ORDER BY 1 DESC;
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5
 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 4
 3
@@ -281,28 +281,28 @@
 (4 rows)
 -- subquery in FROM -> FROM -> FROM should be replaced due to OFFSET
-SELECT 
- DISTINCT user_id 
-FROM 
+SELECT
+ DISTINCT user_id
+FROM
 (
- SELECT users_table.user_id FROM users_table, 
+ SELECT users_table.user_id FROM users_table,
 (
- SELECT 
+ SELECT
 event_type, user_id
 FROM
- (SELECT event_type, users_table.user_id FROM users_table, 
+ (SELECT event_type, users_table.user_id FROM users_table,
 (SELECT user_id, event_type FROM events_table WHERE value_2 < 3 OFFSET 3) as foo
 WHERE foo.user_id = users_table.user_id
 ) bar
 ) as baz
 WHERE baz.user_id = users_table.user_id
 ) as sub1
- ORDER BY 1 DESC 
+ ORDER BY 1 DESC
 LIMIT 3;
 DEBUG: generating subplan 22_1 for subquery SELECT user_id, event_type FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3
 DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3
 DEBUG: push down of limit count: 3
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 5
@@ -314,18 +314,18 @@ SELECT user_id, array_length(events_table, 1)
 FROM (
 SELECT user_id, array_agg(event ORDER BY time) AS events_table
 FROM (
- SELECT 
+ SELECT
 u.user_id, e.event_type::text AS event, e.time
- FROM 
+ FROM
 users_table AS u,
 events_table AS e
- WHERE u.user_id = e.user_id AND 
- u.user_id IN 
+ WHERE u.user_id = e.user_id AND
+ u.user_id IN
 (
- SELECT 
- user_id 
- FROM 
- users_table 
+ SELECT
+ user_id
+ FROM
+ users_table
 WHERE value_2 >= 5
 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id)
 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
@@ -338,16 +338,16 @@ ORDER BY 2 DESC, 1;
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 24_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5
 DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('24_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id
- user_id | array_length 
+ user_id | array_length
 ---------------------------------------------------------------------
 5 | 364
 (1 row)
 -- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to LIMIT
-SELECT 
- user_id 
-FROM 
- users_table 
+SELECT
+ user_id
+FROM
+ users_table
 WHERE
 user_id
 IN
 (
@@ -416,7 +416,7 @@ ORDER BY 1;
 DEBUG: push down of limit count: 10
 DEBUG: generating subplan 26_1 for subquery SELECT user_id, count(*) AS count_pay FROM public.users_table WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10
 DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, intermediate_result.count_pay FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id
- user_id 
+ user_id
 ---------------------------------------------------------------------
 2
 3
@@ -440,7 +440,7 @@ DEBUG: push down of limit count: 5
 DEBUG: generating subplan 28_1 for subquery SELECT users_table.user_id FROM public.users_table, (SELECT events_table.user_id FROM public.events_table) evs WHERE (users_table.user_id OPERATOR(pg_catalog.=) evs.user_id) ORDER BY users_table.user_id LIMIT 5
 DEBUG: generating subplan 28_2 for subquery SELECT count(*) AS count FROM public.users_table GROUP BY user_id
 DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.count FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)))
- user_id 
+ user_id
 ---------------------------------------------------------------------
 (0 rows)
diff --git a/src/test/regress/expected/subquery_complex_target_list.out b/src/test/regress/expected/subquery_complex_target_list.out
index 7ffb27f5c..89d4e4db6 100644
--- a/src/test/regress/expected/subquery_complex_target_list.out
+++ b/src/test/regress/expected/subquery_complex_target_list.out
@@ -19,7 +19,7 @@ LIMIT 3;
 DEBUG: push down of limit count: 20
 DEBUG: generating subplan 1_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20
 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 6 | 1
 5 | 3
@@ -35,7 +35,7 @@ FROM
 ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5;
 DEBUG: generating subplan 3_1 for subquery SELECT user_id, value_1, value_2 FROM public.users_table OFFSET 0
 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT x, y, value_2 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer, value_2 integer)) foo(x, y, value_2) ORDER BY x DESC, y DESC, value_2 DESC LIMIT 5
- x | y | value_2 
+ x | y | value_2
 ---------------------------------------------------------------------
 6 | 5 | 2
 6 | 5 | 0
@@ -75,7 +75,7 @@ DEBUG: generating subplan 5_3 for subquery SELECT count(DISTINCT value_2) AS cn
 DEBUG: push down of limit count: 4
 DEBUG: generating subplan 5_4 for subquery SELECT user_id, sum(DISTINCT value_2) AS sum FROM public.users_table GROUP BY user_id ORDER BY user_id DESC LIMIT 4
 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, bat.sum FROM (SELECT intermediate_result.avg FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 bigint)) bar, (SELECT intermediate_result.cnt_2 FROM read_intermediate_result('5_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 bigint)) baz, (SELECT intermediate_result.user_id, intermediate_result.sum FROM read_intermediate_result('5_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, sum bigint)) bat, public.events_table WHERE ((foo.avg OPERATOR(pg_catalog.<>) (bar.cnt_1)::numeric) AND (baz.cnt_2 OPERATOR(pg_catalog.=) events_table.event_type)) ORDER BY foo.avg DESC
- avg | cnt_1 | cnt_2 | sum 
+ avg | cnt_1 | cnt_2 | sum
 ---------------------------------------------------------------------
 3.5000000000000000 | 6 | 6 | 10
 (1 row)
@@ -118,7 +118,7 @@ DEBUG: generating subplan 10_2 for subquery SELECT (min(value_3) OPERATOR(pg_ca
 DEBUG: push down of limit count: 3
 DEBUG: generating subplan 10_3 for subquery SELECT min("time") AS min, max("time") AS max, count("time") AS count, count(*) FILTER (WHERE (user_id OPERATOR(pg_catalog.=) 3)) AS cnt_with_filter, count(*) FILTER (WHERE ((user_id)::text OPERATOR(pg_catalog.~~) '%3%'::text)) AS cnt_with_filter_2 FROM public.users_table ORDER BY (min("time")) DESC LIMIT 3
 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."?column?", foo."?column?_1" AS "?column?", foo.sum, foo.count, foo.avg, bar."?column?", bar."?column?_1" AS "?column?", bar.sum, bar.count, bar.avg, baz.min, baz.max, baz.count, baz.cnt_with_filter, baz.cnt_with_filter_2 FROM (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer, sum bigint, count double precision, avg bigint)) foo("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?", intermediate_result.sum, intermediate_result.count, intermediate_result.avg FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result("?column?" double precision, "?column?_1" double precision, sum double precision, count bigint, avg double precision)) bar("?column?", "?column?_1", sum, count, avg), (SELECT intermediate_result.min, intermediate_result.max, intermediate_result.count, intermediate_result.cnt_with_filter, intermediate_result.cnt_with_filter_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(min timestamp without time zone, max timestamp without time zone, count bigint, cnt_with_filter bigint, cnt_with_filter_2 bigint)) baz ORDER BY foo."?column?" DESC
- ?column? | ?column? | sum | count | avg | ?column? | ?column? | sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2 
+ ?column? | ?column? | sum | count | avg | ?column? | ?column? | sum | count | avg | min | max | count | cnt_with_filter | cnt_with_filter_2
 ---------------------------------------------------------------------
 2 | 3 | 376 | 101 | 4 | 0 | 2.5 | 273 | 101 | 2.7029702970297 | Wed Nov 22 18:19:49.944985 2017 | Thu Nov 23 17:30:34.635085 2017 | 101 | 17 | 17
 (1 row)
@@ -173,7 +173,7 @@ DEBUG: generating subplan 14_3 for subquery SELECT avg(CASE WHEN (user_id OPERA
 DEBUG: push down of limit count: 25
 DEBUG: generating subplan 14_4 for subquery SELECT COALESCE(value_3, (20)::double precision) AS count_pay FROM public.users_table ORDER BY COALESCE(value_3, (20)::double precision) OFFSET 20 LIMIT 5
 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.cnt_1, baz.cnt_2, baz.cnt_3, baz.sum_1, baz.l_year, baz.pos, tar.count_pay FROM (SELECT intermediate_result.avg FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) foo, (SELECT intermediate_result.cnt_1 FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt_1 double precision)) bar, (SELECT intermediate_result.cnt_2, intermediate_result.cnt_3, intermediate_result.sum_1, intermediate_result.l_year, intermediate_result.pos FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt_2 numeric, cnt_3 numeric, sum_1 bigint, l_year double precision, pos integer)) baz, (SELECT intermediate_result.count_pay FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(count_pay double precision)) tar, public.events_table WHERE (((foo.avg)::double precision OPERATOR(pg_catalog.<>) bar.cnt_1) AND (baz.cnt_2 OPERATOR(pg_catalog.<>) (events_table.event_type)::numeric)) ORDER BY foo.avg DESC
- avg | cnt_1 | cnt_2 | cnt_3 | sum_1 | l_year | pos | count_pay 
+ avg | cnt_1 | cnt_2 | cnt_3 | sum_1 | l_year | pos | count_pay
 ---------------------------------------------------------------------
 30.14666771571734992301 | 3308.14619815793 | 2.5000000000000000 | | 31 | 2017 | 0 | 1
 (1 row)
@@ -195,7 +195,7 @@ FROM
 DEBUG: generating subplan 19_1 for subquery SELECT avg(value_3) AS avg FROM public.users_table GROUP BY value_1, value_2
 DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT DISTINCT ON (foo.avg) foo.avg, bar.avg2 FROM (SELECT intermediate_result.avg FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(avg double precision)) foo, (SELECT avg(users_table.value_3) AS avg2 FROM public.users_table GROUP BY users_table.value_1, users_table.value_2, users_table.user_id) bar WHERE (foo.avg OPERATOR(pg_catalog.=) bar.avg2) ORDER BY foo.avg DESC, bar.avg2 DESC LIMIT 3
 DEBUG: push down of limit count: 3
- avg | avg2 
+ avg | avg2
 ---------------------------------------------------------------------
 5 | 5
 4 | 4
@@ -254,7 +254,7 @@ DEBUG: generating subplan 21_1 for subquery SELECT user_id FROM public.users_ta
 DEBUG: generating subplan 21_2 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (count(DISTINCT value_1) OPERATOR(pg_catalog.>) 2) ORDER BY value_2 DESC LIMIT 3
 DEBUG: generating subplan 21_3 for subquery SELECT avg(user_id) AS avg FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.>) 2) GROUP BY value_2 HAVING (sum(value_1) OPERATOR(pg_catalog.>) 10) ORDER BY ((sum(value_3) OPERATOR(pg_catalog.-) (avg(value_1))::double precision) OPERATOR(pg_catalog.-) (COALESCE((array_upper(ARRAY[max(user_id)], 1) OPERATOR(pg_catalog.*) 5), 0))::double precision) DESC LIMIT 3
 DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT a.user_id, b.value_2, c.avg FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) a, (SELECT intermediate_result.value_2 FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) b, (SELECT intermediate_result.avg FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) c WHERE (b.value_2 OPERATOR(pg_catalog.<>) a.user_id) ORDER BY c.avg DESC, b.value_2 DESC, a.user_id DESC LIMIT 5
- user_id | value_2 | avg 
+ user_id | value_2 | avg
 ---------------------------------------------------------------------
 4 | 5 | 4.1666666666666667
 3 | 5 | 4.1666666666666667
@@ -291,7 +291,7 @@ DEBUG: push down of limit count: 5
 DEBUG: generating subplan 25_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: generating subplan 25_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND false AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: Plan 25 query after replacing subqueries and CTEs: SELECT bar.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('25_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('25_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.>) bar.user_id) ORDER BY bar.user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 (0 rows)
@@ -337,7 +337,7 @@ DEBUG: push down of limit count: 4
 DEBUG: generating subplan 28_1 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rnk FROM (SELECT events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4, rank() OVER my_win AS rnk FROM public.events_table WINDOW my_win AS (PARTITION BY events_table.user_id ORDER BY events_table."time" DESC) ORDER BY (rank() OVER my_win) DESC) foo_inner ORDER BY user_id DESC LIMIT 4
 DEBUG: generating subplan 28_2 for subquery SELECT user_id, "time", event_type, value_2, value_3, value_4, rank() OVER my_win AS rnk FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 3) WINDOW my_win AS (PARTITION BY event_type ORDER BY "time" DESC)
 DEBUG: Plan 28 query after replacing subqueries and CTEs: SELECT foo.user_id, foo."time", foo.rnk, bar.user_id, bar."time", bar.rnk FROM (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) foo, (SELECT foo_1.user_id, foo_1."time", foo_1.rnk FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.rnk FROM read_intermediate_result('28_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint, rnk bigint)) foo_1 ORDER BY foo_1.rnk DESC, foo_1.user_id DESC, foo_1."time" DESC) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) ORDER BY foo.rnk DESC, foo."time" DESC, bar."time" LIMIT 5
- user_id | time | rnk | user_id | time | rnk 
+ user_id | time | rnk | user_id | time | rnk
 ---------------------------------------------------------------------
 (0 rows)
@@ -358,25 +358,25 @@ DEBUG: push down of limit count: 20
 DEBUG: generating subplan 31_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20
 DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3
 FETCH 1 FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 6 | 1
 (1 row)
 FETCH 1 FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 5 | 3
 (1 row)
 FETCH 1 FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 4 | 6
 (1 row)
 FETCH 1 FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 (0 rows)
@@ -398,7 +398,7 @@ DEBUG: push down of limit count: 20
 DEBUG: generating subplan 33_1 for subquery SELECT user_id FROM public.users_table GROUP BY user_id ORDER BY (count(*)) DESC LIMIT 20
 DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT event_type, count(DISTINCT value_2) AS count FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('33_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) GROUP BY event_type ORDER BY event_type DESC, (count(DISTINCT value_2)) DESC LIMIT 3
 FETCH ALL FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 6 | 1
 5 | 3
@@ -406,7 +406,7 @@ DEBUG: Plan 33 query after replacing subqueries and CTEs: SELECT event_type, co
 (3 rows)
 FETCH ALL FROM recursive_subquery;
- event_type | count 
+ event_type | count
 ---------------------------------------------------------------------
 (0 rows)
diff --git a/src/test/regress/expected/subquery_executors.out b/src/test/regress/expected/subquery_executors.out
index 1b7fa7fae..e5c74e5db 100644
--- a/src/test/regress/expected/subquery_executors.out
+++ b/src/test/regress/expected/subquery_executors.out
@@ -6,19 +6,19 @@ SET search_path TO subquery_executor, public;
 CREATE TABLE users_table_local AS SELECT * FROM users_table;
 SET client_min_messages TO DEBUG1;
 -- subquery with router planner
-SELECT 
- count(*) 
+SELECT
+ count(*)
 FROM
 (
 SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table
 ) as bar
-WHERE foo.value_2 = bar.user_id; 
+WHERE foo.value_2 = bar.user_id;
 DEBUG: generating subplan 2_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0
 DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('2_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -26,66 +26,66 @@ DEBUG: Plan 2 query after replacing subqueries and CTEs: SELECT count(*) AS cou
 -- subquery with router but not logical plannable
 -- bar is recursively planned
 SELECT
- count(*) 
+ count(*)
 FROM
 (
 SELECT user_id, sum(value_2) over (partition by user_id) AS counter FROM users_table WHERE user_id = 15
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table
 ) as bar
-WHERE foo.counter = bar.user_id; 
+WHERE foo.counter = bar.user_id;
 DEBUG: generating subplan 4_1 for subquery SELECT user_id FROM public.users_table
 DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) 15)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.counter OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
 -- subquery with real-time query
-SELECT 
- count(*) 
+SELECT
+ count(*)
 FROM
 (
 SELECT value_2 FROM users_table WHERE user_id != 15 OFFSET 0
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table
 ) as bar
-WHERE foo.value_2 = bar.user_id; 
+WHERE foo.value_2 = bar.user_id;
 DEBUG: generating subplan 6_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.<>) 15) OFFSET 0
 DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 1612
 (1 row)
 -- subquery with repartition query
 SET citus.enable_repartition_joins to ON;
-SELECT 
- count(*) 
+SELECT
+ count(*)
 FROM
 (
 SELECT DISTINCT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND users_table.user_id < 2
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table
 ) as bar
-WHERE foo.value_2 = bar.user_id; 
+WHERE foo.value_2 = bar.user_id;
 DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2))
 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 58
 (1 row)
 -- mixed of all executors (including local execution)
-SELECT 
- count(*) 
+SELECT
+ count(*)
 FROM
 (
 SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table OFFSET 0
 ) as bar,
@@ -95,51 +95,51 @@ FROM
 (
 SELECT user_id FROM users_table_local WHERE user_id = 2
 ) baw
-WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; 
+WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id;
 DEBUG: generating subplan 10_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0
 DEBUG: generating subplan 10_2 for subquery SELECT user_id FROM public.users_table OFFSET 0
 DEBUG: generating subplan 10_3 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2))
 DEBUG: generating subplan 10_4 for subquery SELECT user_id FROM subquery_executor.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2)
 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('10_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id))
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
 SET citus.enable_repartition_joins to OFF;
--- final query is router 
-SELECT 
- count(*) 
+-- final query is router
+SELECT
+ count(*)
 FROM
 (
 SELECT value_2 FROM users_table WHERE user_id = 1 OFFSET 0
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table WHERE user_id = 2 OFFSET 0
 ) as bar
-WHERE foo.value_2 = bar.user_id; 
+WHERE foo.value_2 = bar.user_id;
 DEBUG: generating subplan 14_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0
 DEBUG: generating subplan 14_2 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 2) OFFSET 0
 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 18
 (1 row)
 -- final query is real-time
-SELECT 
- count(*) 
+SELECT
+ count(*)
 FROM
 (
 SELECT value_2 FROM users_table WHERE user_id = 1 OFFSET 0
-) as foo, 
+) as foo,
 (
 SELECT user_id FROM users_table WHERE user_id != 2
 ) as bar
-WHERE foo.value_2 = bar.user_id; 
+WHERE foo.value_2 = bar.user_id;
 DEBUG: generating subplan 17_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1) OFFSET 0
 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.<>) 2)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id)
- count 
+ count
 ---------------------------------------------------------------------
 103
 (1 row)
diff --git a/src/test/regress/expected/subquery_in_where.out b/src/test/regress/expected/subquery_in_where.out
index d2ceae235..360e716a1 100644
--- a/src/test/regress/expected/subquery_in_where.out
+++ b/src/test/regress/expected/subquery_in_where.out
@@ -17,7 +17,7 @@ WHERE events_user_id IN (SELECT user_id
 DEBUG: generating subplan 1_1 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table
 DEBUG: generating subplan 1_2 for subquery SELECT user_id FROM public.users_table
 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
- count 
+ count
 ---------------------------------------------------------------------
 101
 (1 row)
@@ -46,7 +46,7 @@ WHERE id IN (SELECT user_id
 DEBUG: generating subplan 6_1 for subquery SELECT 1 AS id, 2 AS value_1, 3 AS value_3 UNION ALL SELECT 2 AS id, 3 AS value_1, 4 AS value_3
 DEBUG: generating subplan 6_2 for subquery SELECT user_id FROM public.events_table
 DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT intermediate_result.id, intermediate_result.value_1, intermediate_result.value_3 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer, value_3 integer)) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
- id | value_1 | value_3 
+ id | value_1 | value_3
 ---------------------------------------------------------------------
 1 | 2 | 3
 2 | 3 | 4
@@ -79,7 +79,7 @@ DEBUG: generating subplan 8_3 for subquery SELECT intermediate_result.user_id F
 DEBUG: generating subplan 8_4 for CTE event_id: SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table
 DEBUG: generating subplan 8_5 for subquery SELECT events_user_id, events_time, event_type FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('8_4'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) event_id ORDER BY events_user_id, events_time, event_type LIMIT 10
 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('8_5'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -137,7 +137,7 @@ DEBUG: push down of limit count: 10
 DEBUG: generating subplan 14_4 for subquery SELECT value_1 FROM public.users_table ORDER BY value_1 LIMIT 10
 DEBUG: generating subplan 14_5 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('14_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION ALL SELECT intermediate_result.value_1 FROM read_intermediate_result('14_4'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)
 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('14_5'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.events_user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer)))
- user_id 
+ user_id
 ---------------------------------------------------------------------
 1
 1
@@ -175,7 +175,7 @@ DEBUG: push down of limit count: 10
 DEBUG: generating subplan 20_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table ORDER BY user_id, "time" LIMIT 10
 DEBUG: generating subplan 20_2 for subquery SELECT max((abs((user_id OPERATOR(pg_catalog.*) 1)) OPERATOR(pg_catalog.+) mod(user_id, 3))) AS val_1 FROM public.users_table GROUP BY user_id
 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('20_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.val_1 FROM read_intermediate_result('20_2'::text, 'binary'::citus_copy_format) intermediate_result(val_1 integer)))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -205,7 +205,7 @@ DEBUG: push down of limit count: 10
 DEBUG: generating subplan 23_1 for subquery SELECT user_id AS events_user_id, "time" AS events_time, event_type FROM public.events_table LIMIT 10
 DEBUG: generating subplan 23_2 for subquery SELECT DISTINCT user_id FROM public.users_table GROUP BY user_id
 DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE (events_user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -248,7 +248,7 @@ DEBUG: generating subplan 26_1 for subquery SELECT user_id AS events_user_id, "
 DEBUG: generating subplan 26_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id
 DEBUG: generating subplan 26_3 for subquery SELECT max(user_id) AS max FROM public.users_table GROUP BY user_id
 DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('26_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer))))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -291,7 +291,7 @@ DEBUG: generating subplan 30_1 for subquery SELECT user_id AS events_user_id, "
 DEBUG: generating subplan 30_2 for subquery SELECT min(user_id) AS min FROM public.users_table GROUP BY user_id
 DEBUG: generating subplan 30_3 for subquery SELECT max(value_2) AS max FROM public.users_table GROUP BY user_id
 DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.events_user_id, intermediate_result.events_time, intermediate_result.event_type FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(events_user_id integer, events_time timestamp without time zone, event_type integer)) sub_table WHERE ((events_user_id OPERATOR(pg_catalog.>=) ANY (SELECT intermediate_result.min FROM read_intermediate_result('30_2'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (events_user_id OPERATOR(pg_catalog.<=) ANY (SELECT intermediate_result.max FROM read_intermediate_result('30_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer))))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -330,7 +330,7 @@ DEBUG: generating subplan 35_1 for subquery SELECT user_id, "time", value_1, va
 DEBUG: generating subplan 35_2 for subquery SELECT value_2 FROM public.events_table
 DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('35_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
 DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) cte
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -366,7 +366,7 @@ DEBUG: generating subplan 38_1 for subquery SELECT user_id, "time", value_1, va
 DEBUG: generating subplan 38_2 for subquery SELECT value_2 FROM public.events_table
 DEBUG: generating subplan 38_3 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_1 WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
 DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table_2
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
@@ -422,7 +422,7 @@ DEBUG: generating subplan 42_3 for subquery SELECT value_2 FROM public.events_t
 DEBUG: generating subplan 42_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('42_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('42_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('42_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
 DEBUG: generating subplan 42_5 for subquery SELECT min(user_id) AS min FROM public.events_table GROUP BY user_id
 DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('42_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (user_id OPERATOR(pg_catalog.>) ANY (SELECT intermediate_result.min FROM read_intermediate_result('42_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)))
- sum 
+ sum
 ---------------------------------------------------------------------
 18
 (1 row)
@@ -475,7 +475,7 @@ DEBUG: generating subplan 48_3 for subquery SELECT value_2 FROM public.events_t
 DEBUG: generating subplan 48_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('48_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('48_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('48_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
 DEBUG: generating subplan 48_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) user_id)
 DEBUG: Plan 48 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('48_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('48_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
- sum 
+ sum
 ---------------------------------------------------------------------
 67
 (1 row)
@@ -529,7 +529,7 @@ DEBUG: generating subplan 54_3 for subquery SELECT value_2 FROM public.events_t
 DEBUG: generating subplan 54_4 for subquery SELECT t1.user_id, t2.user_id_2 FROM ((SELECT intermediate_result.user_id FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) t1 JOIN (SELECT intermediate_result.user_id_2 FROM read_intermediate_result('54_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id_2 integer)) t2 ON ((t1.user_id OPERATOR(pg_catalog.=) t2.user_id_2))) WHERE (t1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('54_3'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))
 DEBUG: generating subplan 54_5 for subquery SELECT 1, 2 FROM public.events_table WHERE (value_2 OPERATOR(pg_catalog.=) (user_id OPERATOR(pg_catalog.+) 6))
 DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT sum(user_id) AS sum FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_2 FROM read_intermediate_result('54_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_2 integer)) t3 WHERE (NOT (EXISTS (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('54_5'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer))))
- sum 
+ sum
 ---------------------------------------------------------------------
 67
 (1 row)
@@ -557,7 +557,7 @@ DEBUG: generating subplan 60_1 for subquery SELECT (min(user_id) OPERATOR(pg_ca
 DEBUG: push down of limit count: 10
 DEBUG: generating subplan 60_2 for subquery SELECT user_id, value_1 FROM public.users_table ORDER BY user_id, value_1 LIMIT 10
 DEBUG: Plan 60 query after replacing subqueries and CTEs: SELECT user_id, value_1 FROM (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('60_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) t3 WHERE ((user_id, value_1) OPERATOR(pg_catalog.=) (SELECT intermediate_result."?column?", intermediate_result."?column?_1" AS "?column?" FROM read_intermediate_result('60_1'::text, 'binary'::citus_copy_format) intermediate_result("?column?" integer, "?column?_1" integer)))
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 (0 rows)
@@ -583,7 +583,7 @@ ORDER BY generate_series ASC;
 DEBUG: generating subplan 63_1 for subquery SELECT value_2 FROM public.events_table
 DEBUG: Plan 63 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('63_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY generate_series
- generate_series 
+ generate_series
 ---------------------------------------------------------------------
 1
 2
@@ -621,7 +621,7 @@ ORDER BY generate_series ASC;
 DEBUG: generating subplan 65_1 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT generate_series.generate_series FROM generate_series(1, 3) generate_series(generate_series)))
 DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT generate_series FROM (SELECT generate_series.generate_series FROM generate_series(1, 10) generate_series(generate_series)) gst WHERE (generate_series OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY generate_series
- generate_series 
+ generate_series
 ---------------------------------------------------------------------
 1
 2
@@ -649,7 +649,7 @@ IN
 DEBUG: generating subplan 67_1 for subquery SELECT id, value_1 FROM subquery_in_where.local_table
 DEBUG: generating subplan 67_2 for subquery SELECT user_id FROM public.users_table
 DEBUG: Plan 67 query after replacing subqueries and CTEs: SELECT id, value_1 FROM (SELECT intermediate_result.id, intermediate_result.value_1 FROM read_intermediate_result('67_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer)) sub_table WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('67_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
- id | value_1 
+ id | value_1
 ---------------------------------------------------------------------
 1 | 1
 2 | 2
@@ -678,7 +678,7 @@ DEBUG: generating subplan 69_1 for subquery SELECT id FROM subquery_in_where.lo
 DEBUG: push down of limit count: 10
 DEBUG: generating subplan 69_2 for subquery SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.users_table ORDER BY user_id LIMIT 10
 DEBUG: Plan 69 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result."time", intermediate_result.value_1, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('69_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, "time" timestamp without time zone, value_1 integer, value_2 integer, value_3 double precision, value_4 bigint)) sub_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.id FROM read_intermediate_result('69_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)))
- count 
+ count
 ---------------------------------------------------------------------
 10
 (1 row)
diff --git a/src/test/regress/expected/subquery_local_tables.out b/src/test/regress/expected/subquery_local_tables.out
index bbdbbf8dd..db6818da7 100644
--- a/src/test/regress/expected/subquery_local_tables.out
+++ b/src/test/regress/expected/subquery_local_tables.out
@@ -11,21 +11,21 @@ SET client_min_messages TO DEBUG1;
 SELECT
 foo.user_id
 FROM
- (SELECT 
- DISTINCT users_table_local.user_id 
- FROM 
- users_table_local, events_table_local 
- WHERE 
- users_table_local.user_id = events_table_local.user_id AND 
+ (SELECT
+ DISTINCT users_table_local.user_id
+ FROM
+ users_table_local, events_table_local
+ WHERE
+ users_table_local.user_id = events_table_local.user_id AND
 event_type IN (1,2,3,4)
 ORDER BY 1 DESC LIMIT 5
 ) as foo,
- (SELECT 
- DISTINCT users_table.user_id 
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ (SELECT
+ DISTINCT users_table.user_id
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (5,6,7,8)
 ORDER BY 1 DESC LIMIT 5
 ) as bar
@@ -35,7 +35,7 @@ DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table_local.us
 DEBUG: push down of limit count: 5
 DEBUG: generating subplan 3_2 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC LIMIT 5
 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('3_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 5
@@ -48,28 +48,28 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT foo.user_id FRO
 SELECT
 foo.user_id
 FROM
- (SELECT 
- DISTINCT users_table_local.user_id 
- FROM 
- users_table_local, events_table_local 
- WHERE 
- users_table_local.user_id = events_table_local.user_id AND 
+ (SELECT
+ DISTINCT users_table_local.user_id
+ FROM
+ users_table_local, events_table_local
+ WHERE
+ users_table_local.user_id = events_table_local.user_id AND
 event_type IN (1,2,3,4)
 ORDER BY 1 DESC LIMIT 5
 ) as foo,
- (SELECT 
- DISTINCT users_table.user_id 
- FROM 
- users_table, events_table 
- WHERE 
- users_table.user_id = events_table.user_id AND 
+ (SELECT
+ DISTINCT users_table.user_id
+ FROM
+ users_table, events_table
+ WHERE
+ users_table.user_id = events_table.user_id AND
 event_type IN (5,6,7,8)
 ) as bar
 WHERE bar.user_id = foo.user_id
 ORDER BY 1 DESC;
 DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table_local.user_id FROM subquery_local_tables.users_table_local, subquery_local_tables.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table_local.user_id DESC LIMIT 5
 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table,
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY foo.user_id DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -81,13 +81,13 @@ DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.user_id FRO -- subqueries in WHERE could be replaced even if they are on the local tables SELECT DISTINCT user_id FROM users_table -WHERE - user_id IN (SELECT DISTINCT value_2 FROM users_table_local WHERE value_1 = 1) +WHERE + user_id IN (SELECT DISTINCT value_2 FROM users_table_local WHERE value_1 = 1) ORDER BY 1 LIMIT 5; DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT value_2 FROM subquery_local_tables.users_table_local WHERE (value_1 OPERATOR(pg_catalog.=) 1) DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))) ORDER BY user_id LIMIT 5 DEBUG: push down of limit count: 5 - user_id + user_id --------------------------------------------------------------------- 1 2 @@ -96,30 +96,30 @@ DEBUG: push down of limit count: 5 5 (5 rows) --- subquery in FROM -> FROM -> FROM should be replaced if +-- subquery in FROM -> FROM -> FROM should be replaced if -- it contains onle local tables -SELECT - DISTINCT user_id -FROM +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT event_type, user_id FROM - (SELECT event_type, users_table.user_id FROM users_table, + (SELECT event_type, users_table.user_id FROM users_table, (SELECT user_id, event_type FROM events_table_local WHERE value_2 < 3 OFFSET 3) as foo WHERE foo.user_id = users_table.user_id ) bar ) as baz WHERE baz.user_id = users_table.user_id ) as sub1 - ORDER BY 1 DESC + ORDER BY 1 DESC LIMIT 3; DEBUG: generating subplan 7_1 for subquery SELECT user_id, event_type FROM subquery_local_tables.events_table_local WHERE (value_2 OPERATOR(pg_catalog.<) 3) OFFSET 3 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM (SELECT users_table.user_id FROM public.users_table, (SELECT bar.event_type, bar.user_id FROM (SELECT foo.event_type, users_table_1.user_id FROM public.users_table users_table_1, (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) users_table_1.user_id)) bar) baz WHERE (baz.user_id OPERATOR(pg_catalog.=) users_table.user_id)) sub1 ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -133,18 +133,18 @@ SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( - SELECT + SELECT u.user_id, e.event_type::text AS event, e.time - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS 
(SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -158,16 +158,16 @@ DEBUG: generating subplan 8_1 for subquery SELECT user_id FROM subquery_local_t DEBUG: push down of limit count: 5 DEBUG: generating subplan 8_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 5 | 364 (1 row) -- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables -SELECT - user_id -FROM - users_table +SELECT + user_id +FROM + users_table WHERE user_id IN ( @@ -235,7 +235,7 @@ HAVING count(*) > 1 AND sum(value_2) > 29 ORDER BY 1; DEBUG: generating subplan 10_1 for subquery SELECT user_id, count(*) AS count_pay FROM subquery_local_tables.users_table_local WHERE ((user_id OPERATOR(pg_catalog.>=) 1) AND (user_id OPERATOR(pg_catalog.<=) 3) AND (value_1 OPERATOR(pg_catalog.>) 3) AND (value_1 OPERATOR(pg_catalog.<) 5)) GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) 1) LIMIT 10 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) ANY (SELECT subquery_top.user_id FROM (SELECT subquery_1.user_id, subquery_2.count_pay FROM ((SELECT users_table_1.user_id, 'action=>1'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<) 3)) UNION SELECT users_table_1.user_id, 'action=>2'::text AS event, events_table."time" FROM public.users_table users_table_1, public.events_table WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.>=) 1) AND (users_table_1.user_id OPERATOR(pg_catalog.<=) 3) AND (events_table.event_type OPERATOR(pg_catalog.>) 2) AND (events_table.event_type OPERATOR(pg_catalog.<) 4))) subquery_1 LEFT JOIN (SELECT intermediate_result.user_id, 
intermediate_result.count_pay FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, count_pay bigint)) subquery_2 ON ((subquery_1.user_id OPERATOR(pg_catalog.=) subquery_2.user_id))) GROUP BY subquery_1.user_id, subquery_2.count_pay) subquery_top GROUP BY subquery_top.count_pay, subquery_top.user_id)) GROUP BY user_id HAVING ((count(*) OPERATOR(pg_catalog.>) 1) AND (sum(value_2) OPERATOR(pg_catalog.>) 29)) ORDER BY user_id - user_id + user_id --------------------------------------------------------------------- 2 3 diff --git a/src/test/regress/expected/subquery_partitioning.out b/src/test/regress/expected/subquery_partitioning.out index dd8dbbe0f..f110477a5 100644 --- a/src/test/regress/expected/subquery_partitioning.out +++ b/src/test/regress/expected/subquery_partitioning.out @@ -19,9 +19,9 @@ SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SET client_min_messages TO DEBUG1; @@ -39,7 +39,7 @@ FROM DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT id FROM (SELECT intermediate_result.id FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo ORDER BY id DESC - id + id --------------------------------------------------------------------- 4 3 @@ -70,7 +70,7 @@ DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT id FROM subquery_and DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_2 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT foo.id, bar."time" FROM (SELECT intermediate_result.id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) foo, (SELECT intermediate_result."time" FROM read_intermediate_result('5_2'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) bar WHERE ((foo.id)::double precision OPERATOR(pg_catalog.=) date_part('day'::text, bar."time")) ORDER BY bar."time" DESC, foo.id - id | time + id | time --------------------------------------------------------------------- 3 | 03-03-2010 (1 row) @@ -99,7 +99,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 8_1 for subquery SELECT DISTINCT "time" FROM subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT foo."time", bar.id FROM (SELECT intermediate_result."time" FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test.id FROM subquery_and_partitioning.partitioning_test) bar WHERE (date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 - time | id + time | id --------------------------------------------------------------------- 03-03-2010 | 3 (1 row) @@ -129,7 +129,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 10_1 for subquery SELECT DISTINCT "time" FROM 
subquery_and_partitioning.partitioning_test ORDER BY "time" DESC LIMIT 5 DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT foo."time", bar.id, partitioning_test.id, partitioning_test.value_1, partitioning_test."time" FROM (SELECT intermediate_result."time" FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result("time" date)) foo, (SELECT DISTINCT partitioning_test_1.id FROM subquery_and_partitioning.partitioning_test partitioning_test_1) bar, subquery_and_partitioning.partitioning_test WHERE ((date_part('day'::text, foo."time") OPERATOR(pg_catalog.=) (bar.id)::double precision) AND (partitioning_test.id OPERATOR(pg_catalog.=) bar.id)) ORDER BY bar.id DESC, foo."time" DESC LIMIT 3 DEBUG: push down of limit count: 3 - time | id | id | value_1 | time + time | id | id | value_1 | time --------------------------------------------------------------------- 03-03-2010 | 3 | 3 | 3 | 11-22-2017 (1 row) @@ -141,7 +141,7 @@ WHERE id IN (SELECT DISTINCT date_part('day', time) FROM partitioning_test); DEBUG: generating subplan 12_1 for subquery SELECT DISTINCT date_part('day'::text, "time") AS date_part FROM subquery_and_partitioning.partitioning_test DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT DISTINCT id FROM subquery_and_partitioning.partitioning_test WHERE ((id)::double precision OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.date_part FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(date_part double precision))) - id + id --------------------------------------------------------------------- 3 (1 row) @@ -157,10 +157,10 @@ FROM ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_1 = bar.user_id; +WHERE foo.value_1 = bar.user_id; DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT p1.value_1 FROM subquery_and_partitioning.partitioning_test p1, subquery_and_partitioning.partitioning_test p2 WHERE (p1.id OPERATOR(pg_catalog.=) p2.value_1) DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_1 OPERATOR(pg_catalog.=) bar.user_id) - count + count --------------------------------------------------------------------- 47 (1 row) @@ -219,13 +219,13 @@ DEBUG: generating subplan 16_2 for subquery SELECT DISTINCT events_table.user_i DEBUG: generating subplan 16_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('16_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC 
LIMIT 5 DEBUG: push down of limit count: 5 - cnt | user_id | time | value_1 | value_2 | value_3 | value_4 + cnt | user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 105 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 105 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 105 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 105 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | - 105 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | + 105 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | + 105 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 105 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 105 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | + 105 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | (5 rows) -- deep subquery, partitioned and non-partitioned tables together @@ -275,7 +275,7 @@ DEBUG: generating subplan 23_4 for subquery SELECT avg(events_table.event_type) DEBUG: generating subplan 23_5 for subquery SELECT min(partitioning_test.value_1) AS min FROM (SELECT intermediate_result.avg_ev_type FROM read_intermediate_result('23_4'::text, 'binary'::citus_copy_format) intermediate_result(avg_ev_type numeric)) level_5, subquery_and_partitioning.partitioning_test WHERE (level_5.avg_ev_type OPERATOR(pg_catalog.=) (partitioning_test.id)::numeric) GROUP BY level_5.avg_ev_type DEBUG: generating subplan 23_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('23_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('23_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar - count + count --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out index 4a33969a8..a9761787f 100644 --- a/src/test/regress/expected/subquery_prepared_statements.out +++ b/src/test/regress/expected/subquery_prepared_statements.out @@ -3,7 +3,7 @@ -- =================================================================== CREATE SCHEMA subquery_prepared_statements; SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- (localhost,57637,t,"CREATE SCHEMA") (localhost,57638,t,"CREATE SCHEMA") @@ -12,45 +12,45 @@ SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); SET search_path TO subquery_prepared_statements, public; CREATE TYPE subquery_prepared_statements.xy AS (x int, y int); SET client_min_messages TO DEBUG1; -PREPARE subquery_prepare_without_param AS +PREPARE subquery_prepare_without_param AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -PREPARE 
subquery_prepare_param_on_partkey(int) AS +PREPARE subquery_prepare_param_on_partkey(int) AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE + FROM + users_table, events_table + WHERE users_table.user_id = events_table.user_id AND (users_table.user_id = $1 OR users_table.user_id = 2) AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -PREPARE subquery_prepare_param_non_partkey(int) AS +PREPARE subquery_prepare_param_non_partkey(int) AS SELECT DISTINCT values_of_subquery FROM - (SELECT + (SELECT DISTINCT (users_table.user_id, events_table.event_type)::xy as values_of_subquery - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type = $1 ORDER BY 1 DESC LIMIT 5 ) as foo @@ -60,7 +60,7 @@ EXECUTE subquery_prepare_without_param; DEBUG: push down of limit count: 5 DEBUG: generating subplan 1_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('1_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -70,7 +70,7 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT DISTINCT values (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -80,7 +80,7 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -90,7 +90,7 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -100,7 +100,7 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -110,7 +110,7 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -120,7 +120,7 @@ EXECUTE subquery_prepare_without_param; (5 rows) EXECUTE subquery_prepare_without_param; - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,4) (6,3) @@ -133,7 +133,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 3_1 for 
subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('3_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -146,7 +146,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('5_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -159,7 +159,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('7_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -172,7 +172,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id 
OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('9_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -185,7 +185,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 11_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('11_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -198,7 +198,7 @@ EXECUTE subquery_prepare_param_on_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 14_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('14_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (2,4) (2,3) @@ -211,7 +211,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 16_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy 
DESC LIMIT 5 DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('16_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) @@ -224,7 +224,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 18_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('18_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) @@ -237,7 +237,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 20_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('20_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) @@ -250,7 +250,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 22_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('22_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) @@ -263,7 +263,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 
DEBUG: generating subplan 24_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 24 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('24_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) @@ -276,7 +276,7 @@ EXECUTE subquery_prepare_param_non_partkey(1); DEBUG: push down of limit count: 5 DEBUG: generating subplan 27_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 1)) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5 DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT DISTINCT values_of_subquery FROM (SELECT intermediate_result.values_of_subquery FROM read_intermediate_result('27_1'::text, 'text'::citus_copy_format) intermediate_result(values_of_subquery subquery_prepared_statements.xy)) foo ORDER BY values_of_subquery DESC - values_of_subquery + values_of_subquery --------------------------------------------------------------------- (6,1) (5,1) diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out index 7a698b51a..2f41c5316 100644 --- a/src/test/regress/expected/subquery_view.out +++ b/src/test/regress/expected/subquery_view.out @@ -7,22 +7,22 @@ CREATE TABLE users_table_local AS SELECT * FROM users_table; CREATE TABLE events_table_local AS SELECT * FROM events_table; SET client_min_messages TO DEBUG1; CREATE VIEW view_without_subquery AS -SELECT - DISTINCT users_table.value_1 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND +SELECT + DISTINCT users_table.value_1 + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC; -SELECT - * -FROM - view_without_subquery +SELECT + * +FROM + view_without_subquery ORDER BY 1 DESC LIMIT 5; DEBUG: generating subplan 3_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DESC DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT intermediate_result.value_1 FROM read_intermediate_result('3_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) view_without_subquery ORDER BY value_1 DESC LIMIT 5 - value_1 + value_1 --------------------------------------------------------------------- 5 4 @@ -32,24 +32,24 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: SELECT value_1 FROM (S (5 rows) CREATE VIEW 
view_without_subquery_second AS -SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND +SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5; -SELECT - * -FROM - view_without_subquery_second +SELECT + * +FROM + view_without_subquery_second ORDER BY 1; DEBUG: push down of limit count: 5 DEBUG: generating subplan 5_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) view_without_subquery_second ORDER BY user_id - user_id + user_id --------------------------------------------------------------------- 2 3 @@ -63,12 +63,12 @@ CREATE VIEW subquery_limit AS SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo @@ -77,7 +77,7 @@ SELECT * FROM subquery_limit ORDER BY 1 DESC; DEBUG: push down of limit count: 5 DEBUG: generating subplan 7_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.user_id DESC LIMIT 5 DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) subquery_limit ORDER BY user_id DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -87,16 +87,16 @@ DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT user_id FROM (S (5 rows) -- subqueries in FROM clause with GROUP BY non-distribution column should be recursively planned -CREATE VIEW subquery_non_p_key_group_by AS +CREATE VIEW subquery_non_p_key_group_by AS SELECT * FROM - (SELECT - DISTINCT users_table.value_1 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.value_1 + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 ) as foo @@ -104,7 +104,7 @@ FROM SELECT * FROM subquery_non_p_key_group_by ORDER BY 1 DESC; DEBUG: generating subplan 9_1 for subquery SELECT DISTINCT users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY users_table.value_1 DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT value_1 FROM (SELECT foo.value_1 FROM (SELECT intermediate_result.value_1 FROM 
read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) foo ORDER BY foo.value_1 DESC) subquery_non_p_key_group_by ORDER BY value_1 DESC - value_1 + value_1 --------------------------------------------------------------------- 5 4 @@ -114,26 +114,26 @@ DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT value_1 FROM (S 0 (6 rows) -CREATE VIEW final_query_router AS +CREATE VIEW final_query_router AS SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.value_3 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) GROUP BY users_table.value_3 ORDER BY 1 DESC @@ -144,7 +144,7 @@ SELECT * FROM final_query_router ORDER BY 1; DEBUG: generating subplan 11_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) GROUP BY users_table.value_2 ORDER BY users_table.value_2 DESC DEBUG: generating subplan 11_2 for subquery SELECT users_table.value_3 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) GROUP BY users_table.value_3 ORDER BY users_table.value_3 DESC DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT value_2, value_3 FROM (SELECT foo.value_2, bar.value_3 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.value_3 FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(value_3 double precision)) bar WHERE ((foo.value_2)::double precision OPERATOR(pg_catalog.=) bar.value_3) ORDER BY bar.value_3 DESC, foo.value_2) final_query_router ORDER BY value_2 - value_2 | value_3 + value_2 | value_3 --------------------------------------------------------------------- 0 | 0 1 | 1 @@ -158,33 +158,33 @@ CREATE VIEW final_query_realtime AS SELECT * FROM - (SELECT + (SELECT users_table.value_2 - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) GROUP BY users_table.value_2 ORDER BY 1 DESC ) as foo, - (SELECT + (SELECT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8) ORDER BY 1 DESC ) as bar WHERE foo.value_2 = bar.user_id ORDER BY 2 DESC, 1 DESC LIMIT 3; -SELECT - DISTINCT ON (users_table.value_2) users_table.value_2, time, value_3 -FROM +SELECT + DISTINCT ON (users_table.value_2) users_table.value_2, time, value_3 +FROM final_query_realtime, users_table -WHERE +WHERE users_table.user_id = final_query_realtime.user_id ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 3; @@ -193,7 +193,7 @@ 
DEBUG: push down of limit count: 3 DEBUG: generating subplan 14_2 for subquery SELECT foo.value_2, bar.user_id FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('14_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) ORDER BY users_table.user_id DESC) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) ORDER BY bar.user_id DESC, foo.value_2 DESC LIMIT 3 DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT DISTINCT ON (users_table.value_2) users_table.value_2, users_table."time", users_table.value_3 FROM (SELECT intermediate_result.value_2, intermediate_result.user_id FROM read_intermediate_result('14_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, user_id integer)) final_query_realtime, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) final_query_realtime.user_id) ORDER BY users_table.value_2 DESC, users_table."time" DESC, users_table.value_3 DESC LIMIT 3 DEBUG: push down of limit count: 3 - value_2 | time | value_3 + value_2 | time | value_3 --------------------------------------------------------------------- 5 | Thu Nov 23 16:28:38.455322 2017 | 4 4 | Thu Nov 23 10:22:39.468816 2017 | 3 @@ -203,17 +203,17 @@ DEBUG: push down of limit count: 3 CREATE VIEW subquery_in_where AS SELECT DISTINCT user_id FROM users_table -WHERE +WHERE user_id IN (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5); -SELECT - * -FROM +SELECT + * +FROM subquery_in_where ORDER BY 1 DESC; DEBUG: push down of limit count: 5 DEBUG: generating subplan 17_1 for subquery SELECT DISTINCT value_2 FROM public.users_table WHERE ((value_1 OPERATOR(pg_catalog.>=) 1) AND (value_1 OPERATOR(pg_catalog.<=) 20)) ORDER BY value_2 LIMIT 5 DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)))) subquery_in_where ORDER BY user_id DESC - user_id + user_id --------------------------------------------------------------------- 4 3 @@ -222,23 +222,23 @@ DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT user_id FROM ( (4 rows) -- subquery in FROM -> FROM -> WHERE should be replaced due to LIMIT -CREATE VIEW subquery_from_from_where AS +CREATE VIEW subquery_from_from_where AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( - SELECT + SELECT u.user_id, e.event_type::text AS event, e.time - FROM + FROM users_table AS u, events_table AS e - WHERE u.user_id = e.user_id AND - u.user_id IN + WHERE u.user_id = e.user_id AND + u.user_id IN ( - SELECT - user_id - FROM - users_table + SELECT + user_id + FROM + users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id) @@ -247,85 +247,85 @@ FROM ( ) t GROUP BY user_id ) q; -SELECT - * -FROM +SELECT + * 
+FROM subquery_from_from_where -ORDER BY +ORDER BY 2 DESC, 1; DEBUG: push down of limit count: 5 DEBUG: generating subplan 19_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 19 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('19_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY array_length DESC, user_id - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 5 | 364 (1 row) --- subquery in FROM -> FROM -> FROM should be replaced if +-- subquery in FROM -> FROM -> FROM should be replaced if -- it contains onle local tables -CREATE VIEW subquery_from_from_where_local_table AS -SELECT - DISTINCT user_id -FROM +CREATE VIEW subquery_from_from_where_local_table AS +SELECT + DISTINCT user_id +FROM ( - SELECT users_table.user_id FROM users_table, + SELECT users_table.user_id FROM users_table, ( - SELECT + SELECT event_type, user_id FROM - (SELECT event_type, users_table.user_id FROM users_table, + (SELECT event_type, users_table.user_id FROM users_table, (SELECT user_id, event_type FROM events_table_local WHERE value_2 < 3 OFFSET 3) as foo WHERE foo.user_id = users_table.user_id ) bar ) as baz WHERE baz.user_id = users_table.user_id ) as sub1; -SELECT - * -FROM +SELECT + * +FROM subquery_from_from_where -ORDER BY 1 DESC +ORDER BY 1 DESC LIMIT 3; DEBUG: push down of limit count: 5 DEBUG: generating subplan 21_1 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 1) AND (events_table.event_type OPERATOR(pg_catalog.<=) 3) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5 DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT user_id, array_length FROM (SELECT q.user_id, array_length(q.events_table, 1) 
AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q) subquery_from_from_where ORDER BY user_id DESC LIMIT 3 DEBUG: push down of limit count: 3 - user_id | array_length + user_id | array_length --------------------------------------------------------------------- 5 | 364 (1 row) SET citus.enable_repartition_joins to ON; CREATE VIEW repartition_view AS -SELECT - count(*) +SELECT + count(*) FROM ( SELECT DISTINCT users_table.value_2 FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND users_table.user_id < 2 -) as foo, +) as foo, ( SELECT user_id FROM users_table ) as bar -WHERE foo.value_2 = bar.user_id; -SELECT - * -FROM +WHERE foo.value_2 = bar.user_id; +SELECT + * +FROM repartition_view; DEBUG: generating subplan 23_1 for subquery SELECT DISTINCT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (users_table.user_id OPERATOR(pg_catalog.<) 2)) DEBUG: generating subplan 23_2 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT users_table.user_id FROM public.users_table) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) repartition_view - count + count --------------------------------------------------------------------- 58 (1 row) CREATE VIEW all_executors_view AS -SELECT - count(*) +SELECT + count(*) FROM ( SELECT value_2 FROM users_table WHERE user_id = 15 OFFSET 0 -) as foo, +) as foo, ( SELECT user_id FROM users_table OFFSET 0 ) as bar, @@ -335,10 +335,10 @@ FROM ( SELECT user_id FROM users_table_local WHERE user_id = 2 ) baw -WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; -SELECT - * -FROM +WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; +SELECT + * +FROM all_executors_view; DEBUG: generating subplan 26_1 for subquery SELECT value_2 FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.=) 15) OFFSET 0 DEBUG: generating subplan 26_2 for subquery SELECT user_id FROM public.users_table OFFSET 0 @@ -346,47 +346,47 @@ DEBUG: generating subplan 26_3 for subquery SELECT DISTINCT users_table.value_2 DEBUG: generating subplan 26_4 for subquery SELECT user_id FROM subquery_view.users_table_local WHERE (user_id OPERATOR(pg_catalog.=) 2) DEBUG: generating subplan 26_5 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('26_3'::text, 
'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz, (SELECT intermediate_result.user_id FROM read_intermediate_result('26_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) baw WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (baz.value_2 OPERATOR(pg_catalog.=) bar.user_id) AND (bar.user_id OPERATOR(pg_catalog.=) baw.user_id)) DEBUG: Plan 26 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('26_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) all_executors_view - count + count --------------------------------------------------------------------- 0 (1 row) SET citus.enable_repartition_joins to OFF; -- the same query, but this time the CTEs also live inside a subquery -CREATE VIEW subquery_and_ctes AS -SELECT - * -FROM +CREATE VIEW subquery_and_ctes AS +SELECT + * +FROM ( WITH cte AS ( WITH local_cte AS ( SELECT * FROM users_table_local ), dist_cte AS ( - SELECT + SELECT user_id - FROM - events_table, + FROM + events_table, (SELECT DISTINCT value_2 FROM users_table OFFSET 0) as foo - WHERE + WHERE events_table.user_id = foo.value_2 AND events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3) ) SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id ) -SELECT +SELECT count(*) as cnt -FROM +FROM cte, - (SELECT - DISTINCT users_table.user_id - FROM + (SELECT + DISTINCT users_table.user_id + FROM users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 - ) as foo + ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; SELECT * FROM subquery_and_ctes @@ -405,17 +405,17 @@ DEBUG: generating subplan 31_2 for subquery SELECT DISTINCT users_table.user_id DEBUG: generating subplan 31_3 for subquery SELECT count(*) AS cnt FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte, (SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) cte.user_id) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT cnt, user_id, "time", value_1, value_2, value_3, value_4 FROM (SELECT foo.cnt, users_table.user_id, users_table."time", users_table.value_1, users_table.value_2, users_table.value_3, users_table.value_4 FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('31_3'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) foo, public.users_table WHERE (foo.cnt OPERATOR(pg_catalog.>) users_table.value_2)) subquery_and_ctes ORDER BY "time" DESC, cnt DESC, user_id DESC, value_1 DESC LIMIT 5 DEBUG: push down of limit count: 5 - cnt | user_id | time | value_1 | value_2 | value_3 | value_4 + cnt | user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | - 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | - 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | - 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | - 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | + 432 | 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 
| + 432 | 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | + 432 | 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | + 432 | 3 | Thu Nov 23 17:10:35.959913 2017 | 4 | 3 | 1 | + 432 | 5 | Thu Nov 23 16:48:32.08896 2017 | 5 | 2 | 1 | (5 rows) -CREATE VIEW subquery_and_ctes_second AS -SELECT time, event_type, value_2, value_3 FROM +CREATE VIEW subquery_and_ctes_second AS +SELECT time, event_type, value_2, value_3 FROM ( WITH cte AS ( WITH local_cte AS ( @@ -428,17 +428,17 @@ SELECT time, event_type, value_2, value_3 FROM ) SELECT DISTINCT cte.user_id FROM users_table, cte - WHERE + WHERE users_table.user_id = cte.user_id AND - users_table.user_id IN + users_table.user_id IN (WITH cte_in_where AS (SELECT DISTINCT value_2 FROM users_table WHERE value_1 >= 1 AND value_1 <= 20 ORDER BY 1 LIMIT 5) SELECT * FROM cte_in_where) ORDER BY 1 DESC - ) as foo, - events_table - WHERE + ) as foo, + events_table + WHERE foo.user_id = events_table.value_2; SELECT * FROM subquery_and_ctes_second -ORDER BY 3 DESC, 2 DESC, 1 DESC +ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; DEBUG: generating subplan 38_1 for CTE cte: WITH local_cte AS (SELECT users_table_local.user_id, users_table_local."time", users_table_local.value_1, users_table_local.value_2, users_table_local.value_3, users_table_local.value_4 FROM subquery_view.users_table_local), dist_cte AS (SELECT events_table.user_id FROM public.events_table) SELECT dist_cte.user_id FROM (local_cte JOIN dist_cte ON ((dist_cte.user_id OPERATOR(pg_catalog.=) local_cte.user_id))) DEBUG: generating subplan 39_1 for CTE local_cte: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM subquery_view.users_table_local @@ -449,7 +449,7 @@ DEBUG: push down of limit count: 5 DEBUG: generating subplan 38_3 for subquery SELECT DISTINCT cte.user_id FROM public.users_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte WHERE ((users_table.user_id OPERATOR(pg_catalog.=) cte.user_id) AND (users_table.user_id OPERATOR(pg_catalog.=) ANY (SELECT cte_in_where.value_2 FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) cte_in_where))) ORDER BY cte.user_id DESC DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT "time", event_type, value_2, value_3 FROM (SELECT events_table."time", events_table.event_type, events_table.value_2, events_table.value_3 FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('38_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.events_table WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2)) subquery_and_ctes_second ORDER BY value_2 DESC, event_type DESC, "time" DESC LIMIT 5 DEBUG: push down of limit count: 5 - time | event_type | value_2 | value_3 + time | event_type | value_2 | value_3 --------------------------------------------------------------------- Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 Wed Nov 22 21:24:22.849224 2017 | 5 | 4 | 1 @@ -462,27 +462,27 @@ CREATE VIEW deep_subquery AS SELECT count(*) FROM ( - SELECT avg(min) FROM + SELECT avg(min) FROM ( SELECT min(users_table.value_1) FROM ( - SELECT avg(event_type) as avg_ev_type FROM + SELECT avg(event_type) as avg_ev_type FROM ( - SELECT - max(value_1) as mx_val_1 + SELECT + max(value_1) as mx_val_1 FROM ( - SELECT + SELECT avg(event_type) as avg FROM ( - SELECT - cnt - FROM + SELECT + cnt + FROM (SELECT count(*) 
as cnt, value_2 FROM users_table GROUP BY value_2) as level_1, users_table - WHERE + WHERE users_table.user_id = level_1.cnt ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt + WHERE events_table.user_id = level_2.cnt GROUP BY level_2.cnt ) as level_3, users_table WHERE user_id = level_3.avg @@ -491,16 +491,16 @@ FROM WHERE level_4.mx_val_1 = events_table.user_id GROUP BY level_4.mx_val_1 ) as level_5, users_table - WHERE + WHERE level_5.avg_ev_type = users_table.user_id - GROUP BY + GROUP BY level_5.avg_ev_type ) as level_6, users_table WHERE users_table.user_id = level_6.min GROUP BY users_table.value_1 ) as bar; -SELECT - * -FROM +SELECT + * +FROM deep_subquery; DEBUG: generating subplan 43_1 for subquery SELECT count(*) AS cnt, value_2 FROM public.users_table GROUP BY value_2 DEBUG: generating subplan 43_2 for subquery SELECT avg(events_table.event_type) AS avg FROM (SELECT level_1.cnt FROM (SELECT intermediate_result.cnt, intermediate_result.value_2 FROM read_intermediate_result('43_1'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint, value_2 integer)) level_1, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_1.cnt)) level_2, public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) level_2.cnt) GROUP BY level_2.cnt @@ -510,7 +510,7 @@ DEBUG: generating subplan 43_5 for subquery SELECT min(users_table.value_1) AS DEBUG: generating subplan 43_6 for subquery SELECT avg(level_6.min) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('43_5'::text, 'binary'::citus_copy_format) intermediate_result(min integer)) level_6, public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) level_6.min) GROUP BY users_table.value_1 DEBUG: generating subplan 43_7 for subquery SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('43_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar DEBUG: Plan 43 query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('43_7'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) deep_subquery - count + count --------------------------------------------------------------------- 0 (1 row) @@ -519,24 +519,24 @@ CREATE VIEW result_of_view_is_also_recursively_planned AS SELECT user_id FROM - (SELECT - DISTINCT users_table.user_id - FROM - users_table, events_table - WHERE - users_table.user_id = events_table.user_id AND + (SELECT + DISTINCT users_table.user_id + FROM + users_table, events_table + WHERE + users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4) ORDER BY 1 DESC LIMIT 5 ) as foo ORDER BY 1 DESC; -SELECT +SELECT * FROM - (SELECT - * + (SELECT + * FROM - result_of_view_is_also_recursively_planned, events_table - WHERE + result_of_view_is_also_recursively_planned, events_table + WHERE events_table.value_2 = result_of_view_is_also_recursively_planned.user_id ORDER BY time DESC LIMIT 5 @@ -548,13 +548,13 @@ DEBUG: generating subplan 51_1 for subquery SELECT DISTINCT users_table.user_id DEBUG: push down of limit count: 9 DEBUG: generating subplan 51_2 for subquery SELECT result_of_view_is_also_recursively_planned.user_id, events_table.user_id, events_table."time", events_table.event_type, events_table.value_2, events_table.value_3, events_table.value_4 FROM (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('51_1'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer)) foo ORDER BY foo.user_id DESC) result_of_view_is_also_recursively_planned, public.events_table WHERE (events_table.value_2 OPERATOR(pg_catalog.=) result_of_view_is_also_recursively_planned.user_id) ORDER BY events_table."time" DESC OFFSET 4 LIMIT 5 DEBUG: Plan 51 query after replacing subqueries and CTEs: SELECT user_id, user_id_1 AS user_id, "time", event_type, value_2, value_3, value_4 FROM (SELECT intermediate_result.user_id, intermediate_result.user_id_1 AS user_id, intermediate_result."time", intermediate_result.event_type, intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4 FROM read_intermediate_result('51_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, user_id_1 integer, "time" timestamp without time zone, event_type integer, value_2 integer, value_3 double precision, value_4 bigint)) foo(user_id, user_id_1, "time", event_type, value_2, value_3, value_4) ORDER BY "time" DESC LIMIT 5 - user_id | user_id | time | event_type | value_2 | value_3 | value_4 + user_id | user_id | time | event_type | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | - 2 | 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | - 4 | 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | - 3 | 2 | Thu Nov 23 14:02:47.738901 2017 | 1 | 3 | 2 | - 3 | 6 | Thu Nov 23 14:00:13.20013 2017 | 3 | 3 | 3 | + 2 | 3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 | + 2 | 5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 | + 4 | 5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 | + 3 | 2 | Thu Nov 23 14:02:47.738901 2017 | 1 | 3 | 2 | + 3 | 6 | Thu Nov 23 14:00:13.20013 2017 | 3 | 3 | 3 | (5 rows) SET client_min_messages TO DEFAULT; diff --git a/src/test/regress/expected/task_tracker_assign_task.out b/src/test/regress/expected/task_tracker_assign_task.out index dcf336ed2..85085e848 100644 --- a/src/test/regress/expected/task_tracker_assign_task.out +++ b/src/test/regress/expected/task_tracker_assign_task.out @@ -14,55 +14,55 @@ SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_101101'''); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) -- After assigning the two tasks, we wait for them to make progress. Note that -- these tasks get scheduled and run asynchronously, so if the sleep interval is -- not enough, the regression tests may fail on an overloaded box. 
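-- (Illustrative sketch, not part of the recorded expected output: the wait
-- below is timing-sensitive by design, as the comment above notes. A more
-- robust variant would poll the status UDF the test already uses instead of
-- sleeping for a fixed interval; the only assumption is the status code 6,
-- which this test itself treats as "task finished".)
DO $$
DECLARE
    status int := 0;
BEGIN
    FOR i IN 1..30 LOOP                     -- poll for up to ~30 seconds
        status := task_tracker_task_status(401010, 101101);
        EXIT WHEN status = 6;               -- 6 = finished, per this test
        PERFORM pg_sleep(1);                -- back off between polls
    END LOOP;
END $$;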
SELECT pg_sleep(3.0); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_task_status(:JobId, :SimpleTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 5 (1 row) COPY :SimpleTaskTable FROM 'base/pgsql_job_cache/job_401010/task_101101'; SELECT COUNT(*) FROM :SimpleTaskTable; - count + count --------------------------------------------------------------------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :SelectAll FROM :SimpleTaskTable EXCEPT ALL :SelectAll FROM lineitem ) diff; - diff_lhs + diff_lhs --------------------------------------------------------------------- 0 (1 row) SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL :SelectAll FROM :SimpleTaskTable ) diff; - diff_rhs + diff_rhs --------------------------------------------------------------------- 0 (1 row) @@ -70,19 +70,19 @@ SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL -- We now reassign the recoverable task with a good query string. This updates -- the task's query string, and reschedules the updated task for execution. SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :GoodQueryString); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT pg_sleep(2.0); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 6 (1 row) diff --git a/src/test/regress/expected/task_tracker_cleanup_job.out b/src/test/regress/expected/task_tracker_cleanup_job.out index 112412a2f..8d44a9b1d 100644 --- a/src/test/regress/expected/task_tracker_cleanup_job.out +++ b/src/test/regress/expected/task_tracker_cleanup_job.out @@ -7,25 +7,25 @@ SET citus.next_shard_id TO 1060000; \set RunningTaskId 801108 -- Test worker_cleanup_job_schema_cache SELECT * FROM task_tracker_assign_task(2, 2, ''); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; - count + count --------------------------------------------------------------------- 1 (1 row) SELECT worker_cleanup_job_schema_cache(); - worker_cleanup_job_schema_cache + worker_cleanup_job_schema_cache --------------------------------------------------------------------- - + (1 row) SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002'; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -35,44 +35,44 @@ SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = 'pg_merge_job_0002' SELECT task_tracker_assign_task(:JobId, :CompletedTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_801107'''); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_assign_task(:JobId, :RunningTaskId, 'SELECT pg_sleep(100)'); - 
task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT pg_sleep(2.0); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RunningTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 3 (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); - isdir + isdir --------------------------------------------------------------------- f (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); - isdir + isdir --------------------------------------------------------------------- t (1 row) @@ -80,15 +80,15 @@ SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); -- We now clean up all tasks for this job id. As a result, shared hash entries, -- files, and connections associated with these tasks should all be cleaned up. SELECT task_tracker_cleanup_job(:JobId); - task_tracker_cleanup_job + task_tracker_cleanup_job --------------------------------------------------------------------- - + (1 row) SELECT pg_sleep(1.0); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); diff --git a/src/test/regress/expected/task_tracker_partition_task.out b/src/test/regress/expected/task_tracker_partition_task.out index 6dbf0f740..0bc0b1389 100644 --- a/src/test/regress/expected/task_tracker_partition_task.out +++ b/src/test/regress/expected/task_tracker_partition_task.out @@ -22,19 +22,19 @@ SELECT task_tracker_assign_task(:JobId, :PartitionTaskId, 'SELECT worker_range_partition_table(' '401010, 801106, ''SELECT * FROM lineitem'', ' '''l_orderkey'', 20, ARRAY[1000, 3000]::_int8)'); - task_tracker_assign_task + task_tracker_assign_task --------------------------------------------------------------------- - + (1 row) SELECT pg_sleep(4.0); - pg_sleep + pg_sleep --------------------------------------------------------------------- - + (1 row) SELECT task_tracker_task_status(:JobId, :PartitionTaskId); - task_tracker_task_status + task_tracker_task_status --------------------------------------------------------------------- 6 (1 row) @@ -43,13 +43,13 @@ COPY :TablePart00 FROM :'Table_File_00'; COPY :TablePart01 FROM :'Table_File_01'; COPY :TablePart02 FROM :'Table_File_02'; SELECT COUNT(*) FROM :TablePart00; - count + count --------------------------------------------------------------------- 1004 (1 row) SELECT COUNT(*) FROM :TablePart02; - count + count --------------------------------------------------------------------- 8970 (1 row) @@ -59,7 +59,7 @@ SELECT COUNT(*) FROM :TablePart02; SELECT COUNT(*) AS diff_lhs_00 FROM ( :SelectAll FROM :TablePart00 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 ) diff; - diff_lhs_00 + diff_lhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -68,7 +68,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :SelectAll FROM :TablePart01 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 ) diff; - diff_lhs_01 + diff_lhs_01 --------------------------------------------------------------------- 0 
(1 row) @@ -76,7 +76,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( SELECT COUNT(*) AS diff_lhs_02 FROM ( :SelectAll FROM :TablePart02 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 ) diff; - diff_lhs_02 + diff_lhs_02 --------------------------------------------------------------------- 0 (1 row) @@ -84,7 +84,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( SELECT COUNT(*) AS diff_rhs_00 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 EXCEPT ALL :SelectAll FROM :TablePart00 ) diff; - diff_rhs_00 + diff_rhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -93,7 +93,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 EXCEPT ALL :SelectAll FROM :TablePart01 ) diff; - diff_rhs_01 + diff_rhs_01 --------------------------------------------------------------------- 0 (1 row) @@ -101,7 +101,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( SELECT COUNT(*) AS diff_rhs_02 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 EXCEPT ALL :SelectAll FROM :TablePart02 ) diff; - diff_rhs_02 + diff_rhs_02 --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index e4c53db00..88e60c649 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -1,7 +1,7 @@ SET search_path TO upgrade_basic, public, pg_catalog; BEGIN; SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- upgrade_basic | r | r_pkey | | CREATE UNIQUE INDEX r_pkey ON upgrade_basic.r USING btree (a) upgrade_basic | t | t_a_idx | | CREATE INDEX t_a_idx ON upgrade_basic.t USING hash (a) @@ -9,31 +9,31 @@ SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' ORDER BY tablename; (3 rows) SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard; - ?column? + ?column? --------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement; - ?column? + ?column? --------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node; - ?column? + ?column? --------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node; - ?column? + ?column? --------------------------------------------------------------------- t (1 row) SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation; - ?column? + ?column? 
--------------------------------------------------------------------- t (1 row) @@ -50,7 +50,7 @@ SELECT sequence_name FROM information_schema.sequences 'pg_dist_node_nodeid_seq', 'pg_dist_colocationid_seq' ); - sequence_name + sequence_name --------------------------------------------------------------------- (0 rows) @@ -61,7 +61,7 @@ SELECT logicalrelid FROM pg_dist_partition refobjid=(select oid FROM pg_extension WHERE extname = 'citus') AND relnamespace='upgrade_basic'::regnamespace ORDER BY logicalrelid; - logicalrelid + logicalrelid --------------------------------------------------------------------- t tp @@ -79,7 +79,7 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 relnamespace='upgrade_basic'::regnamespace AND tgname LIKE 'truncate_trigger_%' ORDER BY tgrelid::regclass; - tgrelid | tgfoid | tgisinternal | tgenabled | tgtype + tgrelid | tgfoid | tgisinternal | tgenabled | tgtype --------------------------------------------------------------------- t | citus_truncate_trigger | t | O | 00100000 tp | citus_truncate_trigger | t | O | 00100000 @@ -90,7 +90,7 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 (6 rows) SELECT * FROM t ORDER BY a; - a + a --------------------------------------------------------------------- 1 2 @@ -100,14 +100,14 @@ SELECT * FROM t ORDER BY a; (5 rows) SELECT * FROM t WHERE a = 1; - a + a --------------------------------------------------------------------- 1 (1 row) INSERT INTO t SELECT * FROM generate_series(10, 15); EXPLAIN (COSTS FALSE) SELECT * from t; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 32 @@ -118,7 +118,7 @@ EXPLAIN (COSTS FALSE) SELECT * from t; (6 rows) EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) Task Count: 1 @@ -132,13 +132,13 @@ EXPLAIN (COSTS FALSE) SELECT * from t WHERE a = 1; (9 rows) SELECT * FROM t WHERE a = 10; - a + a --------------------------------------------------------------------- 10 (1 row) SELECT * FROM t WHERE a = 11; - a + a --------------------------------------------------------------------- 11 (1 row) @@ -146,7 +146,7 @@ SELECT * FROM t WHERE a = 11; COPY t FROM PROGRAM 'echo 20 && echo 21 && echo 22 && echo 23 && echo 24' WITH CSV; ALTER TABLE t ADD COLUMN b int DEFAULT 10; SELECT * FROM t ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 1 | 10 2 | 10 @@ -168,7 +168,7 @@ SELECT * FROM t ORDER BY a; TRUNCATE TABLE t; SELECT * FROM T; - a | b + a | b --------------------------------------------------------------------- (0 rows) @@ -176,7 +176,7 @@ DROP TABLE t; \d t -- verify that the table whose column is dropped before a pg_upgrade still works as expected. 
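-- (Illustrative sketch, not part of the recorded expected output: a column
-- dropped before pg_upgrade survives in the catalog as a placeholder entry,
-- so the pre-upgrade drop can also be confirmed through the standard
-- pg_attribute.attisdropped flag.)
SELECT attnum, attisdropped
FROM pg_catalog.pg_attribute
WHERE attrelid = 't_ab'::regclass AND attnum > 0
ORDER BY attnum;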
SELECT * FROM t_ab ORDER BY b; - b + b --------------------------------------------------------------------- 11 22 @@ -184,13 +184,13 @@ SELECT * FROM t_ab ORDER BY b; (3 rows) SELECT * FROM t_ab WHERE b = 11; - b + b --------------------------------------------------------------------- 11 (1 row) SELECT * FROM t_ab WHERE b = 22; - b + b --------------------------------------------------------------------- 22 (1 row) @@ -198,7 +198,7 @@ SELECT * FROM t_ab WHERE b = 22; -- Check that we can create a distributed table out of a table that was created -- before the upgrade SELECT * FROM t2 ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 1 | 11 2 | 22 @@ -207,13 +207,13 @@ SELECT * FROM t2 ORDER BY a; SELECT create_distributed_table('t2', 'a'); NOTICE: Copying data from local table... - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) SELECT * FROM t2 ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 1 | 11 2 | 22 @@ -224,7 +224,7 @@ ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM r ORDER BY a; - a + a --------------------------------------------------------------------- 1 2 @@ -234,7 +234,7 @@ SELECT * FROM r ORDER BY a; (5 rows) SELECT * FROM tr ORDER BY pk; - pk | a + pk | a --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -245,7 +245,7 @@ SELECT * FROM tr ORDER BY pk; DELETE FROM r where a = 1; SELECT * FROM r ORDER BY a; - a + a --------------------------------------------------------------------- 2 3 @@ -254,7 +254,7 @@ SELECT * FROM r ORDER BY a; (4 rows) SELECT * FROM tr ORDER BY pk; - pk | a + pk | a --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -264,7 +264,7 @@ SELECT * FROM tr ORDER BY pk; UPDATE r SET a = 30 WHERE a = 3; SELECT * FROM r ORDER BY a; - a + a --------------------------------------------------------------------- 2 4 @@ -273,7 +273,7 @@ SELECT * FROM r ORDER BY a; (4 rows) SELECT * FROM tr ORDER BY pk; - pk | a + pk | a --------------------------------------------------------------------- 2 | 2 3 | 30 @@ -284,16 +284,16 @@ SELECT * FROM tr ORDER BY pk; -- Check we can still create distributed tables after upgrade CREATE TABLE t3(a int, b int); SELECT create_distributed_table('t3', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO t3 VALUES (1, 11); INSERT INTO t3 VALUES (2, 22); INSERT INTO t3 VALUES (3, 33); SELECT * FROM t3 ORDER BY a; - a | b + a | b --------------------------------------------------------------------- 1 | 11 2 | 22 @@ -303,14 +303,14 @@ SELECT * FROM t3 ORDER BY a; SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; - shardminvalue | shardmaxvalue + shardminvalue | shardmaxvalue --------------------------------------------------------------------- 1 | 3 5 | 7 (2 rows) SELECT * FROM t_append ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 2 2 | 3 @@ -324,7 +324,7 @@ SELECT * FROM t_append ORDER BY id; SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 't_append'::regclass ORDER BY shardminvalue, shardmaxvalue; - shardminvalue | shardmaxvalue + shardminvalue | shardmaxvalue 
--------------------------------------------------------------------- 1 | 3 5 | 7 @@ -332,7 +332,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard (3 rows) SELECT * FROM t_append ORDER BY id; - id | value_1 + id | value_1 --------------------------------------------------------------------- 1 | 2 2 | 3 diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index 5a81db535..1555d4f81 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -3,17 +3,17 @@ SET search_path TO upgrade_basic, public; CREATE TABLE t(a int); CREATE INDEX ON t USING HASH (a); SELECT create_distributed_table('t', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO t SELECT * FROM generate_series(1, 5); CREATE TABLE tp(a int PRIMARY KEY); SELECT create_distributed_table('tp', 'a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tp SELECT * FROM generate_series(1, 5); @@ -22,9 +22,9 @@ INSERT INTO tp SELECT * FROM generate_series(1, 5); -- distribution column. The index information is in partkey column of pg_dist_partition table. CREATE TABLE t_ab(a int, b int); SELECT create_distributed_table('t_ab', 'b'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO t_ab VALUES (1, 11); @@ -38,25 +38,25 @@ ALTER TABLE t_ab DROP a; -- Check that basic reference tables work CREATE TABLE r(a int PRIMARY KEY); SELECT create_reference_table('r'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) INSERT INTO r SELECT * FROM generate_series(1, 5); CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('tr', 'pk'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; CREATE TABLE t_append(id int, value_1 int); SELECT master_create_distributed_table('t_append', 'id', 'append'); - master_create_distributed_table + master_create_distributed_table --------------------------------------------------------------------- - + (1 row) \copy t_append FROM STDIN DELIMITER ',' diff --git a/src/test/regress/expected/upgrade_distributed_function_after.out b/src/test/regress/expected/upgrade_distributed_function_after.out index b1515de0a..9b11d029d 100644 --- a/src/test/regress/expected/upgrade_distributed_function_after.out +++ b/src/test/regress/expected/upgrade_distributed_function_after.out @@ -5,7 +5,7 @@ SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = --------------------------------------------------------------------- t (1 row) - + SET client_min_messages TO DEBUG1; -- these are simple select functions, so doesn't have any -- side effects, safe to be called without BEGIN;..;ROLLBACK; @@ -15,7 +15,7 @@ DEBUG: pushing down the function call --------------------------------------------------------------------- 1 (1 row) - + SELECT count_values(12); DEBUG: pushing down the function call count_values diff --git a/src/test/regress/expected/upgrade_distributed_function_before.out 
b/src/test/regress/expected/upgrade_distributed_function_before.out index a5639468a..fb6440dfa 100644 --- a/src/test/regress/expected/upgrade_distributed_function_before.out +++ b/src/test/regress/expected/upgrade_distributed_function_before.out @@ -4,9 +4,9 @@ SET citus.replication_model TO streaming; SET citus.shard_replication_factor TO 1; CREATE TABLE t1 (a int PRIMARY KEY, b int); SELECT create_distributed_table('t1','a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO t1 VALUES (11), (12); @@ -21,20 +21,20 @@ $$ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('count_values(int)', '$1', colocate_with:='t1'); - create_distributed_function + create_distributed_function --------------------------------------------------------------------- - + (1 row) -- make sure that the metadata synced before running the queries SELECT wait_until_metadata_sync(); - wait_until_metadata_sync + wait_until_metadata_sync --------------------------------------------------------------------- - + (1 row) SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; - bool_and + bool_and --------------------------------------------------------------------- t (1 row) @@ -42,14 +42,14 @@ SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE isactive AND noderole = SET client_min_messages TO DEBUG1; SELECT count_values(11); DEBUG: pushing down the function call - count_values + count_values --------------------------------------------------------------------- 1 (1 row) SELECT count_values(12); DEBUG: pushing down the function call - count_values + count_values --------------------------------------------------------------------- 1 (1 row) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_after.out b/src/test/regress/expected/upgrade_rebalance_strategy_after.out index 51795030d..36dd71b6c 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_after.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_after.out @@ -1,5 +1,5 @@ SELECT * FROM pg_catalog.pg_dist_rebalance_strategy ORDER BY name; - name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold + name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold --------------------------------------------------------------------- by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out index 65808f9e6..0a12b1d60 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out @@ -24,15 +24,15 @@ SELECT citus_add_rebalance_strategy( 0.5, 0.2 ); - citus_add_rebalance_strategy + citus_add_rebalance_strategy --------------------------------------------------------------------- - + (1 row) SELECT citus_set_default_rebalance_strategy('custom_strategy'); - citus_set_default_rebalance_strategy + citus_set_default_rebalance_strategy --------------------------------------------------------------------- - + (1 row) ALTER TABLE 
pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger; diff --git a/src/test/regress/expected/upgrade_ref2ref_after.out b/src/test/regress/expected/upgrade_ref2ref_after.out index b00a021ef..ec8dc5d92 100644 --- a/src/test/regress/expected/upgrade_ref2ref_after.out +++ b/src/test/regress/expected/upgrade_ref2ref_after.out @@ -2,7 +2,7 @@ SET search_path TO upgrade_ref2ref, public; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM ref_table_1 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -12,7 +12,7 @@ SELECT * FROM ref_table_1 ORDER BY id; (5 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -22,7 +22,7 @@ SELECT * FROM ref_table_2 ORDER BY id; (5 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -32,7 +32,7 @@ SELECT * FROM ref_table_3 ORDER BY id; (5 rows) SELECT * FROM dist_table ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -43,7 +43,7 @@ SELECT * FROM dist_table ORDER BY id; UPDATE ref_table_1 SET id = 10 where id = 1; SELECT * FROM ref_table_1 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -53,7 +53,7 @@ SELECT * FROM ref_table_1 ORDER BY id; (5 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 10 2 | 2 @@ -63,7 +63,7 @@ SELECT * FROM ref_table_2 ORDER BY id; (5 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -73,7 +73,7 @@ SELECT * FROM ref_table_3 ORDER BY id; (5 rows) SELECT * FROM dist_table ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -84,7 +84,7 @@ SELECT * FROM dist_table ORDER BY id; DELETE FROM ref_table_1 WHERE id = 4; SELECT * FROM ref_table_1 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 2 | 2 3 | 3 @@ -93,7 +93,7 @@ SELECT * FROM ref_table_1 ORDER BY id; (4 rows) SELECT * FROM ref_table_2 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 10 2 | 2 @@ -102,7 +102,7 @@ SELECT * FROM ref_table_2 ORDER BY id; (4 rows) SELECT * FROM ref_table_3 ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 @@ -111,7 +111,7 @@ SELECT * FROM ref_table_3 ORDER BY id; (4 rows) SELECT * FROM dist_table ORDER BY id; - id | value + id | value --------------------------------------------------------------------- 1 | 1 2 | 2 diff --git a/src/test/regress/expected/upgrade_ref2ref_before.out b/src/test/regress/expected/upgrade_ref2ref_before.out index 9b8faf074..ea91f9566 100644 --- a/src/test/regress/expected/upgrade_ref2ref_before.out +++ b/src/test/regress/expected/upgrade_ref2ref_before.out @@ -2,30 +2,30 @@ CREATE SCHEMA upgrade_ref2ref; SET search_path TO upgrade_ref2ref, public; CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); - create_reference_table + 
create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE ref_table_2(id int PRIMARY KEY, value int REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_2'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE ref_table_3(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_reference_table('ref_table_3'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); SELECT create_distributed_table('dist_table', 'id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO ref_table_1 SELECT c, c FROM generate_series(1, 5) as c; diff --git a/src/test/regress/expected/upgrade_type_after.out b/src/test/regress/expected/upgrade_type_after.out index 6d6eb5496..a9a96c490 100644 --- a/src/test/regress/expected/upgrade_type_after.out +++ b/src/test/regress/expected/upgrade_type_after.out @@ -4,7 +4,7 @@ SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; -- test distributed type INSERT INTO tt VALUES (1, (2,3)::type1); SELECT * FROM tt; - a | b + a | b --------------------------------------------------------------------- 1 | (2,3) 2 | (3,4) diff --git a/src/test/regress/expected/upgrade_type_before.out b/src/test/regress/expected/upgrade_type_before.out index 50bf00ddd..97b613748 100644 --- a/src/test/regress/expected/upgrade_type_before.out +++ b/src/test/regress/expected/upgrade_type_before.out @@ -3,9 +3,9 @@ SET search_path TO upgrade_type, public; CREATE TYPE type1 AS (a int, b int); CREATE TABLE tt (a int PRIMARY KEY, b type1); SELECT create_distributed_table('tt','a'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO tt VALUES (2, (3,4)::type1); diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index d52341f3f..a3f75a2c5 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -55,23 +55,23 @@ CREATE VIEW constraint_validations AS AND contype = 'c'; CREATE TABLE referenced_table (id int UNIQUE, test_column int); SELECT create_reference_table('referenced_table'); - create_reference_table + create_reference_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE referencing_table (id int, ref_id int); SELECT create_distributed_table('referencing_table', 'ref_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE constrained_table (id int, constrained_column int); SELECT create_distributed_table('constrained_table', 'constrained_column'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) -- The two constraint types that are allowed to be NOT VALID @@ -111,7 +111,7 @@ ALTER TABLE constrained_table SELECT * FROM constraint_validations ORDER BY 1, 2; - Constraint | Validated? + Constraint | Validated? 
--------------------------------------------------------------------- validatable_constraint | t (1 row) @@ -119,7 +119,7 @@ ORDER BY 1, 2; SELECT * FROM constraint_validations_in_workers ORDER BY 1, 2; - name | validated + name | validated --------------------------------------------------------------------- validatable_constraint_8000009 | t validatable_constraint_8000010 | t diff --git a/src/test/regress/expected/window_functions.out b/src/test/regress/expected/window_functions.out index 3f838a4cc..bc5d45914 100644 --- a/src/test/regress/expected/window_functions.out +++ b/src/test/regress/expected/window_functions.out @@ -11,7 +11,7 @@ FROM ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 5; - user_id | count | rank + user_id | count | rank --------------------------------------------------------------------- 6 | 10 | 1 6 | 10 | 1 @@ -30,7 +30,7 @@ GROUP BY 1 ORDER BY 2 DESC NULLS LAST, 1 DESC; - user_id | avg + user_id | avg --------------------------------------------------------------------- 2 | 3 4 | 2.82608695652174 @@ -57,7 +57,7 @@ GROUP BY 1, value_1 ORDER BY 2 DESC, 1; - user_id | max + user_id | max --------------------------------------------------------------------- 1 | 5 3 | 5 @@ -110,7 +110,7 @@ ORDER BY 1, 2 LIMIT 5; - user_id | sum + user_id | sum --------------------------------------------------------------------- 1 | 13 1 | 13 @@ -134,7 +134,7 @@ GROUP BY ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; - user_id | value_1 | sum + user_id | value_1 | sum --------------------------------------------------------------------- 5 | 5 | 15 4 | 5 | 15 @@ -159,7 +159,7 @@ FROM ORDER BY 2 DESC, 1 LIMIT 10; - user_id | rank + user_id | rank --------------------------------------------------------------------- 5 | 6 2 | 5 @@ -185,7 +185,7 @@ GROUP BY HAVING count(*) > 4 ORDER BY 2 DESC, 1; - user_id | rank + user_id | rank --------------------------------------------------------------------- 4 | 2 5 | 2 @@ -208,7 +208,7 @@ WINDOW ORDER BY rnk DESC, 1 DESC LIMIT 10; - user_id | rnk + user_id | rnk --------------------------------------------------------------------- 3 | 121 5 | 118 @@ -234,7 +234,7 @@ WINDOW ORDER BY rnk DESC, 1 DESC LIMIT 10; - user_id | rnk + user_id | rnk --------------------------------------------------------------------- 2 | 24 2 | 23 @@ -274,7 +274,7 @@ WINDOW my_win AS (PARTITION BY user_id ORDER BY avg(event_type) DESC) ORDER BY 3 DESC, 2 DESC, 1 DESC; - user_id | rnk | avg_val_2 + user_id | rnk | avg_val_2 --------------------------------------------------------------------- 1 | 1 | 3.3750000000000000 3 | 2 | 3.1666666666666667 @@ -313,7 +313,7 @@ WINDOW ORDER BY cnt_with_filter_2 DESC NULLS LAST, filtered_count DESC NULLS LAST, datee DESC NULLS LAST, rnnk DESC, cnt2 DESC, cnt1 DESC, user_id DESC LIMIT 5; - count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2 + count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2 --------------------------------------------------------------------- 23 | 1 | 7 | Thu Nov 23 02:14:00 2017 | 6 | 0.00000000000000000000 | 72.7272727272727 10 | 1 | 3 | Wed Nov 22 23:01:00 2017 | 1 | 1.00000000000000000000 | 57.1428571428571 @@ -341,7 +341,7 @@ ORDER BY mx_time DESC, my_rank DESC, user_id DESC; - user_id | my_rank | avg | mx_time + user_id | my_rank | avg | mx_time --------------------------------------------------------------------- 6 | 1 | 3.0000000000000000 | Thu Nov 23 14:00:13.20013 2017 6 | 2 | 3.0000000000000000 | Thu Nov 23 11:16:13.106691 2017 @@ -394,7 +394,7 @@ GROUP BY 1 ORDER BY 4 DESC,3 DESC,2 
DESC ,1 DESC; - user_id | rank | dense_rank | cume_dist | percent_rank + user_id | rank | dense_rank | cume_dist | percent_rank --------------------------------------------------------------------- 6 | 1 | 1 | 1 | 0 5 | 1 | 1 | 1 | 0 @@ -416,9 +416,9 @@ WHERE user_id > 2 AND user_id < 6 ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg + user_id | value_1 | array_agg | array_agg --------------------------------------------------------------------- - 3 | 0 | {0} | + 3 | 0 | {0} | 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} @@ -501,7 +501,7 @@ WINDOW range_window_exclude as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg + user_id | value_1 | array_agg | array_agg --------------------------------------------------------------------- 3 | 0 | {0,1,1,1,1,1,1} | {1,1,1,1,1,1} 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} @@ -586,7 +586,7 @@ WINDOW row_window_exclude as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) ORDER BY user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg + user_id | value_1 | array_agg | array_agg --------------------------------------------------------------------- 3 | 0 | {0,1} | {1} 3 | 1 | {0,1,1} | {0,1} @@ -669,7 +669,7 @@ ORDER BY 2 DESC, 3 DESC, 1 DESC LIMIT 5; - user_id | sum | event_type + user_id | sum | event_type --------------------------------------------------------------------- 4 | 4 | 4 3 | 4 | 4 @@ -690,7 +690,7 @@ ORDER BY 2 DESC, 1 LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 5 | 3 4 | 2 @@ -707,7 +707,7 @@ ORDER BY 1, 2 DESC LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 4 | 2 5 | 3 @@ -724,7 +724,7 @@ ORDER BY (SUM(value_1) OVER (PARTITION BY user_id)) , 2 DESC, 1 LIMIT 10; - user_id | sum + user_id | sum --------------------------------------------------------------------- 5 | 3 4 | 2 @@ -741,7 +741,7 @@ GROUP BY 1 ORDER BY 3 DESC, 2 DESC, 1 DESC; - user_id | avg | avg + user_id | avg | avg --------------------------------------------------------------------- 6 | 2.1000000000000000 | 6.0000000000000000 5 | 2.6538461538461538 | 5.0000000000000000 @@ -764,7 +764,7 @@ GROUP BY ORDER BY 3 DESC, 2 DESC, 1 DESC; $Q$); - coordinator_plan + coordinator_plan --------------------------------------------------------------------- Sort Sort Key: remote_scan.avg_1 DESC, remote_scan.avg DESC, remote_scan.user_id DESC @@ -784,7 +784,7 @@ GROUP BY user_id, value_2 ORDER BY user_id, value_2; - user_id | ?column? | ?column? + user_id | ?column? | ?column? --------------------------------------------------------------------- 1 | 5 | 3.2500000000000000 1 | 4 | 3.2500000000000000 @@ -831,7 +831,7 @@ GROUP BY ORDER BY 2 DESC, 1 LIMIT 5; - user_id | ?column? | ?column? + user_id | ?column? | ?column? 
--------------------------------------------------------------------- 4 | 28 | 3.5000000000000000 5 | 24 | 3.5000000000000000 @@ -849,7 +849,7 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, value_2 DESC; - user_id | avg | rank + user_id | avg | rank --------------------------------------------------------------------- 1 | 3.6666666666666667 | 4 1 | 2.5000000000000000 | 3 @@ -894,7 +894,7 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank + user_id | avg | rank --------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 @@ -939,7 +939,7 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id, ((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1))) DESC @@ -967,7 +967,7 @@ FROM users_table GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank + user_id | avg | rank --------------------------------------------------------------------- 1 | 4.0000000000000000 | 1 1 | 3.6666666666666667 | 2 @@ -1014,7 +1014,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1044,7 +1044,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1074,7 +1074,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort @@ -1104,7 +1104,7 @@ FROM GROUP BY user_id, value_2 ORDER BY user_id, avg(value_1) DESC LIMIT 5; - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Limit -> Sort diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index d821991b4..8a6df96a5 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -6,7 +6,7 @@ WITH cte AS ( SELECT user_id, value_2 from users_table WHERE user_id IN (1, 2) ORDER BY 1,2 LIMIT 5 ) SELECT * FROM cte; - user_id | value_2 + user_id | value_2 --------------------------------------------------------------------- 1 | 0 1 | 2 @@ -29,7 +29,7 @@ ORDER BY value_2 LIMIT 5; - value_2 + value_2 --------------------------------------------------------------------- 0 0 @@ -49,7 +49,7 @@ WITH cte_1 AS ( SELECT user_id FROM cte_1_2 ORDER BY user_id ) SELECT value_2 FROM users_table WHERE user_id IN (SELECT user_id FROM cte_1) ORDER BY value_2 LIMIT 1; - value_2 + value_2 --------------------------------------------------------------------- 0 (1 row) @@ -71,7 +71,7 @@ ORDER BY 1, 2 LIMIT 5; - max | value_2 + max | value_2 --------------------------------------------------------------------- 5 | 5 6 | 5 @@ -87,7 +87,7 @@ SELECT user_id FROM ( ) SELECT user_id FROM cte WHERE value_2 > 0 ) a ORDER BY 1 LIMIT 3; - user_id + user_id --------------------------------------------------------------------- 2 2 @@ -115,7 +115,7 @@ WITH cte AS ( SELECT user_id FROM users_table WHERE value_2 IN (1, 2) ) SELECT (SELECT * FROM cte ORDER BY 1 LIMIT 1); - user_id + user_id 
--------------------------------------------------------------------- 1 (1 row) @@ -134,7 +134,7 @@ GROUP BY ORDER BY 1, 2 LIMIT 5; - user_id | count + user_id | count --------------------------------------------------------------------- 1 | 7 2 | 18 @@ -160,7 +160,7 @@ HAVING ORDER BY 1, 2 LIMIT 5; - twice | min + twice | min --------------------------------------------------------------------- 6 | 3 8 | 4 @@ -182,7 +182,7 @@ ORDER BY user_id LIMIT 1; - user_id + user_id --------------------------------------------------------------------- 2 (1 row) @@ -201,7 +201,7 @@ ORDER BY user_id LIMIT 1; - user_id + user_id --------------------------------------------------------------------- 2 (1 row) @@ -216,7 +216,7 @@ FROM (SELECT min(user_id) AS user_id FROM top_users) top_users JOIN users_table USING (user_id); - user_id + user_id --------------------------------------------------------------------- 6 6 @@ -235,7 +235,7 @@ WITH top_ten(id, val1) AS ( SELECT user_id, value_1 FROM users_table ORDER BY value_1 DESC, user_id DESC LIMIT 10 ) SELECT * FROM top_ten; - id | val1 + id | val1 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -260,7 +260,7 @@ WITH top_ten(id) AS ( SELECT user_id, value_1 FROM users_table ORDER BY value_1 DESC, user_id DESC LIMIT 10 ) SELECT * FROM top_ten ORDER BY value_1 DESC; - id | value_1 + id | value_1 --------------------------------------------------------------------- 6 | 5 6 | 5 @@ -279,7 +279,7 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( SELECT user_id, value_1, value_1*2, value_1 + value_2 FROM users_table ORDER BY value_1 DESC, user_id DESC, value_2 DESC LIMIT 10 ) SELECT * FROM top_ten ORDER BY id DESC, val_mul DESC, (val_sum + 1) DESC; - id | val | val_mul | val_sum + id | val | val_mul | val_sum --------------------------------------------------------------------- 6 | 5 | 10 | 7 6 | 5 | 10 | 5 @@ -298,7 +298,7 @@ WITH top_ten(id, val, val_mul, val_sum) AS ( SELECT user_id, value_1, value_1*2, value_1 + value_2 FROM users_table ORDER BY value_1 DESC, value_2 DESC, user_id DESC LIMIT 10 ) SELECT id, val, id * val, val_sum * 2, val_sum + val_sum FROM top_ten ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; - id | val | ?column? | ?column? | ?column? + id | val | ?column? | ?column? | ?column? 
 ---------------------------------------------------------------------
 6 | 5 | 30 | 14 | 14
 6 | 5 | 30 | 10 | 10
@@ -319,7 +319,7 @@ WITH top_ten(id, val, val_mul, val_sum) AS (
 SELECT id, count(*), avg(val), max(val_mul), min(val_sum)
 FROM top_ten
 GROUP BY id ORDER BY 2 DESC, 1 DESC;
- id | count | avg | max | min 
+ id | count | avg | max | min
 ---------------------------------------------------------------------
 5 | 26 | 2.6538461538461538 | 10 | 2
 4 | 23 | 2.7391304347826087 | 10 | 0
@@ -343,7 +343,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 6
@@ -366,7 +366,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 6
@@ -389,7 +389,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 6
@@ -412,7 +412,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 6
@@ -438,7 +438,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 6
 6
@@ -482,7 +482,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 1
 1
@@ -520,7 +520,7 @@
 ORDER BY
 user_id
 LIMIT 5;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 1
 1
@@ -545,7 +545,7 @@
 ORDER BY
 2 DESC, 1
 LIMIT 5;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 3 | 651
 2 | 552
@@ -565,18 +565,18 @@
 ORDER BY
 1,2,3,4,5,6
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | 
- 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | 
- 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | 
- 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | 
- 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | 
- 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | 
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | 
- 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | 
- 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | 
+ 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 |
+ 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 |
+ 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 |
+ 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 |
+ 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 |
+ 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 |
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 |
+ 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 |
+ 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 |
 (10 rows)

 SELECT * FROM (
@@ -589,18 +589,18 @@
 ORDER BY
 1,2,3,4,5,6
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 | 
- 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 | 
- 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 | 
- 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 | 
- 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 | 
- 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 | 
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 | 
- 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 | 
- 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 | 
+ 1 | Wed Nov 22 18:49:42.327403 2017 | 3 | 2 | 1 |
+ 1 | Wed Nov 22 19:03:01.772353 2017 | 4 | 1 | 2 |
+ 1 | Wed Nov 22 19:07:03.846437 2017 | 1 | 2 | 5 |
+ 1 | Wed Nov 22 20:56:21.122638 2017 | 2 | 4 | 4 |
+ 1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 |
+ 1 | Wed Nov 22 21:47:04.188168 2017 | 4 | 2 | 0 |
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Wed Nov 22 23:22:09.957743 2017 | 1 | 1 | 1 |
+ 1 | Thu Nov 23 00:42:37.237615 2017 | 2 | 4 | 3 |
+ 1 | Thu Nov 23 02:59:23.620864 2017 | 4 | 5 | 4 |
 (10 rows)

 -- SELECT * FROM (SELECT * FROM cte UNION SELECT * FROM cte) a; should work
@@ -615,13 +615,13 @@
 ORDER BY
 1,2,3,4,5,6
 LIMIT 5;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
 (5 rows)

 WITH cte AS (
@@ -631,18 +631,18 @@ cte_2 AS (
 SELECT * FROM users_table WHERE user_id IN (3, 4) ORDER BY 1,2,3 LIMIT 5
 )
 SELECT * FROM cte UNION ALL SELECT * FROM cte_2;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 
- 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 
- 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | 
- 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | 
- 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 |
+ 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 |
+ 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 |
+ 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 |
+ 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 |
 (10 rows)

 -- basic recursive CTE which should all error out
@@ -733,7 +733,7 @@ WITH cte_user AS (
 SELECT basic_view.user_id,events_table.value_2 FROM basic_view join events_table on (basic_view.user_id = events_table.user_id)
 )
 SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 294
 2 | 1026
@@ -744,7 +744,7 @@ SELECT user_id, sum(value_2) FROM cte_user GROUP BY 1 ORDER BY 1, 2;
 (6 rows)

 SELECT * FROM cte_view ORDER BY 1, 2 LIMIT 5;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 1 | 5
 2 | 4
@@ -758,7 +758,7 @@ WITH cte_user_with_view AS
 (
 SELECT * FROM cte_view WHERE user_id < 3
 )
 SELECT user_id, value_1 FROM cte_user_with_view ORDER BY 1, 2 LIMIT 10 OFFSET 2;
- user_id | value_1 
+ user_id | value_1
 ---------------------------------------------------------------------
 (0 rows)

diff --git a/src/test/regress/expected/with_dml.out b/src/test/regress/expected/with_dml.out
index 7fea4642c..620bb85b9 100644
--- a/src/test/regress/expected/with_dml.out
+++ b/src/test/regress/expected/with_dml.out
@@ -2,23 +2,23 @@ CREATE SCHEMA with_dml;
 SET search_path TO with_dml, public;
 CREATE TABLE with_dml.distributed_table (tenant_id text PRIMARY KEY, dept int);
 SELECT create_distributed_table('distributed_table', 'tenant_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE with_dml.second_distributed_table (tenant_id text, dept int);
 SELECT create_distributed_table('second_distributed_table', 'tenant_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE with_dml.reference_table (id text, name text);
 SELECT create_reference_table('reference_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO distributed_table SELECT i::text, i % 10 FROM generate_series (0, 100) i;
diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out
index 7ce963c2d..5c1fb8317 100644
--- a/src/test/regress/expected/with_executors.out
+++ b/src/test/regress/expected/with_executors.out
@@ -3,7 +3,7 @@ CREATE SCHEMA with_executors;
 SET search_path TO with_executors, public;
 SET citus.enable_repartition_joins TO on;
 CREATE TABLE with_executors.local_table (id int);
-INSERT INTO local_table VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); 
+INSERT INTO local_table VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
 -- CTEs should be able to use local queries
 WITH cte AS (
 WITH local_cte AS (
@@ -15,7 +15,7 @@ WITH cte AS (
 SELECT * FROM local_cte join dist_cte on dist_cte.user_id=local_cte.id
 )
 SELECT count(*) FROM cte;
- count 
+ count
 ---------------------------------------------------------------------
 101
 (1 row)
@@ -33,7 +33,7 @@ WITH cte AS (
 SELECT * FROM merger_cte WHERE user_id IN (1, 2, 3)
 )
 SELECT * FROM cte ORDER BY 1;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 1
 2
@@ -49,32 +49,32 @@ WITH cte AS (
 )
 SELECT local_cte.id as id_1, local_cte_2.id as id_2 FROM local_cte,local_cte_2
 )
-SELECT 
- * 
-FROM 
- cte 
-join 
- users_table 
-on 
- cte.id_1 = users_table.user_id 
-WHERE 
+SELECT
+ *
+FROM
+ cte
+join
+ users_table
+on
+ cte.id_1 = users_table.user_id
+WHERE
 cte.id_1 IN (3, 4, 5)
 ORDER BY 1,2,3,4,5,6,7
 LIMIT 10;
- id_1 | id_2 | user_id | time | value_1 | value_2 | value_3 | value_4 
+ id_1 | id_2 | user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 3 | 6 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 
- 3 | 6 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 
- 3 | 6 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | 
- 3 | 6 | 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | 
- 3 | 6 | 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | 
- 3 | 6 | 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | 
- 3 | 6 | 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | 
- 3 | 6 | 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | 
- 3 | 6 | 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | 
- 3 | 6 | 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | 
+ 3 | 6 | 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 |
+ 3 | 6 | 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 |
+ 3 | 6 | 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 |
+ 3 | 6 | 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 |
+ 3 | 6 | 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 |
+ 3 | 6 | 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 |
+ 3 | 6 | 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 |
+ 3 | 6 | 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 |
+ 3 | 6 | 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 |
+ 3 | 6 | 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 |
 (10 rows)

 -- CTEs should be able to use router queries
@@ -85,13 +85,13 @@ WITH cte AS (
 router_cte_2 AS (
 SELECT user_id, event_type, value_2 FROM events_table WHERE user_id = 1
 )
- SELECT 
- router_cte.user_id as uid, event_type 
- FROM 
+ SELECT
+ router_cte.user_id as uid, event_type
+ FROM
 router_cte, router_cte_2
 )
 SELECT * FROM cte ORDER BY 2 LIMIT 5;
- uid | event_type 
+ uid | event_type
 ---------------------------------------------------------------------
 1 | 0
 1 | 0
@@ -105,18 +105,18 @@ WITH real_time_cte AS (
 SELECT * FROM users_table WHERE value_2 IN (1, 2, 3)
 )
 SELECT * FROM real_time_cte ORDER BY 1, 2, 3, 4, 5, 6 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 -- router & real-time together
@@ -127,15 +127,15 @@ WITH cte AS (
 real_time AS (
 SELECT user_id, event_type, value_2 FROM events_table
 )
- SELECT 
- router_cte.user_id as uid, event_type 
- FROM 
- router_cte, real_time 
- WHERE 
+ SELECT
+ router_cte.user_id as uid, event_type
+ FROM
+ router_cte, real_time
+ WHERE
 router_cte.user_id=real_time.user_id
 )
 SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5;
- uid | event_type 
+ uid | event_type
 ---------------------------------------------------------------------
 1 | 0
 1 | 0
@@ -147,46 +147,46 @@ SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5;
 -- CTEs should be able to use task-tracker queries
 WITH cte AS (
 WITH task_tracker_1 AS (
- SELECT 
- users_table.user_id as uid_1, users_table.value_2 
- FROM 
- users_table 
+ SELECT
+ users_table.user_id as uid_1, users_table.value_2
+ FROM
+ users_table
 JOIN
- events_table 
- ON 
+ events_table
+ ON
 users_table.value_2=events_table.value_2
 ),
 task_tracker_2 AS (
- SELECT 
- users_table.user_id as uid_2, users_table.value_3 
- FROM 
- users_table 
- JOIN 
- events_table 
- ON 
+ SELECT
+ users_table.user_id as uid_2, users_table.value_3
+ FROM
+ users_table
+ JOIN
+ events_table
+ ON
 users_table.value_3=events_table.value_3
 )
- SELECT 
+ SELECT
 uid_1, uid_2, value_2, value_3
- FROM 
+ FROM
 task_tracker_1 JOIN task_tracker_2
- ON 
+ ON
 value_2 = value_3
 )
-SELECT 
- uid_1, uid_2, cte.value_2, cte.value_3 
-FROM 
- cte 
-JOIN 
+SELECT
+ uid_1, uid_2, cte.value_2, cte.value_3
+FROM
+ cte
+JOIN
 events_table
 ON cte.value_2 = events_table.event_type
-ORDER BY 
- 1, 2, 3, 4 
+ORDER BY
+ 1, 2, 3, 4
 LIMIT 10;
- uid_1 | uid_2 | value_2 | value_3 
+ uid_1 | uid_2 | value_2 | value_3
 ---------------------------------------------------------------------
 1 | 1 | 0 | 0
 1 | 1 | 0 | 0
@@ -203,13 +203,13 @@ LIMIT 10;
 -- All combined
 WITH cte AS (
 WITH task_tracker AS (
- SELECT 
+ SELECT
 users_table.user_id as uid_1, users_table.value_2 as val_2
- FROM 
- users_table 
+ FROM
+ users_table
 JOIN
- events_table 
- ON 
+ events_table
+ ON
 users_table.value_2=events_table.value_2
 ),
 real_time AS (
@@ -225,19 +225,19 @@ WITH cte AS (
 SELECT uid_1, time, value_3 FROM task_tracker JOIN real_time ON val_2=value_3
 ),
 join_last_two AS (
- SELECT 
- router_exec.user_id, local_table.id 
- FROM 
- router_exec 
- JOIN 
- local_table 
- ON 
+ SELECT
+ router_exec.user_id, local_table.id
+ FROM
+ router_exec
+ JOIN
+ local_table
+ ON
 router_exec.user_id=local_table.id
 )
 SELECT * FROM join_first_two JOIN join_last_two ON id = value_3 ORDER BY 1,2,3,4,5 LIMIT 10
 )
 SELECT DISTINCT uid_1, time, value_3 FROM cte ORDER BY 1, 2, 3 LIMIT 20;
- uid_1 | time | value_3 
+ uid_1 | time | value_3
 ---------------------------------------------------------------------
 2 | Wed Nov 22 18:19:49.944985 2017 | 1
 (1 row)
@@ -245,13 +245,13 @@ SELECT DISTINCT uid_1, time, value_3 FROM cte ORDER BY 1, 2, 3 LIMIT 20;
 -- All combined with outer join
 WITH cte AS (
 WITH task_tracker AS (
- SELECT 
+ SELECT
 users_table.user_id as uid_1, users_table.value_2 as val_2
- FROM 
- users_table 
+ FROM
+ users_table
 JOIN
- events_table 
- ON 
+ events_table
+ ON
 users_table.value_2=events_table.value_2
 ),
 real_time AS (
@@ -267,19 +267,19 @@ WITH cte AS (
 SELECT uid_1, time, value_3 FROM task_tracker JOIN real_time ON val_2=value_3
 ),
 join_last_two AS (
- SELECT 
- router_exec.user_id, local_table.id 
- FROM 
- router_exec 
- JOIN 
- local_table 
- ON 
+ SELECT
+ router_exec.user_id, local_table.id
+ FROM
+ router_exec
+ JOIN
+ local_table
+ ON
 router_exec.user_id=local_table.id
 )
 SELECT uid_1, value_3 as val_3 FROM join_first_two JOIN join_last_two ON id = value_3 ORDER BY 1,2 LIMIT 10
 )
 SELECT DISTINCT uid_1, val_3 FROM cte join events_table on cte.val_3=events_table.event_type ORDER BY 1, 2;
- uid_1 | val_3 
+ uid_1 | val_3
 ---------------------------------------------------------------------
 2 | 1
 (1 row)
@@ -312,7 +312,7 @@ FROM
 cte, users_table
 WHERE
 cte.count=user_id and user_id=5;
- row_number | count 
+ row_number | count
 ---------------------------------------------------------------------
 1 | 0
 (1 row)
@@ -334,7 +334,7 @@ WITH cte AS (
 SELECT count(*) FROM users_table join cte_merge on id=user_id
 )
 SELECT count(*) FROM cte, users_table where cte.count=user_id;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -342,7 +342,7 @@ SELECT count(*) FROM cte, users_table where cte.count=user_id;
 SET citus.task_executor_type='task-tracker';
 -- CTEs shouldn't be able to terminate a task-tracker query
 WITH cte_1 AS (
- SELECT 
+ SELECT
 u_table.user_id as u_id, e_table.event_type
 FROM
 users_table as u_table
diff --git a/src/test/regress/expected/with_join.out b/src/test/regress/expected/with_join.out
index 105dd926b..12d855c3e 100644
--- a/src/test/regress/expected/with_join.out
+++ b/src/test/regress/expected/with_join.out
@@ -3,9 +3,9 @@ SET search_path TO with_join, public;
 SET citus.next_shard_id TO 1501000;
 CREATE TABLE with_join.reference_table(user_id int);
 SELECT create_reference_table('with_join.reference_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO reference_table VALUES (6), (7);
@@ -36,7 +36,7 @@ GROUP BY
 1
 ORDER BY
 2 DESC, 1;
- user_id | count 
+ user_id | count
 ---------------------------------------------------------------------
 3 | 30168
 4 | 27768
@@ -72,7 +72,7 @@ GROUP BY
 1
 ORDER BY
 2 DESC, 1;
- user_id | count 
+ user_id | count
 ---------------------------------------------------------------------
 2 | 67507
 4 | 23040
@@ -142,7 +142,7 @@
 ORDER BY 1 DESC
 LIMIT 5;
- uid 
+ uid
 ---------------------------------------------------------------------
 6
 5
@@ -181,7 +181,7 @@
 ORDER BY
 1,2,3
 LIMIT 5;
- user_id | time | event_type 
+ user_id | time | event_type
 ---------------------------------------------------------------------
 1 | Wed Nov 22 22:51:43.132261 2017 | 0
 1 | Wed Nov 22 22:51:43.132261 2017 | 0
@@ -204,7 +204,7 @@
 ORDER BY
 1,2,3
 LIMIT 5;
- user_id | time | event_type 
+ user_id | time | event_type
 ---------------------------------------------------------------------
 1 | Thu Nov 23 09:26:42.145043 2017 | 0
 1 | Thu Nov 23 09:26:42.145043 2017 | 0
@@ -259,10 +259,10 @@
 ORDER BY
 user_id
 LIMIT 5;
- row_number | user_id 
+ row_number | user_id
 ---------------------------------------------------------------------
 2 | 6
- 1 | 
+ 1 |
 (2 rows)

 -- some more tests for more complex outer-joins
@@ -272,27 +272,27 @@ CREATE TABLE distributed_2 (col1 int, col2 int, distrib_col int);
 CREATE TABLE reference_1 (col1 int, col2 int);
 CREATE TABLE reference_2(col1 int, col2 int);
 SELECT create_distributed_table('distributed_1','distrib_col');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_distributed_table('distributed_2','distrib_col');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_reference_table('reference_1');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT create_reference_table('reference_2');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO distributed_1 SELECT i, i, i FROM generate_series(0,100) i;
@@ -303,7 +303,7 @@ select count(*) from distributed_1 AS d1
 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2
 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1
 join (select distrib_col,count(*) from distributed_2 group by distrib_col) d2 ON d2.distrib_col=d1.distrib_col;
- count 
+ count
 ---------------------------------------------------------------------
 1734
 (1 row)
@@ -313,7 +313,7 @@ select count(*) from distributed_1 AS d1
 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2
 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1
 join d2 ON d2.distrib_col=d1.distrib_col;
- count 
+ count
 ---------------------------------------------------------------------
 1734
 (1 row)
@@ -323,7 +323,7 @@ select count(*) from distributed_1 AS d1
 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2
 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1
 join d2 ON d2.distrib_col=d1.distrib_col;
- count 
+ count
 ---------------------------------------------------------------------
 87584
 (1 row)
@@ -333,7 +333,7 @@ select count(*) from distributed_1 AS d1
 LEFT JOIN reference_1 AS r1 ON d1.col2=r1.col2
 LEFT JOIN reference_2 AS r2 ON r2.col1 = r1.col1
 join cte_1 ON cte_1.col1=d1.distrib_col;
- count 
+ count
 ---------------------------------------------------------------------
 86181
 (1 row)
diff --git a/src/test/regress/expected/with_modifying.out b/src/test/regress/expected/with_modifying.out
index 8eff622b1..cb2279724 100644
--- a/src/test/regress/expected/with_modifying.out
+++ b/src/test/regress/expected/with_modifying.out
@@ -4,31 +4,31 @@ CREATE SCHEMA with_modifying;
 SET search_path TO with_modifying, public;
 CREATE TABLE with_modifying.modify_table (id int, val int);
 SELECT create_distributed_table('modify_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE with_modifying.users_table (LIKE public.users_table INCLUDING ALL);
 SELECT create_distributed_table('with_modifying.users_table', 'user_id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO with_modifying.users_table SELECT * FROM public.users_table;
 CREATE TABLE with_modifying.summary_table (id int, counter int);
 SELECT create_distributed_table('summary_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 CREATE TABLE with_modifying.anchor_table (id int);
 SELECT create_reference_table('anchor_table');
- create_reference_table 
+ create_reference_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- basic insert query in CTE
@@ -41,11 +41,11 @@ FROM
 basic_insert
 ORDER BY
 user_id;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | | | | | 
- 2 | | | | | 
- 3 | | | | | 
+ 1 | | | | |
+ 2 | | | | |
+ 3 | | | | |
 (3 rows)

 -- single-shard UPDATE in CTE
@@ -60,17 +60,17 @@ ORDER BY
 user_id, time
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | 
- 1 | | | | 41 | 
- 1 | | | | 41 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 |
+ 1 | | | | 41 |
+ 1 | | | | 41 |
 (9 rows)

 -- multi-shard UPDATE in CTE
@@ -85,18 +85,18 @@ ORDER BY
 user_id, time
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 42 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 42 | 
- 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 42 | 
- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | 
- 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | 
- 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 42 | 
- 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 42 | 
- 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 42 | 
- 4 | Thu Nov 23 07:09:37.382372 2017 | 4 | 1 | 42 | 
- 4 | Thu Nov 23 08:38:45.877401 2017 | 4 | 1 | 42 | 
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 42 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 42 |
+ 2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 42 |
+ 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 |
+ 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 |
+ 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 42 |
+ 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 42 |
+ 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 42 |
+ 4 | Thu Nov 23 07:09:37.382372 2017 | 4 | 1 | 42 |
+ 4 | Thu Nov 23 08:38:45.877401 2017 | 4 | 1 | 42 |
 (10 rows)

 -- single-shard DELETE in CTE
@@ -111,18 +111,18 @@ ORDER BY
 user_id, time
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 42 | 
- 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | 
- 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 42 | 
- 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | 
- 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | 
- 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | 
- 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | 
- 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | 
- 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | 
- 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | 
+ 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 42 |
+ 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 |
+ 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 42 |
+ 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 |
+ 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 |
+ 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 |
+ 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 |
+ 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 |
+ 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 |
+ 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 |
 (10 rows)

 -- multi-shard DELETE in CTE
@@ -137,17 +137,17 @@ ORDER BY
 user_id, time
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 | 
- 1 | | | | 41 | 
- 1 | | | | 41 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 41 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 41 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 41 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 41 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 41 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 41 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 41 |
+ 1 | | | | 41 |
+ 1 | | | | 41 |
 (9 rows)

 -- INSERT...SELECT query in CTE
@@ -162,18 +162,18 @@ ORDER BY
 user_id, time
 LIMIT 10;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 | 
- 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 
- 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | 
- 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 | 
- 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | 
- 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | 
- 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | 
- 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | 
- 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | 
- 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | 
+ 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 42 |
+ 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 |
+ 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 |
+ 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 42 |
+ 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 |
+ 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 |
+ 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 |
+ 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 |
+ 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 |
+ 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 |
 (10 rows)

 -- CTEs prior to INSERT...SELECT via the coordinator should work
@@ -232,7 +232,7 @@ WITH raw_data AS (
 )
 INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id;
 SELECT * FROM summary_table ORDER BY id;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 2 | 20
 3 | 38
@@ -241,7 +241,7 @@ SELECT * FROM summary_table ORDER BY id;
 (4 rows)

 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -252,7 +252,7 @@ WITH raw_data AS (
 )
 INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id;
 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 1
 2 | 1
@@ -264,7 +264,7 @@ SELECT * FROM summary_table ORDER BY id, counter;
 (7 rows)

 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)
@@ -273,7 +273,7 @@ WITH insert_reference AS (
 INSERT INTO anchor_table VALUES (1), (2) RETURNING *
 )
 SELECT id FROM insert_reference ORDER BY id;
- id 
+ id
 ---------------------------------------------------------------------
 1
 2
@@ -298,13 +298,13 @@ SELECT id, SUM(counter) FROM (
 GROUP BY
 id;
 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 1
 2 | 21
@@ -321,13 +321,13 @@ raw_data AS (
 )
 INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id;
 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 3
 (1 row)

 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 1
 1 | 1
@@ -343,7 +343,7 @@ WITH summary_data AS (
 )
 INSERT INTO summary_table SELECT id, SUM(counter) AS counter FROM summary_data GROUP BY id;
 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 2
 2 | 21
@@ -353,7 +353,7 @@ SELECT * FROM summary_table ORDER BY id, counter;
 (5 rows)

 SELECT * FROM modify_table ORDER BY id, val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 1 | 2
 2 | 4
@@ -361,7 +361,7 @@ SELECT * FROM modify_table ORDER BY id, val;
 (3 rows)

 SELECT * FROM anchor_table ORDER BY id;
- id 
+ id
 ---------------------------------------------------------------------
 1
 2
@@ -379,13 +379,13 @@ INSERT INTO modify_table VALUES (21, 1), (22, 2), (23, 3);
 -- read ids from the same table
 WITH distinct_ids AS (
 SELECT DISTINCT id FROM modify_table
-), 
+),
 update_data AS (
- UPDATE modify_table SET val = 100 WHERE id > 10 AND 
+ UPDATE modify_table SET val = 100 WHERE id > 10 AND
 id IN (SELECT * FROM distinct_ids) RETURNING *
 )
 SELECT count(*) FROM update_data;
- count 
+ count
 ---------------------------------------------------------------------
 3
 (1 row)
@@ -393,27 +393,27 @@ SELECT count(*) FROM update_data;
 -- read ids from a different table
 WITH distinct_ids AS (
 SELECT DISTINCT id FROM summary_table
-), 
+),
 update_data AS (
 UPDATE modify_table SET val = 100 WHERE id > 10 AND id IN (SELECT * FROM distinct_ids) RETURNING *
 )
 SELECT count(*) FROM update_data;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 -- test update with generate series
-UPDATE modify_table SET val = 200 WHERE id > 10 AND 
+UPDATE modify_table SET val = 200 WHERE id > 10 AND
 id IN (SELECT 2*s FROM generate_series(1,20) s);
 -- test update with generate series in CTE
 WITH update_data AS (
- UPDATE modify_table SET val = 300 WHERE id > 10 AND 
+ UPDATE modify_table SET val = 300 WHERE id > 10 AND
 id IN (SELECT 3*s FROM generate_series(1,20) s) RETURNING *
 )
 SELECT COUNT(*) FROM update_data;
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)
@@ -422,7 +422,7 @@ WITH delete_rows AS (
 DELETE FROM modify_table WHERE id > 10 RETURNING *
 )
 SELECT * FROM delete_rows ORDER BY id, val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 21 | 300
 22 | 200
@@ -433,7 +433,7 @@ WITH delete_rows AS (
 DELETE FROM summary_table WHERE id > 10 RETURNING *
 )
 SELECT * FROM delete_rows ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 11 | 1
 12 | 1
@@ -450,14 +450,14 @@ WITH insert_reference AS (
 INSERT INTO anchor_table VALUES (3), (4) RETURNING *
 )
 SELECT id FROM insert_reference ORDER BY id;
- id 
+ id
 ---------------------------------------------------------------------
 3
 4
 (2 rows)

 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 1
 1 | 2
@@ -470,12 +470,12 @@ SELECT * FROM summary_table ORDER BY id, counter;
 (8 rows)

 SELECT * FROM modify_table ORDER BY id, val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 (0 rows)

 SELECT * FROM anchor_table ORDER BY id;
- id 
+ id
 ---------------------------------------------------------------------
 1
 2
@@ -485,7 +485,7 @@ SELECT * FROM anchor_table ORDER BY id;
 ROLLBACK;
 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 2
 2 | 21
@@ -495,7 +495,7 @@ SELECT * FROM summary_table ORDER BY id, counter;
 (5 rows)

 SELECT * FROM modify_table ORDER BY id, val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 1 | 2
 2 | 4
@@ -503,7 +503,7 @@ SELECT * FROM modify_table ORDER BY id, val;
 (3 rows)

 SELECT * FROM anchor_table ORDER BY id;
- id 
+ id
 ---------------------------------------------------------------------
 1
 2
@@ -514,7 +514,7 @@ WITH deleted_rows AS (
 DELETE FROM modify_table WHERE id IN (SELECT id FROM modify_table WHERE id = 1) RETURNING *
 )
 SELECT * FROM deleted_rows;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 1 | 2
 (1 row)
@@ -523,7 +523,7 @@ WITH deleted_rows AS (
 DELETE FROM modify_table WHERE id IN (SELECT id FROM modify_table WHERE val = 4) RETURNING *
 )
 SELECT * FROM deleted_rows;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 2 | 4
 (1 row)
@@ -535,7 +535,7 @@ deleted_rows AS (
 DELETE FROM modify_table WHERE id IN (SELECT id FROM select_rows) RETURNING *
 )
 SELECT * FROM deleted_rows;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 (0 rows)
@@ -543,7 +543,7 @@ WITH deleted_rows AS (
 DELETE FROM modify_table WHERE val IN (SELECT val FROM modify_table WHERE id = 3) RETURNING *
 )
 SELECT * FROM deleted_rows;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 3 | 6
 (1 row)
@@ -555,7 +555,7 @@ deleted_rows AS (
 DELETE FROM modify_table WHERE val IN (SELECT val FROM select_rows) RETURNING *
 )
 SELECT * FROM deleted_rows;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 (0 rows)
@@ -596,7 +596,7 @@ raw_data AS (
 DELETE FROM modify_table WHERE id = 1 AND val IN (SELECT val FROM select_data) RETURNING *
 )
 SELECT COUNT(*) FROM raw_data;
- count 
+ count
 ---------------------------------------------------------------------
 2
 (1 row)
@@ -609,7 +609,7 @@ raw_data AS (
 DELETE FROM modify_table WHERE id IN (SELECT id FROM select_data WHERE val > 5) RETURNING id, val
 )
 SELECT * FROM raw_data ORDER BY val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 1 | 2
 1 | 6
@@ -622,13 +622,13 @@ raw_data AS (
 UPDATE modify_table SET val = 0 WHERE id IN (SELECT id FROM select_data WHERE val < 5) RETURNING id, val
 )
 SELECT * FROM raw_data ORDER BY val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 2 | 0
 (1 row)

 SELECT * FROM modify_table ORDER BY id, val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 2 | 0
 3 | 5
@@ -639,13 +639,13 @@ WITH select_data AS (
 SELECT * FROM modify_table
 ),
 raw_data AS (
- UPDATE modify_table SET val = 0 WHERE 
- id IN (SELECT id FROM select_data) AND 
+ UPDATE modify_table SET val = 0 WHERE
+ id IN (SELECT id FROM select_data) AND
 val IN (SELECT counter FROM summary_table) RETURNING id, val
 )
 SELECT * FROM raw_data ORDER BY val;
- id | val 
+ id | val
 ---------------------------------------------------------------------
 (0 rows)
@@ -654,28 +654,28 @@ SET citus.shard_replication_factor to 2;
 DROP TABLE modify_table;
 CREATE TABLE with_modifying.modify_table (id int, val int);
 SELECT create_distributed_table('modify_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 INSERT INTO with_modifying.modify_table SELECT user_id, value_1 FROM public.users_table;
 DROP TABLE summary_table;
 CREATE TABLE with_modifying.summary_table (id int, counter int);
 SELECT create_distributed_table('summary_table', 'id');
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 107
 (1 row)

 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 (0 rows)
@@ -684,13 +684,13 @@ WITH raw_data AS (
 )
 INSERT INTO summary_table SELECT id, COUNT(*) AS counter FROM raw_data GROUP BY id;
 SELECT COUNT(*) FROM modify_table;
- count 
+ count
 ---------------------------------------------------------------------
 0
 (1 row)

 SELECT * FROM summary_table ORDER BY id, counter;
- id | counter 
+ id | counter
 ---------------------------------------------------------------------
 1 | 8
 2 | 19
@@ -705,16 +705,16 @@ SELECT * FROM summary_table ORDER BY id, counter;
 BEGIN;
 INSERT INTO modify_table (id) VALUES (10000);
 WITH test_cte AS (SELECT count(*) FROM modify_table) SELECT * FROM test_cte;
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)

 ROLLBACK;
 -- similarly, make sure that the intermediate result uses a seperate connection
- WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)), 
+ WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)),
 second_query AS (SELECT * FROM modify_table) SELECT count(*) FROM second_query;
- count 
+ count
 ---------------------------------------------------------------------
 1
 (1 row)
diff --git a/src/test/regress/expected/with_nested.out b/src/test/regress/expected/with_nested.out
index 97051124a..92ec7b271 100644
--- a/src/test/regress/expected/with_nested.out
+++ b/src/test/regress/expected/with_nested.out
@@ -14,7 +14,7 @@ cte_2 AS (
 SELECT * FROM cte_1_1 WHERE user_id < 3
 )
 SELECT user_id FROM cte_2 LIMIT 1;
- user_id 
+ user_id
 ---------------------------------------------------------------------
 2
 (1 row)
@@ -49,7 +49,7 @@ FROM
 ORDER BY 1, 2
 LIMIT 20;
- user_id | event_type 
+ user_id | event_type
 ---------------------------------------------------------------------
 1 | 0
 1 | 0
@@ -151,7 +151,7 @@ FROM
 users_events
 GROUP BY
 1;
- uid | avg | sum | sum 
+ uid | avg | sum | sum
 ---------------------------------------------------------------------
 1 | 1.00000000000000000000 | 3 | 72
 (1 row)
@@ -228,28 +228,28 @@ ORDER BY
 1, 2, 3, 4, 5, 6
 LIMIT 20;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 1 | | | | | 
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | 
- 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 1 | | | | |
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 |
+ 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (20 rows)

 -- Nested CTEs - joined with local table. Not supported yet.
@@ -318,28 +318,28 @@ ORDER BY
 1, 2, 3, 4, 5, 6
 LIMIT 20;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 1 | | | | | 
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | 
- 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 1 | | | | |
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 |
+ 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (20 rows)

 -- access to uncle, use window function, apply aggregates, use group by, LIMIT/OFFSET
@@ -396,7 +396,7 @@ cte2 AS (
 SELECT * FROM cte2_1 ORDER BY 1,2,3,4 LIMIT 3 OFFSET 2
 )
 SELECT * FROM cte2;
- user_id | time | value_1 | min 
+ user_id | time | value_1 | min
 ---------------------------------------------------------------------
 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 5
 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 5
diff --git a/src/test/regress/expected/with_partitioning.out b/src/test/regress/expected/with_partitioning.out
index 2b151b984..0f7d40e4e 100644
--- a/src/test/regress/expected/with_partitioning.out
+++ b/src/test/regress/expected/with_partitioning.out
@@ -16,9 +16,9 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
 SELECT create_distributed_table('with_partitioning.partitioning_test', 'id');
 NOTICE: Copying data from local table...
 NOTICE: Copying data from local table...
- create_distributed_table 
+ create_distributed_table
 ---------------------------------------------------------------------
- 
+
 (1 row)

 -- Join of a CTE on distributed table and then join with a partitioned table
@@ -26,7 +26,7 @@ WITH cte AS (
 SELECT * FROM users_table
 )
 SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time ORDER BY 1, 2 LIMIT 3;
- id | time 
+ id | time
 ---------------------------------------------------------------------
 1 | Thu Nov 23 00:07:11.068353 2017
 3 | Wed Nov 22 18:19:49.944985 2017
@@ -37,7 +37,7 @@ WITH cte AS (
 SELECT * FROM users_table
 )
 SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time WHERE partitioning_test.time >'2017-11-20' ORDER BY 1, 2 LIMIT 3;
- id | time 
+ id | time
 ---------------------------------------------------------------------
 1 | Thu Nov 23 00:07:11.068353 2017
 3 | Wed Nov 22 18:19:49.944985 2017
@@ -57,7 +57,7 @@ cte_joined_2 AS (
 SELECT user_id, cte_joined.time FROM cte_joined join cte on (cte_joined.time = cte.time)
 )
 SELECT DISTINCT ON (event_type) event_type, cte_joined_2.user_id FROM events_table join cte_joined_2 on (cte_joined_2.time=events_table.time::date) ORDER BY 1, 2 LIMIT 10 OFFSET 2;
- event_type | user_id 
+ event_type | user_id
 ---------------------------------------------------------------------
 2 | 1
 3 | 1
@@ -81,7 +81,7 @@ cte_joined_2 AS (
 SELECT users_table.user_id, cte_joined.time FROM cte_joined join users_table on (cte_joined.time = users_table.time::date)
 )
 SELECT DISTINCT ON (id) id, cte_joined_2.time FROM cte_joined_2 join partitioning_test on (cte_joined_2.time=partitioning_test.time) ORDER BY 1, 2;
- id | time 
+ id | time
 ---------------------------------------------------------------------
 1 | 11-23-2017
 3 | 11-22-2017
diff --git a/src/test/regress/expected/with_prepare.out b/src/test/regress/expected/with_prepare.out
index 58c9191a3..9f6d85052 100644
--- a/src/test/regress/expected/with_prepare.out
+++ b/src/test/regress/expected/with_prepare.out
@@ -4,7 +4,7 @@ WITH basic AS(
 SELECT * FROM users_table
 )
 SELECT
- * 
+ *
 FROM
 basic
 WHERE
@@ -47,7 +47,7 @@ user_coolness AS(
 user_id
 )
 SELECT
- * 
+ *
 FROM
 user_coolness
 ORDER BY
@@ -58,7 +58,7 @@ PREPARE prepared_test_3(integer) AS
 WITH users_events AS(
 -- events 1 and 2 only
 WITH spec_events AS(
- SELECT 
+ SELECT
 *
 FROM
 events_table
@@ -111,7 +111,7 @@ user_coolness AS(
 user_id
 )
 SELECT
- * 
+ *
 FROM
 user_coolness
 ORDER BY
@@ -123,7 +123,7 @@ WITH basic AS(
 SELECT * FROM users_table WHERE value_2 IN ($1, $2, $3)
 )
 SELECT
- * 
+ *
 FROM
 basic
 ORDER BY
@@ -181,103 +181,103 @@ WITH event_id AS (
 FROM events_table
 )
 SELECT
- count(*) 
+ count(*)
 FROM
 event_id
 WHERE events_user_id IN (SELECT user_id FROM users_table);
 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_1;
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -288,7 +288,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -299,7 +299,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -310,7 +310,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -321,7 +321,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -332,7 +332,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_2;
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 29
 3 | 29
@@ -343,7 +343,7 @@ EXECUTE prepared_test_2;
 (6 rows)

 EXECUTE prepared_test_3(1);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 10850
 6 | 15500
@@ -354,7 +354,7 @@ EXECUTE prepared_test_3(1);
 (6 rows)

 EXECUTE prepared_test_3(2);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 1 | 10850
 6 | 15500
@@ -365,265 +365,265 @@ EXECUTE prepared_test_3(2);
 (6 rows)

 EXECUTE prepared_test_3(3);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 (0 rows)

 EXECUTE prepared_test_3(4);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 (0 rows)

 EXECUTE prepared_test_3(5);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 (0 rows)

 EXECUTE prepared_test_3(6);
- user_id | sum 
+ user_id | sum
 ---------------------------------------------------------------------
 (0 rows)

 EXECUTE prepared_test_4(1, 2, 3);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
- 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 | 
- 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
+ 2 | Thu Nov 23 08:49:47.029236 2017 | 4 | 2 | 4 |
+ 2 | Thu Nov 23 09:54:28.13665 2017 | 0 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_4(2, 3, 4);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
 (10 rows)

 EXECUTE prepared_test_4(3, 4, 5);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | 
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 |
 (10 rows)

 EXECUTE prepared_test_4(4, 5, 6);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | 
- 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | 
- 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 | 
- 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | 
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 |
+ 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 |
+ 2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 |
+ 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 |
 (10 rows)

 EXECUTE prepared_test_4(5, 6, 7);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 | 
- 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | 
- 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | 
- 3 | Thu Nov 23 11:31:17.403189 2017 | 4 | 5 | 3 | 
- 3 | Thu Nov 23 11:41:21.157066 2017 | 3 | 5 | 3 | 
- 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 | 
- 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 | 
- 4 | Thu Nov 23 07:28:42.537255 2017 | 3 | 5 | 3 | 
- 4 | Thu Nov 23 11:45:39.744961 2017 | 4 | 5 | 4 | 
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 11:48:24.943542 2017 | 0 | 5 | 5 |
+ 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 |
+ 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 |
+ 3 | Thu Nov 23 11:31:17.403189 2017 | 4 | 5 | 3 |
+ 3 | Thu Nov 23 11:41:21.157066 2017 | 3 | 5 | 3 |
+ 3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 |
+ 3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 |
+ 4 | Thu Nov 23 07:28:42.537255 2017 | 3 | 5 | 3 |
+ 4 | Thu Nov 23 11:45:39.744961 2017 | 4 | 5 | 4 |
 (10 rows)

 EXECUTE prepared_test_4(6, 7, 8);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
 (0 rows)

 EXECUTE prepared_test_5(1, 2, 3);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 | 
- 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 | 
- 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 | 
- 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 | 
- 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 | 
- 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 | 
- 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 | 
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
+ 1 | Wed Nov 22 22:51:43.132261 2017 | 4 | 0 | 3 |
+ 1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
+ 1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
+ 1 | Thu Nov 23 11:11:24.40789 2017 | 3 | 4 | 0 |
+ 1 | Thu Nov 23 11:44:57.515981 2017 | 4 | 3 | 4 |
+ 1 | Thu Nov 23 17:23:03.441394 2017 | 5 | 4 | 3 |
+ 1 | Thu Nov 23 17:30:34.635085 2017 | 3 | 4 | 4 |
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
 (10 rows)

 EXECUTE prepared_test_5(2, 3, 4);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 | 
- 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 | 
- 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 | 
- 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 | 
- 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 | 
- 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 | 
- 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 | 
- 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 | 
- 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 | 
- 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 | 
+ 2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
+ 2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
+ 2 | Thu Nov 23 01:04:26.198826 2017 | 4 | 3 | 4 |
+ 2 | Thu Nov 23 01:14:27.658529 2017 | 4 | 4 | 4 |
+ 2 | Thu Nov 23 03:27:50.327051 2017 | 2 | 2 | 0 |
+ 2 | Thu Nov 23 06:01:08.148777 2017 | 2 | 4 | 2 |
+ 2 | Thu Nov 23 06:23:53.572592 2017 | 4 | 4 | 5 |
+ 2 | Thu Nov 23 06:50:30.797805 2017 | 1 | 1 | 1 |
+ 2 | Thu Nov 23 06:56:38.46819 2017 | 0 | 1 | 3 |
+ 2 | Thu Nov 23 08:22:22.169158 2017 | 4 | 2 | 5 |
 (10 rows)

 EXECUTE prepared_test_5(3, 4, 5);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 | 
- 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 | 
- 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 | 
- 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 | 
- 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 | 
- 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 | 
- 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 | 
- 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 | 
- 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 | 
- 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 | 
+ 3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 |
+ 3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 |
+ 3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 |
+ 3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 |
+ 3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 |
+ 3 | Thu Nov 23 03:52:32.008895 2017 | 4 | 2 | 0 |
+ 3 | Thu Nov 23 04:01:08.04806 2017 | 5 | 5 | 3 |
+ 3 | Thu Nov 23 05:01:44.885505 2017 | 3 | 5 | 4 |
+ 3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 |
+ 3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 |
 (10 rows)

 EXECUTE prepared_test_5(4, 5, 6);
- user_id | time | value_1 | value_2 | value_3 | value_4 
+ user_id | time | value_1 | value_2 | value_3 | value_4
 ---------------------------------------------------------------------
- 4 | Wed Nov 22 21:33:03.616802 2017 | 5 | 4 | 1 | 
- 4 | Wed Nov 22 23:48:11.949567 2017 | 2 | 0 | 0 | 
- 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | 
- 4 | Thu Nov 23 00:28:45.060248 2017 | 4 | 4 | 3 | 
- 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | 
- 4 | Thu Nov 23 02:14:35.047974 2017 | 4 | 4 | 1 | 
- 4 | Thu Nov 23 03:34:40.419294 2017 | 1 | 0 | 4 | 
- 4 | Thu Nov 23 05:42:12.89386 2017 | 2 | 3 | 3 | 
- 4 | Thu Nov 23 06:39:06.287818 2017 | 3 | 3 | 2 | 
- 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 5 | 
+ 4 | Wed Nov 22 21:33:03.616802 2017 | 5 | 4 | 1 |
+ 4 | Wed Nov 22 23:48:11.949567
2017 | 2 | 0 | 0 | + 4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 | + 4 | Thu Nov 23 00:28:45.060248 2017 | 4 | 4 | 3 | + 4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 | + 4 | Thu Nov 23 02:14:35.047974 2017 | 4 | 4 | 1 | + 4 | Thu Nov 23 03:34:40.419294 2017 | 1 | 0 | 4 | + 4 | Thu Nov 23 05:42:12.89386 2017 | 2 | 3 | 3 | + 4 | Thu Nov 23 06:39:06.287818 2017 | 3 | 3 | 2 | + 4 | Thu Nov 23 06:50:08.101207 2017 | 2 | 1 | 5 | (10 rows) EXECUTE prepared_test_5(5, 6, 7); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | - 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | - 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | - 5 | Wed Nov 22 22:31:47.62577 2017 | 3 | 1 | 4 | - 5 | Wed Nov 22 23:10:42.777699 2017 | 3 | 4 | 5 | - 5 | Thu Nov 23 00:46:13.498577 2017 | 3 | 2 | 2 | - 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | - 5 | Thu Nov 23 02:09:42.27857 2017 | 3 | 2 | 4 | - 5 | Thu Nov 23 02:50:32.678074 2017 | 4 | 2 | 4 | - 5 | Thu Nov 23 06:35:05.166535 2017 | 5 | 5 | 1 | + 5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 | + 5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 | + 5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 | + 5 | Wed Nov 22 22:31:47.62577 2017 | 3 | 1 | 4 | + 5 | Wed Nov 22 23:10:42.777699 2017 | 3 | 4 | 5 | + 5 | Thu Nov 23 00:46:13.498577 2017 | 3 | 2 | 2 | + 5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 | + 5 | Thu Nov 23 02:09:42.27857 2017 | 3 | 2 | 4 | + 5 | Thu Nov 23 02:50:32.678074 2017 | 4 | 2 | 4 | + 5 | Thu Nov 23 06:35:05.166535 2017 | 5 | 5 | 1 | (10 rows) EXECUTE prepared_test_5(6, 7, 8); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 1 | - 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 4 | - 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | - 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | - 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | - 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | - 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | - 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | - 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | + 6 | Wed Nov 22 20:15:53.317797 2017 | 1 | 1 | 1 | + 6 | Wed Nov 22 23:01:24.82289 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 00:07:11.068353 2017 | 1 | 1 | 4 | + 6 | Thu Nov 23 00:09:44.19812 2017 | 5 | 2 | 0 | + 6 | Thu Nov 23 01:13:50.526322 2017 | 2 | 4 | 1 | + 6 | Thu Nov 23 01:14:55.769581 2017 | 0 | 0 | 5 | + 6 | Thu Nov 23 10:22:11.02918 2017 | 5 | 0 | 5 | + 6 | Thu Nov 23 11:08:04.244582 2017 | 2 | 3 | 2 | + 6 | Thu Nov 23 13:51:16.92838 2017 | 0 | 4 | 2 | + 6 | Thu Nov 23 14:43:18.024104 2017 | 3 | 2 | 5 | (10 rows) EXECUTE prepared_test_6; - count + count --------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count + count --------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count + count --------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count + count --------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count + count 
--------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_test_6; - count + count --------------------------------------------------------------------- 101 (1 row) EXECUTE prepared_partition_column_insert(1); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 1 | | | | | + 1 | | | | | (1 row) EXECUTE prepared_partition_column_insert(2); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 2 | | | | | + 2 | | | | | (1 row) EXECUTE prepared_partition_column_insert(3); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 3 | | | | | + 3 | | | | | (1 row) EXECUTE prepared_partition_column_insert(4); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 4 | | | | | + 4 | | | | | (1 row) EXECUTE prepared_partition_column_insert(5); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 5 | | | | | + 5 | | | | | (1 row) EXECUTE prepared_partition_column_insert(6); - user_id | time | value_1 | value_2 | value_3 | value_4 + user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- - 6 | | | | | + 6 | | | | | (1 row) DEALLOCATE ALL; diff --git a/src/test/regress/expected/with_set_operations.out b/src/test/regress/expected/with_set_operations.out index 39425e5da..fd6b106f9 100644 --- a/src/test/regress/expected/with_set_operations.out +++ b/src/test/regress/expected/with_set_operations.out @@ -3,7 +3,7 @@ -- =================================================================== SET client_min_messages TO DEBUG1; -- use ctes inside unions on the top level -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) @@ -11,7 +11,7 @@ ORDER BY 1 DESC; DEBUG: generating subplan 1_1 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: generating subplan 1_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 ORDER BY 1 DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -22,11 +22,11 @@ DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT cte_1.user_id F (6 rows) -- use ctes inside unions in a subquery -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) -SELECT - count(*) +SELECT + count(*) FROM ( (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) ) as foo; @@ -34,13 +34,13 @@ DEBUG: generating subplan 4_1 for CTE cte_1: SELECT user_id 
FROM public.users_t DEBUG: generating subplan 4_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: generating subplan 4_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 4 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('4_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo - count + count --------------------------------------------------------------------- 6 (1 row) -- cte with unions of other ctes -WITH +WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table), cte_3 AS ((SELECT * FROM cte_1) UNION (SELECT * FROM cte_2)) @@ -49,7 +49,7 @@ DEBUG: generating subplan 8_1 for CTE cte_1: SELECT user_id FROM public.users_t DEBUG: generating subplan 8_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: generating subplan 8_3 for CTE cte_3: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('8_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_3 ORDER BY user_id DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -60,7 +60,7 @@ DEBUG: Plan 8 query after replacing subqueries and CTEs: SELECT user_id FROM (S (6 rows) -- cte with unions of distributed table -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM users_table)) SELECT * FROM cte_1 ORDER BY 1 DESC; DEBUG: generating subplan 12_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table @@ -68,7 +68,7 @@ DEBUG: generating subplan 13_1 for subquery SELECT user_id FROM public.users_ta DEBUG: generating subplan 13_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY user_id DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -79,7 +79,7 @@ DEBUG: Plan 12 query after replacing subqueries and CTEs: SELECT user_id FROM ( (6 rows) -- cte with unions of tables is intersected with another query -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM 
users_table)) (SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1 DESC; DEBUG: generating subplan 16_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table @@ -88,7 +88,7 @@ DEBUG: generating subplan 17_2 for subquery SELECT user_id FROM public.users_ta DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('17_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: generating subplan 16_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('16_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('16_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -99,10 +99,10 @@ DEBUG: Plan 16 query after replacing subqueries and CTEs: SELECT cte_1.user_id (6 rows) -- cte with unions of tables is intersected with another query that involves except -WITH +WITH cte_1 AS ((SELECT user_id FROM users_table) UNION (SELECT user_id FROM users_table)) -(SELECT * FROM cte_1) - INTERSECT +(SELECT * FROM cte_1) + INTERSECT ((SELECT user_id FROM events_table WHERE user_id < 3) EXCEPT (SELECT user_id FROM users_table WHERE user_id > 4)) ORDER BY 1 DESC; DEBUG: generating subplan 21_1 for CTE cte_1: SELECT users_table.user_id FROM public.users_table UNION SELECT users_table.user_id FROM public.users_table DEBUG: generating subplan 22_1 for subquery SELECT user_id FROM public.users_table @@ -111,30 +111,30 @@ DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT intermediate_r DEBUG: generating subplan 21_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: generating subplan 21_3 for subquery SELECT user_id FROM public.users_table WHERE (user_id OPERATOR(pg_catalog.>) 4) DEBUG: Plan 21 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('21_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT (SELECT intermediate_result.user_id FROM read_intermediate_result('21_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('21_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) ORDER BY 1 DESC - user_id + user_id --------------------------------------------------------------------- 2 1 (2 rows) -- CTE inside a top level EXCEPT -(WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3) SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1; +(WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3) SELECT * FROM cte_1) INTERSECT (SELECT user_id FROM users_table) ORDER BY 1; DEBUG: generating subplan 27_1 for CTE cte_1: SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: generating subplan 27_2 for subquery SELECT user_id FROM 
public.users_table DEBUG: Plan 27 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('27_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('27_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 - user_id + user_id --------------------------------------------------------------------- 1 2 (2 rows) -- INTERSECT inside a CTE, which is inside a subquery -SELECT - DISTINCT users_table.user_id -FROM - users_table, - (WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3 INTERSECT - SELECT user_id FROM events_table WHERE user_id < 2) +SELECT + DISTINCT users_table.user_id +FROM + users_table, + (WITH cte_1 AS (SELECT user_id FROM events_table WHERE user_id < 3 INTERSECT + SELECT user_id FROM events_table WHERE user_id < 2) SELECT * FROM cte_1) as foo WHERE users_table.user_id = foo.user_id @@ -144,19 +144,19 @@ DEBUG: generating subplan 31_1 for subquery SELECT user_id FROM public.events_t DEBUG: generating subplan 31_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 2) DEBUG: Plan 31 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('31_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('31_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 30 query after replacing subqueries and CTEs: SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('30_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1) foo WHERE (users_table.user_id OPERATOR(pg_catalog.=) foo.user_id) ORDER BY users_table.user_id DESC - user_id + user_id --------------------------------------------------------------------- 1 (1 row) -- UNION is created via outputs of CTEs, which is inside a subquery -- and the subquery is joined with a distributed table -SELECT - count(*) -FROM +SELECT + count(*) +FROM events_table, ( - WITH + WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) @@ -166,7 +166,7 @@ DEBUG: generating subplan 34_1 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 34_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: generating subplan 34_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('34_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 34 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('34_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.event_type) - count + count --------------------------------------------------------------------- 95 (1 row) @@ -179,7 +179,7 @@ ORDER BY 1 DESC; DEBUG: generating subplan 38_1 for 
subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: generating subplan 38_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan 38 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('38_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('38_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -199,7 +199,7 @@ DEBUG: generating subplan 41_1 for subquery SELECT DISTINCT events_table.user_i DEBUG: push down of limit count: 10 DEBUG: generating subplan 41_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10 DEBUG: Plan 41 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('41_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('41_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) ORDER BY 1 DESC - user_id + user_id --------------------------------------------------------------------- 6 5 @@ -210,17 +210,17 @@ DEBUG: Plan 41 query after replacing subqueries and CTEs: SELECT intermediate_r (6 rows) -- joins inside unions that are not safe to pushdown inside a subquery -SELECT - count(*) -FROM +SELECT + count(*) +FROM (SELECT DISTINCT value_2 FROM events_table) as events_table, - (WITH foo AS + (WITH foo AS ((SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id ) INTERSECT - (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) - SELECT * FROM foo) + (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) + SELECT * FROM foo) as foo -WHERE +WHERE foo.user_id = events_table.value_2; DEBUG: generating subplan 44_1 for subquery SELECT DISTINCT value_2 FROM public.events_table DEBUG: generating subplan 44_2 for CTE foo: SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) INTERSECT (SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) LIMIT 10) @@ -229,20 +229,20 @@ DEBUG: generating subplan 46_1 for subquery SELECT DISTINCT events_table.user_i DEBUG: generating subplan 46_2 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan 46 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('46_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('46_1'::text, 'binary'::citus_copy_format) 
intermediate_result(user_id integer) DEBUG: Plan 44 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.value_2 FROM read_intermediate_result('44_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) events_table, (SELECT foo_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('44_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo_1) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.value_2) - count + count --------------------------------------------------------------------- 5 (1 row) -- joins inside unions some safe to pushdown -SELECT - count(*) -FROM +SELECT + count(*) +FROM (WITH events_table AS (SELECT DISTINCT user_id FROM events_table) SELECT * FROM events_table) as events_table, ((SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id ) INTERSECT (SELECT DISTINCT events_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id LIMIT 10)) as foo -WHERE +WHERE foo.user_id = events_table.user_id; DEBUG: generating subplan 49_1 for CTE events_table: SELECT DISTINCT user_id FROM public.events_table DEBUG: push down of limit count: 10 @@ -250,18 +250,18 @@ DEBUG: generating subplan 49_2 for subquery SELECT DISTINCT events_table.user_i DEBUG: generating subplan 49_3 for subquery SELECT DISTINCT events_table.user_id FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: generating subplan 49_4 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('49_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) INTERSECT SELECT intermediate_result.user_id FROM read_intermediate_result('49_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 49 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT events_table_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('49_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) events_table_1) events_table, (SELECT intermediate_result.user_id FROM read_intermediate_result('49_4'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo WHERE (foo.user_id OPERATOR(pg_catalog.=) events_table.user_id) - count + count --------------------------------------------------------------------- 6 (1 row) -- CTE inside unions -(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION -(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC; +(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION +(WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC; DEBUG: generating subplan 54_1 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: generating subplan 54_2 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: Plan 54 query after replacing subqueries and CTEs: SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('54_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 ORDER BY 1 DESC - user_id + user_id 
--------------------------------------------------------------------- 6 5 @@ -276,7 +276,7 @@ SELECT count(*) FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table @@ -285,20 +285,20 @@ DEBUG: generating subplan 57_1 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 57_2 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: generating subplan 57_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 DEBUG: Plan 57 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('57_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE (users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) - count + count --------------------------------------------------------------------- 92 (1 row) -- CTEs with less alias than the input subquery -(WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) UNION +(WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1(x) AS (SELECT user_id, value_2 FROM users_table) SELECT * FROM cte_1) ORDER BY 1 DESC, 2 DESC LIMIT 5; DEBUG: generating subplan 61_1 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table DEBUG: generating subplan 61_2 for CTE cte_1: SELECT user_id, value_2 FROM public.users_table DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 UNION SELECT cte_1.x, cte_1.value_2 FROM (SELECT intermediate_result.user_id AS x, intermediate_result.value_2 FROM read_intermediate_result('61_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) cte_1 ORDER BY 1 DESC, 2 DESC LIMIT 5 - x | value_2 + x | value_2 --------------------------------------------------------------------- - 6 | + 6 | 6 | 4 6 | 3 6 | 2 @@ -306,7 +306,7 @@ DEBUG: Plan 61 query after replacing subqueries and CTEs: SELECT cte_1.x, cte_1 (5 rows) -- simple subqueries in WHERE with unions -SELECT +SELECT count(*) FROM users_table @@ -326,20 +326,20 @@ DEBUG: generating subplan 65_1 for subquery SELECT user_id FROM public.users_ta DEBUG: generating subplan 65_2 for subquery SELECT user_id FROM public.events_table DEBUG: Plan 65 query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('65_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('65_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) DEBUG: Plan 64 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT DISTINCT cte_1.user_id FROM (SELECT 
intermediate_result.user_id FROM read_intermediate_result('64_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1)) ORDER BY (count(*)) DESC - count + count --------------------------------------------------------------------- 92 (1 row) -- simple subqueries in WHERE with unions and ctes -SELECT +SELECT count(*) FROM users_table WHERE value_2 IN ( - WITH + WITH cte_1 AS (SELECT user_id FROM users_table), cte_2 AS (SELECT user_id FROM events_table) (SELECT * FROM cte_1) UNION (SELECT * FROM cte_2) @@ -349,24 +349,24 @@ DEBUG: generating subplan 68_1 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 68_2 for CTE cte_2: SELECT user_id FROM public.events_table DEBUG: generating subplan 68_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_2.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('68_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_2 DEBUG: Plan 68 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('68_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) ORDER BY (count(*)) DESC - count + count --------------------------------------------------------------------- 92 (1 row) -- unions and ctes inside subqueries in where clause with a pushdownable correlated subquery -SELECT - DISTINCT user_id -FROM - events_table -WHERE - event_type IN +SELECT + DISTINCT user_id +FROM + events_table +WHERE + event_type IN ( SELECT users_table.user_id FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table @@ -377,7 +377,7 @@ DEBUG: generating subplan 72_1 for CTE cte_1: SELECT user_id FROM public.users_ DEBUG: generating subplan 72_2 for CTE cte_1: SELECT user_id FROM public.users_table DEBUG: generating subplan 72_3 for subquery SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 UNION SELECT cte_1.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) cte_1 DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT DISTINCT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT users_table.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('72_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, public.users_table WHERE ((users_table.value_2 OPERATOR(pg_catalog.=) foo.user_id) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))) ORDER BY user_id DESC - user_id + user_id --------------------------------------------------------------------- 5 4 @@ -388,18 +388,18 @@ DEBUG: Plan 72 query after replacing subqueries and CTEs: SELECT DISTINCT user_ -- unions and ctes inside subqueries in where clause with a not pushdownable correlated subquery -- should error out -SELECT - DISTINCT user_id -FROM - events_table -WHERE - 
event_type IN +SELECT + DISTINCT user_id +FROM + events_table +WHERE + event_type IN ( SELECT users_table.user_id FROM ( - (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION + (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) UNION (WITH cte_1 AS (SELECT user_id FROM users_table) SELECT * FROM cte_1) ) as foo, users_table diff --git a/src/test/regress/expected/with_transactions.out b/src/test/regress/expected/with_transactions.out index 91c2f3257..cf5dd7300 100644 --- a/src/test/regress/expected/with_transactions.out +++ b/src/test/regress/expected/with_transactions.out @@ -5,16 +5,16 @@ SET citus.shard_replication_factor TO 1; -- https://github.com/citusdata/citus/i SET citus.next_placement_id TO 800000; CREATE TABLE with_transactions.raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('raw_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) CREATE TABLE with_transactions.second_raw_table (tenant_id int, income float, created_at timestamptz); SELECT create_distributed_table('second_raw_table', 'tenant_id'); - create_distributed_table + create_distributed_table --------------------------------------------------------------------- - + (1 row) INSERT INTO @@ -42,13 +42,13 @@ DEBUG: Plan 3 query after replacing subqueries and CTEs: UPDATE with_transactio ROLLBACK; -- see that both UPDATE and DELETE commands are rollbacked SELECT count(*) FROM raw_table; - count + count --------------------------------------------------------------------- 101 (1 row) SELECT max(income) FROM raw_table; - max + max --------------------------------------------------------------------- 1000 (1 row) @@ -56,7 +56,7 @@ SELECT max(income) FROM raw_table; -- multi-statement multi shard modifying statements should work BEGIN; SELECT count (*) FROM second_raw_table; - count + count --------------------------------------------------------------------- 101 (1 row) @@ -85,19 +85,19 @@ DEBUG: generating subplan 12_1 for CTE ids_inserted: INSERT INTO with_transacti DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE with_transactions.raw_table SET created_at = 'Sat Feb 10 20:00:00 2001 PST'::timestamp with time zone WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) -- make sure that everything committed SELECT count(*) FROM raw_table; - count + count --------------------------------------------------------------------- 105 (1 row) SELECT count(*) FROM raw_table WHERE created_at = '2001-02-10 20:00:00'; - count + count --------------------------------------------------------------------- 4 (1 row) SELECT count(*) FROM second_raw_table; - count + count --------------------------------------------------------------------- 0 (1 row) @@ -113,7 +113,7 @@ SELECT income FROM second_raw_table WHERE tenant_id IN (SELECT * FROM ids_insert DEBUG: generating subplan 17_1 for CTE ids_inserted: INSERT INTO with_transactions.raw_table (tenant_id) VALUES (11), (12), (13), (14) RETURNING raw_table.tenant_id DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT income FROM with_transactions.second_raw_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT ids_inserted.tenant_id FROM (SELECT intermediate_result.tenant_id FROM 
read_intermediate_result('17_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id integer)) ids_inserted)) ORDER BY income DESC LIMIT 3 DEBUG: push down of limit count: 3 - income + income --------------------------------------------------------------------- (0 rows) @@ -135,7 +135,7 @@ BEGIN END; $BODY$; SELECT count(*) FROM (SELECT run_ctes(s) FROM generate_series(1,current_setting('max_connections')::int+2) s) a; - count + count --------------------------------------------------------------------- 102 (1 row) diff --git a/src/test/regress/expected/with_where.out b/src/test/regress/expected/with_where.out index 00f62f9a6..032b7444b 100644 --- a/src/test/regress/expected/with_where.out +++ b/src/test/regress/expected/with_where.out @@ -2,15 +2,15 @@ SET citus.enable_repartition_joins TO on; -- CTE in WHERE basic WITH events AS ( - SELECT - event_type - FROM - events_table + SELECT + event_type + FROM + events_table WHERE - user_id < 5 + user_id < 5 GROUP BY event_type - ORDER BY + ORDER BY event_type LIMIT 10 ) @@ -25,7 +25,7 @@ IN event_type FROM events); - count + count --------------------------------------------------------------------- 101 (1 row) @@ -37,12 +37,12 @@ WITH users AS ( events_table, users_table WHERE events_table.user_id = users_table.user_id - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -55,7 +55,7 @@ WHERE FROM users ); - count + count --------------------------------------------------------------------- 101 (1 row) @@ -67,12 +67,12 @@ WITH users AS ( events_table, users_table WHERE events_table.user_id = users_table.user_id - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -85,7 +85,7 @@ WHERE FROM users ); - count + count --------------------------------------------------------------------- 101 (1 row) @@ -98,12 +98,12 @@ WITH users AS ( events_table, users_table WHERE events_table.value_2 = users_table.value_2 - GROUP BY + GROUP BY 1 ORDER BY 1 LIMIT 10 -) +) SELECT count(*) FROM @@ -116,7 +116,7 @@ WHERE FROM users ); - count + count --------------------------------------------------------------------- 101 (1 row) @@ -134,18 +134,18 @@ WHERE event_type IN (WITH events AS ( - SELECT - event_type - FROM - events_table - WHERE user_id < 5 - GROUP BY - 1 - ORDER BY + SELECT + event_type + FROM + events_table + WHERE user_id < 5 + GROUP BY + 1 + ORDER BY 1) SELECT * FROM events LIMIT 10 ); - count + count --------------------------------------------------------------------- 101 (1 row) @@ -168,14 +168,14 @@ WHERE events_table, users_table WHERE events_table.value_2 = users_table.value_2 - GROUP BY + GROUP BY 1 ORDER BY 1 ) SELECT * FROM users LIMIT 10 ); - count + count --------------------------------------------------------------------- 101 (1 row) diff --git a/src/test/regress/expected/worker_binary_data_partition.out b/src/test/regress/expected/worker_binary_data_partition.out index 9c793079b..9e031cdae 100644 --- a/src/test/regress/expected/worker_binary_data_partition.out +++ b/src/test/regress/expected/worker_binary_data_partition.out @@ -21,7 +21,7 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT length(binarycolumn) FROM :Table_Name; - length + length --------------------------------------------------------------------- 2 4 @@ -43,9 +43,9 @@ SELECT length(binarycolumn) FROM :Table_Name; SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, 
:Partition_Column_Text, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); - worker_range_partition_table + worker_range_partition_table --------------------------------------------------------------------- - + (1 row) -- Copy range partitioned files into tables @@ -60,7 +60,7 @@ SELECT COUNT(*) AS total_row_count FROM ( SELECT * FROM :Table_Part_00 UNION ALL SELECT * FROM :Table_Part_01 UNION ALL SELECT * FROM :Table_Part_02 ) AS all_rows; - total_row_count + total_row_count --------------------------------------------------------------------- 14 (1 row) @@ -71,7 +71,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' ) diff; - diff_lhs_00 + diff_lhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -80,7 +80,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' ) diff; - diff_lhs_01 + diff_lhs_01 --------------------------------------------------------------------- 0 (1 row) @@ -88,7 +88,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' ) diff; - diff_lhs_02 + diff_lhs_02 --------------------------------------------------------------------- 0 (1 row) @@ -97,7 +97,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; - diff_rhs_00 + diff_rhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -106,7 +106,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; - diff_rhs_01 + diff_rhs_01 --------------------------------------------------------------------- 0 (1 row) @@ -114,7 +114,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM ( SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; - diff_rhs_02 + diff_rhs_02 --------------------------------------------------------------------- 0 (1 row) diff --git a/src/test/regress/expected/worker_check_invalid_arguments.out b/src/test/regress/expected/worker_check_invalid_arguments.out index 4a5fa377c..3fa2d2335 100644 --- a/src/test/regress/expected/worker_check_invalid_arguments.out +++ b/src/test/regress/expected/worker_check_invalid_arguments.out @@ -20,7 +20,7 @@ SET citus.next_shard_id TO 1100000; CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT COUNT(*) FROM :Table_Name; - count + count --------------------------------------------------------------------- 2 (1 row) @@ -50,9 +50,9 @@ ERROR: partition column types 25 and 20 do not match SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); - worker_range_partition_table + worker_range_partition_table --------------------------------------------------------------------- - + (1 row) -- Check that we fail to merge when the number of column names and column types @@ -90,9 +90,9 @@ ERROR: could not open directory "base/pgsql_job_cache/job_201010/task_429496729 
SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea']); - worker_merge_files_into_table + worker_merge_files_into_table --------------------------------------------------------------------- - + (1 row) -- worker_execute_sql_task should only accept queries diff --git a/src/test/regress/expected/worker_hash_partition.out b/src/test/regress/expected/worker_hash_partition.out index 172d96a7e..08c6d468f 100644 --- a/src/test/regress/expected/worker_hash_partition.out +++ b/src/test/regress/expected/worker_hash_partition.out @@ -26,9 +26,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type::regtype, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); - worker_hash_partition_table + worker_hash_partition_table --------------------------------------------------------------------- - + (1 row) COPY :Table_Part_00 FROM :'Table_File_00'; @@ -36,25 +36,25 @@ COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; - count + count --------------------------------------------------------------------- 2885 (1 row) SELECT COUNT(*) FROM :Table_Part_01; - count + count --------------------------------------------------------------------- 3009 (1 row) SELECT COUNT(*) FROM :Table_Part_02; - count + count --------------------------------------------------------------------- 3104 (1 row) SELECT COUNT(*) FROM :Table_Part_03; - count + count --------------------------------------------------------------------- 3002 (1 row) @@ -64,7 +64,7 @@ SELECT COUNT(*) FROM :Table_Part_03; SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) ) diff; - diff_lhs_00 + diff_lhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -72,7 +72,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM ( SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) ) diff; - diff_lhs_01 + diff_lhs_01 --------------------------------------------------------------------- 0 (1 row) @@ -80,7 +80,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM ( SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) ) diff; - diff_lhs_02 + diff_lhs_02 --------------------------------------------------------------------- 0 (1 row) @@ -88,7 +88,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM ( SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) ) diff; - diff_lhs_03 + diff_lhs_03 --------------------------------------------------------------------- 0 (1 row) @@ -96,7 +96,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM ( SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; - diff_rhs_00 + diff_rhs_00 --------------------------------------------------------------------- 0 (1 row) @@ -104,7 +104,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM ( SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; - diff_rhs_01 + diff_rhs_01 
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -112,7 +112,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) EXCEPT ALL
 	:Select_All FROM :Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -120,7 +120,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 SELECT COUNT(*) AS diff_rhs_03 FROM (
 	:Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) EXCEPT ALL
 	:Select_All FROM :Table_Part_03 ) diff;
- diff_rhs_03 
+ diff_rhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
diff --git a/src/test/regress/expected/worker_hash_partition_complex.out b/src/test/regress/expected/worker_hash_partition_complex.out
index 12899e510..14c879bdf 100644
--- a/src/test/regress/expected/worker_hash_partition_complex.out
+++ b/src/test/regress/expected/worker_hash_partition_complex.out
@@ -30,9 +30,9 @@ SELECT worker_hash_partition_table(:JobId, :TaskId,
 				' AND l_discount between 0.02 AND 0.08',
 				:Partition_Column_Text, :Partition_Column_Type,
 				ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]);
- worker_hash_partition_table 
+ worker_hash_partition_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- Copy partitioned data files into tables for testing purposes
@@ -41,13 +41,13 @@ COPY :Table_Part_01 FROM :'Table_File_01';
 COPY :Table_Part_02 FROM :'Table_File_02';
 COPY :Table_Part_03 FROM :'Table_File_03';
 SELECT COUNT(*) FROM :Table_Part_00;
- count 
+ count
 ---------------------------------------------------------------------
  1883
 (1 row)
 
 SELECT COUNT(*) FROM :Table_Part_03;
- count 
+ count
 ---------------------------------------------------------------------
  1913
 (1 row)
@@ -58,7 +58,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 	:Select_Columns FROM :Table_Part_00 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 0) ) diff;
- diff_lhs_00 
+ diff_lhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -67,7 +67,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 	:Select_Columns FROM :Table_Part_01 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 1) ) diff;
- diff_lhs_01 
+ diff_lhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -76,7 +76,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 	:Select_Columns FROM :Table_Part_02 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 2) ) diff;
- diff_lhs_02 
+ diff_lhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -85,7 +85,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM (
 	:Select_Columns FROM :Table_Part_03 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 3) ) diff;
- diff_lhs_03 
+ diff_lhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -94,7 +94,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 0) EXCEPT ALL
 	:Select_Columns FROM :Table_Part_00 ) diff;
- diff_rhs_00 
+ diff_rhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -103,7 +103,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 1) EXCEPT ALL
 	:Select_Columns FROM :Table_Part_01 ) diff;
- diff_rhs_01 
+ diff_rhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -112,7 +112,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 2) EXCEPT ALL
 	:Select_Columns FROM :Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -121,7 +121,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	(:Hash_Mod_Function = 3) EXCEPT ALL
 	:Select_Columns FROM :Table_Part_03 ) diff;
- diff_rhs_03 
+ diff_rhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
diff --git a/src/test/regress/expected/worker_merge_hash_files.out b/src/test/regress/expected/worker_merge_hash_files.out
index ee361819e..74d4a9016 100644
--- a/src/test/regress/expected/worker_merge_hash_files.out
+++ b/src/test/regress/expected/worker_merge_hash_files.out
@@ -15,35 +15,35 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId,
 			      ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)',
 			      'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)',
 			      'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text);
- worker_merge_files_into_table 
+ worker_merge_files_into_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- We first count elements from the merged table and the original table we hash
 -- partitioned. We then compute the difference of these two tables.
 SELECT COUNT(*) FROM :Task_Table_Name;
- count 
+ count
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 SELECT COUNT(*) FROM lineitem;
- count 
+ count
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 SELECT COUNT(*) AS diff_lhs FROM (
 	:Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff;
- diff_lhs 
+ diff_lhs
 ---------------------------------------------------------------------
        0
 (1 row)
 
 SELECT COUNT(*) AS diff_rhs FROM (
 	:Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff;
- diff_rhs 
+ diff_rhs
 ---------------------------------------------------------------------
        0
 (1 row)
diff --git a/src/test/regress/expected/worker_merge_range_files.out b/src/test/regress/expected/worker_merge_range_files.out
index f7e46399f..54907fe4a 100644
--- a/src/test/regress/expected/worker_merge_range_files.out
+++ b/src/test/regress/expected/worker_merge_range_files.out
@@ -15,35 +15,35 @@ SELECT worker_merge_files_into_table(:JobId, :TaskId,
 			      ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)',
 			      'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)',
 			      'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text);
- worker_merge_files_into_table 
+ worker_merge_files_into_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- We first count elements from the merged table and the original table we range
 -- partitioned. We then compute the difference of these two tables.
 SELECT COUNT(*) FROM :Task_Table_Name;
- count 
+ count
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 SELECT COUNT(*) FROM lineitem;
- count 
+ count
 ---------------------------------------------------------------------
  12000
 (1 row)
 
 SELECT COUNT(*) AS diff_lhs FROM (
 	:Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff;
- diff_lhs 
+ diff_lhs
 ---------------------------------------------------------------------
        0
 (1 row)
 
 SELECT COUNT(*) AS diff_rhs FROM (
 	:Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff;
- diff_rhs 
+ diff_rhs
 ---------------------------------------------------------------------
        0
 (1 row)
diff --git a/src/test/regress/expected/worker_null_data_partition.out b/src/test/regress/expected/worker_null_data_partition.out
index 3a42eaabc..0aa065320 100644
--- a/src/test/regress/expected/worker_null_data_partition.out
+++ b/src/test/regress/expected/worker_null_data_partition.out
@@ -23,9 +23,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset
 SELECT worker_range_partition_table(:JobId, :Range_TaskId, :Select_Query_Text,
 				    :Partition_Column_Text, :Partition_Column_Type,
 				    ARRAY[0, 10]::_int4);
- worker_range_partition_table 
+ worker_range_partition_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- Copy partitioned data files into tables for testing purposes
@@ -33,13 +33,13 @@ COPY :Range_Table_Part_00 FROM :'Range_Table_File_00';
 COPY :Range_Table_Part_01 FROM :'Range_Table_File_01';
 COPY :Range_Table_Part_02 FROM :'Range_Table_File_02';
 SELECT COUNT(*) FROM :Range_Table_Part_00;
- count 
+ count
 ---------------------------------------------------------------------
      6
 (1 row)
 
 SELECT COUNT(*) FROM :Range_Table_Part_02;
- count 
+ count
 ---------------------------------------------------------------------
    588
 (1 row)
@@ -50,7 +50,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 	:Select_All FROM :Range_Table_Part_00 EXCEPT ALL
 	(:Select_All FROM supplier WHERE :Partition_Column < 0 OR
 	:Partition_Column IS NULL) ) diff;
- diff_lhs_00 
+ diff_lhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -59,7 +59,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 	:Select_All FROM :Range_Table_Part_01 EXCEPT ALL
 	:Select_All FROM supplier WHERE :Partition_Column >= 0 AND
 	:Partition_Column < 10 ) diff;
- diff_lhs_01 
+ diff_lhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -67,7 +67,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL
 	:Select_All FROM :Range_Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -76,7 +76,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 	(:Select_All FROM supplier WHERE :Partition_Column < 0 OR
 	:Partition_Column IS NULL) EXCEPT ALL
 	:Select_All FROM :Range_Table_Part_00 ) diff;
- diff_rhs_00 
+ diff_rhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -85,7 +85,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 	:Select_All FROM supplier WHERE :Partition_Column >= 0 AND
 	:Partition_Column < 10 EXCEPT ALL
 	:Select_All FROM :Range_Table_Part_01 ) diff;
- diff_rhs_01 
+ diff_rhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -93,7 +93,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL
 	:Select_All FROM :Range_Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -116,22 +116,22 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 SELECT worker_hash_partition_table(:JobId, :Hash_TaskId, :Select_Query_Text,
 				   :Partition_Column_Text, :Partition_Column_Type,
 				   ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]);
- worker_hash_partition_table 
+ worker_hash_partition_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY :Hash_Table_Part_00 FROM :'Hash_Table_File_00';
 COPY :Hash_Table_Part_01 FROM :'Hash_Table_File_01';
 COPY :Hash_Table_Part_02 FROM :'Hash_Table_File_02';
 SELECT COUNT(*) FROM :Hash_Table_Part_00;
- count 
+ count
 ---------------------------------------------------------------------
    282
 (1 row)
 
 SELECT COUNT(*) FROM :Hash_Table_Part_02;
- count 
+ count
 ---------------------------------------------------------------------
    102
 (1 row)
@@ -142,7 +142,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 	:Select_All FROM :Hash_Table_Part_00 EXCEPT ALL
 	(:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR
 	:Partition_Column IS NULL) ) diff;
- diff_lhs_00 
+ diff_lhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -150,7 +150,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 SELECT COUNT(*) AS diff_lhs_01 FROM (
 	:Select_All FROM :Hash_Table_Part_01 EXCEPT ALL
 	:Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) ) diff;
- diff_lhs_01 
+ diff_lhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -158,7 +158,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 SELECT COUNT(*) AS diff_lhs_02 FROM (
 	:Select_All FROM :Hash_Table_Part_02 EXCEPT ALL
 	:Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) ) diff;
- diff_lhs_02 
+ diff_lhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -167,7 +167,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 	(:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR
 	:Partition_Column IS NULL) EXCEPT ALL
 	:Select_All FROM :Hash_Table_Part_00 ) diff;
- diff_rhs_00 
+ diff_rhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -175,7 +175,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 SELECT COUNT(*) AS diff_rhs_01 FROM (
 	:Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) EXCEPT ALL
 	:Select_All FROM :Hash_Table_Part_01 ) diff;
- diff_rhs_01 
+ diff_rhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -183,7 +183,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) EXCEPT ALL
 	:Select_All FROM :Hash_Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
diff --git a/src/test/regress/expected/worker_range_partition.out b/src/test/regress/expected/worker_range_partition.out
index 9acd12002..2e925471b 100644
--- a/src/test/regress/expected/worker_range_partition.out
+++ b/src/test/regress/expected/worker_range_partition.out
@@ -22,9 +22,9 @@ SELECT usesysid AS userid FROM pg_user WHERE usename = current_user \gset
 SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text,
 				    :Partition_Column_Text, :Partition_Column_Type,
 				    ARRAY[1, 3000, 12000]::_int8);
- worker_range_partition_table 
+ worker_range_partition_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 COPY :Table_Part_00 FROM :'Table_File_00';
@@ -32,13 +32,13 @@ COPY :Table_Part_01 FROM :'Table_File_01';
 COPY :Table_Part_02 FROM :'Table_File_02';
 COPY :Table_Part_03 FROM :'Table_File_03';
 SELECT COUNT(*) FROM :Table_Part_00;
- count 
+ count
 ---------------------------------------------------------------------
      0
 (1 row)
 
 SELECT COUNT(*) FROM :Table_Part_03;
- count 
+ count
 ---------------------------------------------------------------------
   3047
 (1 row)
@@ -48,7 +48,7 @@ SELECT COUNT(*) FROM :Table_Part_03;
 SELECT COUNT(*) AS diff_lhs_00 FROM (
 	:Select_All FROM :Table_Part_00 EXCEPT ALL
 	:Select_All FROM lineitem WHERE :Partition_Column < 1 ) diff;
- diff_lhs_00 
+ diff_lhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -57,7 +57,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 	:Select_All FROM :Table_Part_01 EXCEPT ALL
 	:Select_All FROM lineitem WHERE :Partition_Column >= 1 AND
 	:Partition_Column < 3000 ) diff;
- diff_lhs_01 
+ diff_lhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -66,7 +66,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 	:Select_All FROM :Table_Part_02 EXCEPT ALL
 	:Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND
 	:Partition_Column < 12000 ) diff;
- diff_lhs_02 
+ diff_lhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -74,7 +74,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 SELECT COUNT(*) AS diff_lhs_03 FROM (
 	:Select_All FROM :Table_Part_03 EXCEPT ALL
 	:Select_All FROM lineitem WHERE :Partition_Column >= 12000 ) diff;
- diff_lhs_03 
+ diff_lhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -82,7 +82,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM (
 SELECT COUNT(*) AS diff_rhs_00 FROM (
 	:Select_All FROM lineitem WHERE :Partition_Column < 1 EXCEPT ALL
 	:Select_All FROM :Table_Part_00 ) diff;
- diff_rhs_00 
+ diff_rhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -91,7 +91,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 	:Select_All FROM lineitem WHERE :Partition_Column >= 1 AND
 	:Partition_Column < 3000 EXCEPT ALL
 	:Select_All FROM :Table_Part_01 ) diff;
- diff_rhs_01 
+ diff_rhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -100,7 +100,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND
 	:Partition_Column < 12000 EXCEPT ALL
 	:Select_All FROM :Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -108,7 +108,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 SELECT COUNT(*) AS diff_rhs_03 FROM (
 	:Select_All FROM lineitem WHERE :Partition_Column >= 12000 EXCEPT ALL
 	:Select_All FROM :Table_Part_03 ) diff;
- diff_rhs_03 
+ diff_rhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
diff --git a/src/test/regress/expected/worker_range_partition_complex.out b/src/test/regress/expected/worker_range_partition_complex.out
index c39857389..63ec9b376 100644
--- a/src/test/regress/expected/worker_range_partition_complex.out
+++ b/src/test/regress/expected/worker_range_partition_complex.out
@@ -27,9 +27,9 @@ SELECT worker_range_partition_table(:JobId, :TaskId,
 				' AND l_discount between 0.02 AND 0.08',
 				:Partition_Column_Text, :Partition_Column_Type,
 				ARRAY[101, 12000, 18000]::_int4);
- worker_range_partition_table 
+ worker_range_partition_table
 ---------------------------------------------------------------------
- 
+
 (1 row)
 
 -- Copy partitioned data files into tables for testing purposes
@@ -38,13 +38,13 @@ COPY :Table_Part_01 FROM :'Table_File_01';
 COPY :Table_Part_02 FROM :'Table_File_02';
 COPY :Table_Part_03 FROM :'Table_File_03';
 SELECT COUNT(*) FROM :Table_Part_00;
- count 
+ count
 ---------------------------------------------------------------------
      3
 (1 row)
 
 SELECT COUNT(*) FROM :Table_Part_03;
- count 
+ count
 ---------------------------------------------------------------------
   7022
 (1 row)
@@ -55,7 +55,7 @@ SELECT COUNT(*) AS diff_lhs_00 FROM (
 	:Select_Columns FROM :Table_Part_00 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column < 101 ) diff;
- diff_lhs_00 
+ diff_lhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -65,7 +65,7 @@ SELECT COUNT(*) AS diff_lhs_01 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column >= 101 AND
 	:Partition_Column < 12000 ) diff;
- diff_lhs_01 
+ diff_lhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -75,7 +75,7 @@ SELECT COUNT(*) AS diff_lhs_02 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column >= 12000 AND
 	:Partition_Column < 18000 ) diff;
- diff_lhs_02 
+ diff_lhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -84,7 +84,7 @@ SELECT COUNT(*) AS diff_lhs_03 FROM (
 	:Select_Columns FROM :Table_Part_03 EXCEPT ALL
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column >= 18000 ) diff;
- diff_lhs_03 
+ diff_lhs_03
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -93,7 +93,7 @@ SELECT COUNT(*) AS diff_rhs_00 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column < 101 EXCEPT ALL
 	:Select_Columns FROM :Table_Part_00 ) diff;
- diff_rhs_00 
+ diff_rhs_00
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -103,7 +103,7 @@ SELECT COUNT(*) AS diff_rhs_01 FROM (
 	:Partition_Column >= 101 AND
 	:Partition_Column < 12000 EXCEPT ALL
 	:Select_Columns FROM :Table_Part_01 ) diff;
- diff_rhs_01 
+ diff_rhs_01
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -113,7 +113,7 @@ SELECT COUNT(*) AS diff_rhs_02 FROM (
 	:Partition_Column >= 12000 AND
 	:Partition_Column < 18000 EXCEPT ALL
 	:Select_Columns FROM :Table_Part_02 ) diff;
- diff_rhs_02 
+ diff_rhs_02
 ---------------------------------------------------------------------
           0
 (1 row)
@@ -122,7 +122,7 @@ SELECT COUNT(*) AS diff_rhs_03 FROM (
 	:Select_Columns FROM lineitem WHERE :Select_Filters AND
 	:Partition_Column >= 18000 EXCEPT ALL
 	:Select_Columns FROM :Table_Part_03 ) diff;
- diff_rhs_03 
+ diff_rhs_03
 ---------------------------------------------------------------------
           0
 (1 row)