diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py
index 04bf606f8..1ce2cfd9f 100755
--- a/src/test/regress/citus_tests/run_test.py
+++ b/src/test/regress/citus_tests/run_test.py
@@ -127,6 +127,9 @@ DEPS = {
"multi_mx_function_table_reference",
],
),
+ "alter_distributed_table": TestDeps(
+ "minimal_schedule", ["multi_behavioral_analytics_create_table"]
+ ),
"background_rebalance": TestDeps(
None,
[
@@ -144,6 +147,7 @@ DEPS = {
worker_count=6,
),
"function_propagation": TestDeps("minimal_schedule"),
+ "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"),
"multi_mx_modifying_xacts": TestDeps(None, ["multi_mx_create_table"]),
"multi_mx_router_planner": TestDeps(None, ["multi_mx_create_table"]),
"multi_mx_copy_data": TestDeps(None, ["multi_mx_create_table"]),
diff --git a/src/test/regress/columnar_schedule b/src/test/regress/columnar_schedule
index 11e9494d2..602af0fc7 100644
--- a/src/test/regress/columnar_schedule
+++ b/src/test/regress/columnar_schedule
@@ -2,6 +2,7 @@ test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
test: multi_cluster_management
test: multi_test_catalog_views
+test: remove_coordinator_from_metadata
test: columnar_create
test: columnar_load
test: columnar_query
diff --git a/src/test/regress/enterprise_schedule b/src/test/regress/enterprise_schedule
index 55791d43a..9a832c4d6 100644
--- a/src/test/regress/enterprise_schedule
+++ b/src/test/regress/enterprise_schedule
@@ -16,11 +16,11 @@ test: add_coordinator
test: citus_local_tables_ent
-test: remove_coordinator
# --------
test: publication
test: logical_replication
+test: check_cluster_state
test: multi_create_table
test: multi_create_table_superuser
test: multi_create_role_dependency
diff --git a/src/test/regress/expected/adaptive_executor_repartition.out b/src/test/regress/expected/adaptive_executor_repartition.out
index a84677a35..3ac9e6a13 100644
--- a/src/test/regress/expected/adaptive_executor_repartition.out
+++ b/src/test/regress/expected/adaptive_executor_repartition.out
@@ -168,10 +168,11 @@ select count(*) from trips t1, cars r1, trips t2, cars r2 where t1.trip_id = t2.
(1 row)
DROP SCHEMA adaptive_executor CASCADE;
-NOTICE: drop cascades to 6 other objects
+NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table ab
drop cascades to table single_hash_repartition_first
drop cascades to table single_hash_repartition_second
drop cascades to table ref_table
+drop cascades to table ref_table_361397
drop cascades to table cars
drop cascades to table trips
diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out
index 01f3a682d..499669385 100644
--- a/src/test/regress/expected/add_coordinator.out
+++ b/src/test/regress/expected/add_coordinator.out
@@ -2,6 +2,13 @@
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
+-- first remove the coordinator to test master_add_node for the coordinator
+SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);
diff --git a/src/test/regress/expected/alter_distributed_table.out b/src/test/regress/expected/alter_distributed_table.out
index b8b86cd11..9d968dbb1 100644
--- a/src/test/regress/expected/alter_distributed_table.out
+++ b/src/test/regress/expected/alter_distributed_table.out
@@ -528,8 +528,8 @@ SELECT COUNT(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid::r
-- test references
CREATE TABLE referenced_dist_table (a INT UNIQUE);
CREATE TABLE referenced_ref_table (a INT UNIQUE);
-CREATE TABLE table_with_references (a1 INT UNIQUE REFERENCES referenced_dist_table(a), a2 INT REFERENCES referenced_ref_table(a));
-CREATE TABLE referencing_dist_table (a INT REFERENCES table_with_references(a1));
+CREATE TABLE table_with_references (a1 INT UNIQUE, a2 INT);
+CREATE TABLE referencing_dist_table (a INT);
SELECT create_distributed_table('referenced_dist_table', 'a', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
@@ -554,6 +554,9 @@ SELECT create_distributed_table('referencing_dist_table', 'a', colocate_with:='r
(1 row)
+ALTER TABLE table_with_references ADD FOREIGN KEY (a1) REFERENCES referenced_dist_table(a);
+ALTER TABLE table_with_references ADD FOREIGN KEY (a2) REFERENCES referenced_ref_table(a);
+ALTER TABLE referencing_dist_table ADD FOREIGN KEY (a) REFERENCES table_with_references(a1);
SET client_min_messages TO WARNING;
SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1,2;
@@ -1255,3 +1258,4 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewna
RESET search_path;
DROP SCHEMA alter_distributed_table CASCADE;
DROP SCHEMA schema_to_test_alter_dist_table CASCADE;
+DROP USER alter_dist_table_test_user;
diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out
index 63d0990d4..8a6f335a7 100644
--- a/src/test/regress/expected/alter_table_set_access_method.out
+++ b/src/test/regress/expected/alter_table_set_access_method.out
@@ -802,9 +802,3 @@ select alter_table_set_access_method('view_test_view','columnar');
ERROR: you cannot alter access method of a view
SET client_min_messages TO WARNING;
DROP SCHEMA alter_table_set_access_method CASCADE;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/check_cluster_state.out b/src/test/regress/expected/check_cluster_state.out
new file mode 100644
index 000000000..c66119d52
--- /dev/null
+++ b/src/test/regress/expected/check_cluster_state.out
@@ -0,0 +1,6 @@
+SELECT count(*) >= 1 as coordinator_exists FROM pg_dist_node WHERE groupid = 0 AND isactive;
+ coordinator_exists
+---------------------------------------------------------------------
+ t
+(1 row)
+
diff --git a/src/test/regress/expected/citus_local_dist_joins.out b/src/test/regress/expected/citus_local_dist_joins.out
index 25833fc05..44101c925 100644
--- a/src/test/regress/expected/citus_local_dist_joins.out
+++ b/src/test/regress/expected/citus_local_dist_joins.out
@@ -1,7 +1,6 @@
CREATE SCHEMA citus_local_dist_joins;
SET search_path TO citus_local_dist_joins;
SET client_min_messages to ERROR;
-SELECT master_add_node('localhost', :master_port, groupId => 0) AS coordinator_nodeid \gset
CREATE TABLE citus_local(key int, value text);
SELECT citus_add_local_table_to_metadata('citus_local');
citus_add_local_table_to_metadata
@@ -523,11 +522,5 @@ ERROR: recursive complex joins are only supported when all distributed tables a
RESET citus.local_table_join_policy;
SET client_min_messages to ERROR;
DROP TABLE citus_local;
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
\set VERBOSITY terse
DROP SCHEMA citus_local_dist_joins CASCADE;
diff --git a/src/test/regress/expected/citus_table_triggers.out b/src/test/regress/expected/citus_table_triggers.out
index 80273121e..80954f70f 100644
--- a/src/test/regress/expected/citus_table_triggers.out
+++ b/src/test/regress/expected/citus_table_triggers.out
@@ -155,4 +155,4 @@ SELECT master_get_table_ddl_events('test_table');
-- cleanup at exit
DROP SCHEMA table_triggers_schema CASCADE;
-NOTICE: drop cascades to 8 other objects
+NOTICE: drop cascades to 9 other objects
diff --git a/src/test/regress/expected/citus_update_table_statistics.out b/src/test/regress/expected/citus_update_table_statistics.out
index 031104c53..d908e433d 100644
--- a/src/test/regress/expected/citus_update_table_statistics.out
+++ b/src/test/regress/expected/citus_update_table_statistics.out
@@ -64,6 +64,10 @@ SET citus.multi_shard_modify_mode TO sequential;
SELECT citus_update_table_statistics('test_table_statistics_hash');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT 0::bigint, NULL::text, 0::bigint;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@@ -73,6 +77,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_update_table_statistics
---------------------------------------------------------------------
@@ -152,6 +158,10 @@ SET citus.multi_shard_modify_mode TO sequential;
SELECT citus_update_table_statistics('test_table_statistics_append');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT 0::bigint, NULL::text, 0::bigint;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@@ -161,6 +171,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_update_table_statistics
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out
index bc72af14c..668f97c3f 100644
--- a/src/test/regress/expected/distributed_functions.out
+++ b/src/test/regress/expected/distributed_functions.out
@@ -1118,7 +1118,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;
-- clear objects
-SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
+SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
stop_metadata_sync_to_node
---------------------------------------------------------------------
@@ -1144,7 +1144,7 @@ SELECT 1 FROM run_command_on_workers($$DROP USER functionuser$$);
(2 rows)
-- sync metadata again
-SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
+SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
start_metadata_sync_to_node
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/dml_recursive.out b/src/test/regress/expected/dml_recursive.out
index b1d521ca2..cc4058def 100644
--- a/src/test/regress/expected/dml_recursive.out
+++ b/src/test/regress/expected/dml_recursive.out
@@ -357,9 +357,10 @@ DEBUG: generating subplan XXX_1 for subquery SELECT tenant_id FROM recursive_dm
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.local_table SET id = 'citus_test'::text FROM (SELECT distributed_table_1.tenant_id, NULL::integer AS dept, NULL::jsonb AS info FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) distributed_table_1) distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) local_table.id)
RESET client_min_messages;
DROP SCHEMA recursive_dml_queries CASCADE;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 6 other objects
DETAIL: drop cascades to table distributed_table
drop cascades to table second_distributed_table
drop cascades to table reference_table
+drop cascades to table reference_table_2370008
drop cascades to table local_table
drop cascades to view tenant_ids
diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out
index d27b0fb65..6934e6434 100644
--- a/src/test/regress/expected/fast_path_router_modify.out
+++ b/src/test/regress/expected/fast_path_router_modify.out
@@ -489,8 +489,9 @@ RESET citus.enable_fast_path_router_planner;
RESET client_min_messages;
RESET citus.log_remote_commands;
DROP SCHEMA fast_path_router_modify CASCADE;
-NOTICE: drop cascades to 4 other objects
+NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table modify_fast_path
drop cascades to table modify_fast_path_replication_2
drop cascades to table modify_fast_path_reference
+drop cascades to table modify_fast_path_reference_1840008
drop cascades to function modify_fast_path_plpsql(integer,integer)
diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out
index 15facd198..f8c9339a7 100644
--- a/src/test/regress/expected/foreign_key_restriction_enforcement.out
+++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out
@@ -616,12 +616,15 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "reference_table_2380001"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "transitive_reference_table_2380000"
+DEBUG: validating foreign key constraint "fkey_xxxxxxx"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
ROLLBACK;
-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns
@@ -629,6 +632,7 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "reference_table_2380001"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
@@ -637,6 +641,8 @@ BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "transitive_reference_table_2380000"
+DEBUG: validating foreign key constraint "fkey_xxxxxxx"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
@@ -672,12 +678,15 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "reference_table_2380001"
TRUNCATE on_update_fkey_table;
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey"
+DEBUG: rewriting table "transitive_reference_table_2380000"
+DEBUG: validating foreign key constraint "fkey_xxxxxxx"
TRUNCATE on_update_fkey_table;
ROLLBACK;
---------------------------------------------------------------------
@@ -834,6 +843,7 @@ BEGIN;
TRUNCATE transitive_reference_table CASCADE;
NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table"
+NOTICE: truncate cascades to table "reference_table_xxxxx"
ROLLBACK;
-- case 4.7: SELECT to a dist table is followed by a DROP
-- DROP following SELECT is important as we error out after
@@ -1101,6 +1111,12 @@ ROLLBACK;
-- the fails since we're trying to switch sequential mode after
-- already executed a parallel query
BEGIN;
+ SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
@@ -1129,6 +1145,12 @@ ROLLBACK;
-- same test with the above, but this time using
-- sequential mode, succeeds
BEGIN;
+ SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
@@ -1499,6 +1521,6 @@ ROLLBACK;
RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 7 other objects
\set VERBOSITY default
SET search_path TO public;
diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out
index 8a588a3cf..d49f4fd10 100644
--- a/src/test/regress/expected/function_propagation.out
+++ b/src/test/regress/expected/function_propagation.out
@@ -864,13 +864,6 @@ BEGIN;
(0 rows)
CREATE TABLE citus_local_table_to_test_func(l1 int DEFAULT func_in_transaction_for_local_table());
- SET LOCAL client_min_messages TO WARNING;
- SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SELECT citus_add_local_table_to_metadata('citus_local_table_to_test_func');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/grant_on_foreign_server_propagation.out b/src/test/regress/expected/grant_on_foreign_server_propagation.out
index b98130404..7c47a5524 100644
--- a/src/test/regress/expected/grant_on_foreign_server_propagation.out
+++ b/src/test/regress/expected/grant_on_foreign_server_propagation.out
@@ -5,6 +5,12 @@
CREATE SCHEMA "grant on server";
SET search_path TO "grant on server";
-- remove one of the worker nodes to test adding a new node later
+SELECT 1 FROM citus_remove_node('localhost', :master_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
@@ -173,9 +179,3 @@ SET client_min_messages TO ERROR;
DROP SERVER "Foreign Server" CASCADE;
DROP SCHEMA "grant on server" CASCADE;
DROP ROLE role_test_servers, role_test_servers_2, ownerrole;
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/insert_select_into_local_table.out b/src/test/regress/expected/insert_select_into_local_table.out
index f53348272..0e919b7cd 100644
--- a/src/test/regress/expected/insert_select_into_local_table.out
+++ b/src/test/regress/expected/insert_select_into_local_table.out
@@ -1112,4 +1112,4 @@ RETURNING *;
ROLLBACK;
\set VERBOSITY terse
DROP SCHEMA insert_select_into_local_table CASCADE;
-NOTICE: drop cascades to 12 other objects
+NOTICE: drop cascades to 13 other objects
diff --git a/src/test/regress/expected/insert_select_repartition.out b/src/test/regress/expected/insert_select_repartition.out
index b97a82b63..88acc49e3 100644
--- a/src/test/regress/expected/insert_select_repartition.out
+++ b/src/test/regress/expected/insert_select_repartition.out
@@ -1092,14 +1092,14 @@ EXPLAIN (costs off) INSERT INTO test(y, x) SELECT a.x, b.y FROM test a JOIN test
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(11 rows)
SET client_min_messages TO DEBUG1;
@@ -1121,14 +1121,14 @@ EXPLAIN (costs off) INSERT INTO test SELECT a.* FROM test a JOIN test b USING (y
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(11 rows)
SET client_min_messages TO DEBUG1;
diff --git a/src/test/regress/expected/insert_select_repartition_0.out b/src/test/regress/expected/insert_select_repartition_0.out
index 5bcb894cc..7217be3e9 100644
--- a/src/test/regress/expected/insert_select_repartition_0.out
+++ b/src/test/regress/expected/insert_select_repartition_0.out
@@ -1092,14 +1092,14 @@ EXPLAIN (costs off) INSERT INTO test(y, x) SELECT a.x, b.y FROM test a JOIN test
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(11 rows)
SET client_min_messages TO DEBUG1;
@@ -1121,14 +1121,14 @@ EXPLAIN (costs off) INSERT INTO test SELECT a.* FROM test a JOIN test b USING (y
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(11 rows)
SET client_min_messages TO DEBUG1;
diff --git a/src/test/regress/expected/insert_select_single_shard_table.out b/src/test/regress/expected/insert_select_single_shard_table.out
index 219e7d5d9..d27bdcd73 100644
--- a/src/test/regress/expected/insert_select_single_shard_table.out
+++ b/src/test/regress/expected/insert_select_single_shard_table.out
@@ -2,13 +2,6 @@ CREATE SCHEMA insert_select_single_shard_table;
SET search_path TO insert_select_single_shard_table;
SET citus.next_shard_id TO 1820000;
SET citus.shard_count TO 32;
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int);
CREATE TABLE nullkey_c1_t2(a int, b int);
@@ -823,9 +816,3 @@ DEBUG: Creating router plan
SET client_min_messages TO WARNING;
DROP SCHEMA insert_select_single_shard_table CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
diff --git a/src/test/regress/expected/intermediate_result_pruning.out b/src/test/regress/expected/intermediate_result_pruning.out
index e178765a8..5262ebc79 100644
--- a/src/test/regress/expected/intermediate_result_pruning.out
+++ b/src/test/regress/expected/intermediate_result_pruning.out
@@ -99,6 +99,7 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count
---------------------------------------------------------------------
@@ -386,6 +387,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
key | key | value
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/intermediate_result_pruning_0.out b/src/test/regress/expected/intermediate_result_pruning_0.out
index ec4b489d0..ae1247545 100644
--- a/src/test/regress/expected/intermediate_result_pruning_0.out
+++ b/src/test/regress/expected/intermediate_result_pruning_0.out
@@ -99,6 +99,7 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count
---------------------------------------------------------------------
@@ -386,6 +387,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
+DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
key | key | value
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out
index bf13ce21c..8b2e200f7 100644
--- a/src/test/regress/expected/intermediate_results.out
+++ b/src/test/regress/expected/intermediate_results.out
@@ -672,3 +672,5 @@ COMMIT;
SET client_min_messages TO ERROR;
DROP SCHEMA other_schema CASCADE;
DROP SCHEMA intermediate_results CASCADE;
+DROP OWNED BY some_other_user;
+DROP USER some_other_user;
diff --git a/src/test/regress/expected/join_pushdown.out b/src/test/regress/expected/join_pushdown.out
index c71478d30..02a16c195 100644
--- a/src/test/regress/expected/join_pushdown.out
+++ b/src/test/regress/expected/join_pushdown.out
@@ -463,10 +463,11 @@ SELECT * FROM abcd first join abcd second USING(b) join abcd third on first.b=th
END;
DROP SCHEMA join_schema CASCADE;
-NOTICE: drop cascades to 6 other objects
+NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table abcd
drop cascades to table distributed_table
drop cascades to table reference_table
+drop cascades to table reference_table_9000004
drop cascades to table test_table_1
drop cascades to table test_table_2
drop cascades to view abcd_view
diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out
index 662ce0e05..e6fd0e798 100644
--- a/src/test/regress/expected/limit_intermediate_size.out
+++ b/src/test/regress/expected/limit_intermediate_size.out
@@ -16,7 +16,7 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-SET citus.max_intermediate_result_size TO 9;
+SET citus.max_intermediate_result_size TO 17;
WITH cte AS MATERIALIZED
(
SELECT
diff --git a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out
index 96b570ac3..7da341207 100644
--- a/src/test/regress/expected/local_table_join.out
+++ b/src/test/regress/expected/local_table_join.out
@@ -86,7 +86,13 @@ CREATE FOREIGN TABLE foreign_table (
CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM postgres_table;
CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM distributed_table;
SET client_min_messages TO DEBUG1;
--- the user doesn't allow local / distributed table joinn
+-- the user doesn't allow local / distributed table join
+SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
SET citus.local_table_join_policy TO 'never';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
ERROR: direct joins between distributed and local tables are not supported
@@ -94,6 +100,12 @@ HINT: Use CTE's or subqueries to select from local tables and use them in joins
SELECT count(*) FROM postgres_table JOIN reference_table USING(key);
ERROR: direct joins between distributed and local tables are not supported
HINT: Use CTE's or subqueries to select from local tables and use them in joins
+SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
-- the user prefers local table recursively planned
SET citus.local_table_join_policy TO 'prefer-local';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
@@ -1586,6 +1598,12 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 AS res FROM
(1 row)
ROLLBACK;
+SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
BEGIN;
SELECT create_reference_table('table1');
NOTICE: Copying data from local table...
@@ -1632,7 +1650,13 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 AS res FROM
(1 row)
ROLLBACK;
+SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA local_table_join CASCADE;
-NOTICE: drop cascades to 22 other objects
+NOTICE: drop cascades to 23 other objects
diff --git a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out
index 79108dd11..8a3e96da9 100644
--- a/src/test/regress/expected/logical_replication.out
+++ b/src/test/regress/expected/logical_replication.out
@@ -14,13 +14,6 @@ SELECT create_distributed_table('dist', 'id');
(1 row)
INSERT INTO dist SELECT generate_series(1, 100);
-SELECT 1 from citus_add_node('localhost', :master_port, groupId := 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-- Create a publiction and subscription (including replication slot) manually.
-- This allows us to test the cleanup logic at the start of the shard move.
\c - - - :worker_1_port
@@ -97,12 +90,6 @@ select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localho
(1 row)
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-- the subscription is still there, as there is no cleanup record for it
-- we have created it manually
SELECT count(*) from pg_subscription;
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index 3cf776ded..85d6daab6 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -20,13 +20,6 @@ SET citus.next_shard_id TO 4000000;
SET citus.explain_all_tasks TO true;
SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
CREATE TABLE source
(
order_id INT,
@@ -3477,9 +3470,3 @@ drop cascades to table dist_colocated
drop cascades to table dist_target
drop cascades to table dist_source
drop cascades to view show_tables
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out b/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
index 54224c924..0b048946c 100644
--- a/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
+++ b/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
@@ -862,14 +862,6 @@ DROP TABLE AT_AddConstNoName.dist_partitioned_table;
-- Test with Citus Local Tables
-- Test "ADD PRIMARY KEY"
\c - - :master_host :master_port
-SET client_min_messages to ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-RESET client_min_messages;
CREATE TABLE AT_AddConstNoName.citus_local_table(id int, other_column int);
SELECT citus_add_local_table_to_metadata('AT_AddConstNoName.citus_local_table');
citus_add_local_table_to_metadata
@@ -1175,12 +1167,6 @@ SELECT con.conname
(0 rows)
\c - - :master_host :master_port
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-- Test with unusual table and column names
CREATE TABLE AT_AddConstNoName."2nd table" ( "2nd id" INTEGER, "3rd id" INTEGER);
SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id');
@@ -1315,7 +1301,7 @@ NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table at_addconstnoname.tbl
drop cascades to table at_addconstnoname.products_ref_2
drop cascades to table at_addconstnoname.products_ref_3
-drop cascades to table at_addconstnoname.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon
drop cascades to table at_addconstnoname.products_ref_3_5410009
+drop cascades to table at_addconstnoname.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon
drop cascades to table at_addconstnoname.citus_local_partitioned_table
drop cascades to table at_addconstnoname."2nd table"
diff --git a/src/test/regress/expected/multi_alter_table_add_foreign_key_without_name.out b/src/test/regress/expected/multi_alter_table_add_foreign_key_without_name.out
index c27e6a425..58571cc34 100644
--- a/src/test/regress/expected/multi_alter_table_add_foreign_key_without_name.out
+++ b/src/test/regress/expected/multi_alter_table_add_foreign_key_without_name.out
@@ -120,7 +120,7 @@ ERROR: cannot create foreign key constraint since relations are not colocated o
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP TABLE referencing_table;
DROP TABLE referenced_table;
--- test foreign constraint creation is not supported when one of the tables is not a citus table
+-- test foreign constraint creation is supported when coordinator is in metadata
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
CREATE TABLE reference_table(id int, referencing_column int);
SELECT create_reference_table('reference_table');
@@ -130,11 +130,12 @@ SELECT create_reference_table('reference_table');
(1 row)
ALTER TABLE reference_table ADD FOREIGN KEY (referencing_column) REFERENCES referenced_local_table(id);
-ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table
-DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
-HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
DROP TABLE referenced_local_table;
-DROP TABLE reference_table;
+ERROR: cannot drop table referenced_local_table because other objects depend on it
+DETAIL: constraint reference_table_referencing_column_fkey on table reference_table depends on table referenced_local_table
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP TABLE reference_table CASCADE;
+NOTICE: removing table at_add_fk.referenced_local_table from metadata as it is not connected to any reference tables via foreign keys
-- test foreign constraint with correct conditions
CREATE TABLE referenced_table(id int PRIMARY KEY, test_column int);
CREATE TABLE referencing_table(id int, ref_id int);
@@ -170,8 +171,8 @@ SELECT con.conname
conname
---------------------------------------------------------------------
referencing_table_ref_id_fkey
- referencing_table_ref_id_fkey_1770033
- referencing_table_ref_id_fkey_1770035
+ referencing_table_ref_id_fkey_1770034
+ referencing_table_ref_id_fkey_1770036
(3 rows)
\c - - :master_host :master_port
@@ -198,8 +199,8 @@ SELECT con.conname
conname
---------------------------------------------------------------------
referencing_table_ref_id_fkey
- referencing_table_ref_id_fkey_1770033
- referencing_table_ref_id_fkey_1770035
+ referencing_table_ref_id_fkey_1770034
+ referencing_table_ref_id_fkey_1770036
(3 rows)
\c - - :master_host :master_port
@@ -244,8 +245,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | c | s
- referencing_table_ref_id_fkey_1770041 | a | c | s
- referencing_table_ref_id_fkey_1770043 | a | c | s
+ referencing_table_ref_id_fkey_1770042 | a | c | s
+ referencing_table_ref_id_fkey_1770044 | a | c | s
(3 rows)
\c - - :master_host :master_port
@@ -272,8 +273,8 @@ SELECT con.conname, con.convalidated
conname | convalidated
---------------------------------------------------------------------
referencing_table_ref_id_fkey | f
- referencing_table_ref_id_fkey_1770041 | f
- referencing_table_ref_id_fkey_1770043 | f
+ referencing_table_ref_id_fkey_1770042 | f
+ referencing_table_ref_id_fkey_1770044 | f
(3 rows)
\c - - :master_host :master_port
@@ -300,8 +301,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | a | s
- referencing_table_ref_id_fkey_1770041 | a | a | s
- referencing_table_ref_id_fkey_1770043 | a | a | s
+ referencing_table_ref_id_fkey_1770042 | a | a | s
+ referencing_table_ref_id_fkey_1770044 | a | a | s
(3 rows)
\c - - :master_host :master_port
@@ -328,8 +329,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | r | s
- referencing_table_ref_id_fkey_1770041 | a | r | s
- referencing_table_ref_id_fkey_1770043 | a | r | s
+ referencing_table_ref_id_fkey_1770042 | a | r | s
+ referencing_table_ref_id_fkey_1770044 | a | r | s
(3 rows)
\c - - :master_host :master_port
@@ -356,8 +357,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | s
- referencing_table_ref_id_id_fkey_1770041 | a | a | s
- referencing_table_ref_id_id_fkey_1770043 | a | a | s
+ referencing_table_ref_id_id_fkey_1770042 | a | a | s
+ referencing_table_ref_id_id_fkey_1770044 | a | a | s
(3 rows)
\c - - :master_host :master_port
@@ -384,8 +385,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | r | a | s
- referencing_table_ref_id_id_fkey_1770041 | r | a | s
- referencing_table_ref_id_id_fkey_1770043 | r | a | s
+ referencing_table_ref_id_id_fkey_1770042 | r | a | s
+ referencing_table_ref_id_id_fkey_1770044 | r | a | s
(3 rows)
\c - - :master_host :master_port
@@ -412,8 +413,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | s
- referencing_table_ref_id_id_fkey_1770041 | a | a | s
- referencing_table_ref_id_id_fkey_1770043 | a | a | s
+ referencing_table_ref_id_id_fkey_1770042 | a | a | s
+ referencing_table_ref_id_id_fkey_1770044 | a | a | s
(3 rows)
\c - - :master_host :master_port
@@ -440,8 +441,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype
---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | f
- referencing_table_ref_id_id_fkey_1770041 | a | a | f
- referencing_table_ref_id_id_fkey_1770043 | a | a | f
+ referencing_table_ref_id_id_fkey_1770042 | a | a | f
+ referencing_table_ref_id_id_fkey_1770044 | a | a | f
(3 rows)
\c - - :master_host :master_port
@@ -524,13 +525,6 @@ BEGIN;
DROP TABLE dist_table CASCADE;
DROP TABLE reference_table CASCADE;
-- test ADD FOREIGN KEY from citus local to reference table
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
CREATE TABLE citus_local_table(l1 int);
SELECT citus_add_local_table_to_metadata('citus_local_table');
citus_add_local_table_to_metadata
@@ -557,17 +551,12 @@ ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1)
ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE NO ACTION;
ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE RESTRICT;
DROP TABLE citus_local_table CASCADE;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
RESET SEARCH_PATH;
RESET client_min_messages;
DROP SCHEMA at_add_fk CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table at_add_fk.referenced_table
+NOTICE: drop cascades to 5 other objects
+DETAIL: drop cascades to table at_add_fk.referenced_local_table
+drop cascades to table at_add_fk.referenced_table
drop cascades to table at_add_fk.referencing_table
drop cascades to table at_add_fk.reference_table
-drop cascades to table at_add_fk.reference_table_1770051
+drop cascades to table at_add_fk.reference_table_1770052
diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out
index 792839d87..eef7a98ca 100644
--- a/src/test/regress/expected/multi_citus_tools.out
+++ b/src/test/regress/expected/multi_citus_tools.out
@@ -547,7 +547,8 @@ WHERE isactive = 't' AND noderole='primary';
---------------------------------------------------------------------
t
t
-(2 rows)
+ t
+(3 rows)
CREATE TABLE distributed(id int, data text);
SELECT create_distributed_table('distributed', 'id');
@@ -632,11 +633,16 @@ SELECT citus_check_connection_to_node('localhost', :worker_2_port);
SELECT * FROM citus_check_cluster_node_health() ORDER BY 1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result
---------------------------------------------------------------------
+ localhost | 57636 | localhost | 57636 | t
+ localhost | 57636 | localhost | 57637 | t
+ localhost | 57636 | localhost | 57638 | t
+ localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t
+ localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t
-(4 rows)
+(9 rows)
-- test cluster connectivity when we have broken nodes
SET client_min_messages TO ERROR;
@@ -648,23 +654,32 @@ INSERT INTO pg_dist_node VALUES
SELECT * FROM citus_check_cluster_node_health() ORDER BY 5,1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result
---------------------------------------------------------------------
+ localhost | 57636 | localhost | 123456789 | f
+ localhost | 57636 | www.citusdata.com | 5432 | f
localhost | 57637 | localhost | 123456789 | f
localhost | 57637 | www.citusdata.com | 5432 | f
localhost | 57638 | localhost | 123456789 | f
localhost | 57638 | www.citusdata.com | 5432 | f
+ localhost | 57636 | localhost | 57636 | t
+ localhost | 57636 | localhost | 57637 | t
+ localhost | 57636 | localhost | 57638 | t
+ localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t
+ localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t
+ localhost | 123456789 | localhost | 57636 |
localhost | 123456789 | localhost | 57637 |
localhost | 123456789 | localhost | 57638 |
localhost | 123456789 | localhost | 123456789 |
localhost | 123456789 | www.citusdata.com | 5432 |
+ www.citusdata.com | 5432 | localhost | 57636 |
www.citusdata.com | 5432 | localhost | 57637 |
www.citusdata.com | 5432 | localhost | 57638 |
www.citusdata.com | 5432 | localhost | 123456789 |
www.citusdata.com | 5432 | www.citusdata.com | 5432 |
-(16 rows)
+(25 rows)
ROLLBACK;
RESET citus.node_connection_timeout;
diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out
index 7d5d25d57..e58b02937 100644
--- a/src/test/regress/expected/multi_cluster_management.out
+++ b/src/test/regress/expected/multi_cluster_management.out
@@ -681,6 +681,12 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
(3 rows)
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
@@ -791,13 +797,13 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
master_add_inactive_node
---------------------------------------------------------------------
- 22
+ 23
(1 row)
SELECT master_activate_node('localhost', 9999);
master_activate_node
---------------------------------------------------------------------
- 22
+ 23
(1 row)
SELECT citus_disable_node('localhost', 9999);
@@ -831,17 +837,17 @@ CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX a
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster)
VALUES ('localhost', 5000, 1000, 'primary', 'olap');
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
-DETAIL: Failing row contains (24, 1000, localhost, 5000, default, f, t, primary, olap, f, t).
+DETAIL: Failing row contains (25, 1000, localhost, 5000, default, f, t, primary, olap, f, t).
UPDATE pg_dist_node SET nodecluster = 'olap'
WHERE nodeport = :worker_1_port;
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
-DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, olap, f, t).
+DETAIL: Failing row contains (17, 14, localhost, 57637, default, f, t, primary, olap, f, t).
-- check that you /can/ add a secondary node to a non-default cluster
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
master_add_node
---------------------------------------------------------------------
- 25
+ 26
(1 row)
-- check that super-long cluster names are truncated
@@ -854,13 +860,13 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole =
);
master_add_node
---------------------------------------------------------------------
- 26
+ 27
(1 row)
SELECT * FROM pg_dist_node WHERE nodeport=8887;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
+ 27 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
(1 row)
-- don't remove the secondary and unavailable nodes, check that no commands are sent to
@@ -869,13 +875,13 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
master_add_secondary_node
---------------------------------------------------------------------
- 27
+ 28
(1 row)
SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
master_add_secondary_node
---------------------------------------------------------------------
- 28
+ 29
(1 row)
SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
@@ -883,7 +889,7 @@ ERROR: node at "localhost:xxxxx" does not exist
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node
---------------------------------------------------------------------
- 29
+ 30
(1 row)
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=9992 \gset
@@ -941,7 +947,7 @@ SELECT master_update_node(:worker_1_node, 'somehost', 9000);
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t
+ 17 | 14 | somehost | 9000 | default | f | t | primary | default | f | t
(1 row)
-- cleanup
@@ -954,7 +960,7 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t
+ 17 | 14 | localhost | 57637 | default | f | t | primary | default | f | t
(1 row)
SET client_min_messages TO ERROR;
@@ -963,7 +969,8 @@ SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE i
---------------------------------------------------------------------
-(2 rows)
+
+(3 rows)
RESET client_min_messages;
SET citus.shard_replication_factor TO 1;
@@ -1044,9 +1051,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count
---------------------------------------------------------------------
+ 57636 | 1
57637 | 1
57638 | 1
-(2 rows)
+(3 rows)
-- cleanup for next test
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated;
@@ -1088,9 +1096,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count
---------------------------------------------------------------------
+ 57636 | 1
57637 | 1
57638 | 1
-(2 rows)
+(3 rows)
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
master_set_node_property
@@ -1114,9 +1123,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count
---------------------------------------------------------------------
+ 57636 | 1
57637 | 1
57638 | 1
-(2 rows)
+(3 rows)
SELECT create_distributed_table('test_dist_colocated', 'x');
create_distributed_table
diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out
index 7415983a2..9ef2896ff 100644
--- a/src/test/regress/expected/multi_colocation_utils.out
+++ b/src/test/regress/expected/multi_colocation_utils.out
@@ -825,11 +825,13 @@ ORDER BY
table1_group_default | 1300070 | t | 57638 | -715827883 | 715827881
table1_group_default | 1300071 | t | 57637 | 715827882 | 2147483647
table1_group_default | 1300071 | t | 57638 | 715827882 | 2147483647
+ table1_groupf | 1300080 | t | 57636 | |
table1_groupf | 1300080 | t | 57637 | |
table1_groupf | 1300080 | t | 57638 | |
+ table2_groupf | 1300081 | t | 57636 | |
table2_groupf | 1300081 | t | 57637 | |
table2_groupf | 1300081 | t | 57638 | |
-(92 rows)
+(94 rows)
-- reset colocation ids to test update_distributed_table_colocation
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1;
diff --git a/src/test/regress/expected/multi_copy.out b/src/test/regress/expected/multi_copy.out
index 2b302965c..abd58eb1d 100644
--- a/src/test/regress/expected/multi_copy.out
+++ b/src/test/regress/expected/multi_copy.out
@@ -756,9 +756,10 @@ SELECT shardid, shardstate, nodename, nodeport
WHERE logicalrelid = 'numbers_reference'::regclass order by placementid;
shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
+ 560165 | 1 | localhost | 57636
560165 | 1 | localhost | 57637
560165 | 1 | localhost | 57638
-(2 rows)
+(3 rows)
-- try to insert into numbers_hash_other. copy should fail and rollback
-- since it can not insert into either copies of a shard. shards are expected to
diff --git a/src/test/regress/expected/multi_create_table_superuser.out b/src/test/regress/expected/multi_create_table_superuser.out
index e15b6359e..1f756d688 100644
--- a/src/test/regress/expected/multi_create_table_superuser.out
+++ b/src/test/regress/expected/multi_create_table_superuser.out
@@ -642,13 +642,15 @@ DROP TABLE tt1;
DROP TABLE tt2;
DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE;
-NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table sc.ref
+drop cascades to table sc.ref_360102
drop cascades to table sc.hash
DROP SCHEMA sc2 CASCADE;
-NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table sc2.hash
drop cascades to table sc2.ref
+drop cascades to table sc2.ref_360111
DROP SCHEMA sc3 CASCADE;
NOTICE: drop cascades to table sc3.alter_replica_table
DROP SCHEMA sc4 CASCADE;
diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out
index 112fa7f70..909ad2f87 100644
--- a/src/test/regress/expected/multi_drop_extension.out
+++ b/src/test/regress/expected/multi_drop_extension.out
@@ -21,12 +21,6 @@ BEGIN;
SET client_min_messages TO ERROR;
SET search_path TO public;
CREATE EXTENSION citus;
- SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
create table l1 (a int unique);
SELECT create_reference_table('l1');
create_reference_table
@@ -136,6 +130,12 @@ ROLLBACK TO SAVEPOINT s3;
ROLLBACK;
CREATE EXTENSION citus;
-- re-add the nodes to the cluster
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out
index 7b533a642..f9fc5a164 100644
--- a/src/test/regress/expected/multi_explain.out
+++ b/src/test/regress/expected/multi_explain.out
@@ -331,16 +331,16 @@ BEGIN;
SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1)
- -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
- Task Count: 4
- Tuple data received from nodes: 32 bytes
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-- Confirm repartiton join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
@@ -350,16 +350,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Intermediate Data Size: 14 bytes
Result destination: Write locally
-> Aggregate (actual rows=1 loops=1)
- -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
- Task Count: 4
- Tuple data received from nodes: 32 bytes
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
Task Count: 1
Tuple data received from nodes: 8 bytes
Tasks Shown: All
@@ -1108,20 +1108,20 @@ EXPLAIN (COSTS FALSE)
AND l_suppkey = s_suppkey;
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
- Map Task Count: 4
- Merge Task Count: 4
+ Map Task Count: 6
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 1
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 1
- Merge Task Count: 4
+ Merge Task Count: 6
EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard
@@ -1142,26 +1142,26 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Distributed Query": {
"Job": {
- "Task Count": 4,
+ "Task Count": 6,
"Tasks Shown": "None, not supported for re-partition queries",
"Dependent Jobs": [
{
- "Map Task Count": 4,
- "Merge Task Count": 4,
+ "Map Task Count": 6,
+ "Merge Task Count": 6,
"Dependent Jobs": [
{
"Map Task Count": 2,
- "Merge Task Count": 4
+ "Merge Task Count": 6
},
{
"Map Task Count": 1,
- "Merge Task Count": 4
+ "Merge Task Count": 6
}
]
},
{
"Map Task Count": 1,
- "Merge Task Count": 4
+ "Merge Task Count": 6
}
]
}
@@ -1198,26 +1198,26 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
          <Parallel-Aware>false</Parallel-Aware>
-         <Task-Count>4</Task-Count>
+         <Task-Count>6</Task-Count>
          <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
-           <Map-Task-Count>4</Map-Task-Count>
-           <Merge-Task-Count>4</Merge-Task-Count>
+           <Map-Task-Count>6</Map-Task-Count>
+           <Merge-Task-Count>6</Merge-Task-Count>
            <Map-Task-Count>2</Map-Task-Count>
-             <Merge-Task-Count>4</Merge-Task-Count>
+             <Merge-Task-Count>6</Merge-Task-Count>
            <Map-Task-Count>1</Map-Task-Count>
-             <Merge-Task-Count>4</Merge-Task-Count>
+             <Merge-Task-Count>6</Merge-Task-Count>
            <Map-Task-Count>1</Map-Task-Count>
-             <Merge-Task-Count>4</Merge-Task-Count>
+             <Merge-Task-Count>6</Merge-Task-Count>
@@ -1264,13 +1264,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Parallel Aware: false
Distributed Query:
Job:
- Task Count: 4
+ Task Count: 6
Tasks Shown: "None, not supported for re-partition queries"
Dependent Jobs:
- Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
- Map Task Count: 1
- Merge Task Count: 4
+ Merge Task Count: 6
-- ensure local plans display correctly
CREATE TABLE lineitem_clone (LIKE lineitem);
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
@@ -2317,7 +2317,7 @@ SELECT count(distinct a) from r NATURAL JOIN ref_table;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: 220 bytes
- Result destination: Send to 2 nodes
+ Result destination: Send to 3 nodes
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 4
Tuple data received from nodes: 120 bytes
@@ -3146,8 +3146,6 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-- check when auto explain + analyze is enabled, we do not allow local execution.
CREATE SCHEMA test_auto_explain;
SET search_path TO 'test_auto_explain';
-SELECT citus_set_coordinator_host('localhost');
-
CREATE TABLE test_ref_table (key int PRIMARY KEY);
SELECT create_reference_table('test_ref_table');
@@ -3157,9 +3155,5 @@ set auto_explain.log_analyze to true;
-- the following should not be locally executed since explain analyze is on
select * from test_ref_table;
DROP SCHEMA test_auto_explain CASCADE;
-select master_remove_node('localhost', :master_port);
-
-SELECT public.wait_until_metadata_sync(30000);
-
SET client_min_messages TO ERROR;
DROP SCHEMA multi_explain CASCADE;
diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
index 5f7526982..e6795317c 100644
--- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out
+++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
@@ -495,14 +495,6 @@ SET search_path TO fix_idx_names, public;
DROP TABLE dist_partitioned_table;
SET citus.next_shard_id TO 910040;
-- test with citus local table
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-RESET client_min_messages;
CREATE TABLE date_partitioned_citus_local_table(
measureid integer,
eventdate date,
@@ -750,9 +742,3 @@ ALTER TABLE parent_table DROP CONSTRAINT pkey_cst CASCADE;
ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE;
SET client_min_messages TO WARNING;
DROP SCHEMA fix_idx_names CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out
index 7efa9d61c..832be2740 100644
--- a/src/test/regress/expected/multi_foreign_key.out
+++ b/src/test/regress/expected/multi_foreign_key.out
@@ -856,11 +856,16 @@ SELECT create_reference_table('reference_table_second');
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey on table reference_table_second
+NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey_1350654 on table public.reference_table_second_1350654
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id));
SELECT create_reference_table('reference_table');
-ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table
-DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
-HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
-- test foreign key creation on CREATE TABLE on self referencing reference table
CREATE TABLE self_referencing_reference_table(
id int,
@@ -877,6 +882,7 @@ SELECT create_reference_table('self_referencing_reference_table');
-- test foreign key creation on ALTER TABLE from reference table
DROP TABLE reference_table;
+NOTICE: removing table public.referenced_local_table from metadata as it is not connected to any reference tables via foreign keys
CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int);
SELECT create_reference_table('reference_table');
create_reference_table
@@ -911,6 +917,9 @@ DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to constraint fk on table references_to_reference_table
drop cascades to constraint fk on table reference_table_second
+NOTICE: drop cascades to constraint fk_1350663 on table public.reference_table_second_1350663
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int);
SELECT create_reference_table('reference_table');
create_reference_table
@@ -919,9 +928,6 @@ SELECT create_reference_table('reference_table');
(1 row)
ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id);
-ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table
-DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
-HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
-- test foreign key creation on ALTER TABLE on self referencing reference table
DROP TABLE self_referencing_reference_table;
CREATE TABLE self_referencing_reference_table(
@@ -1187,12 +1193,7 @@ CREATE TABLE set_on_default_test_referencing(
REFERENCES set_on_default_test_referenced(col_1, col_3)
ON UPDATE SET DEFAULT
);
--- from distributed / reference to reference, fkey exists before calling the UDFs
-SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
-SELECT create_reference_table('set_on_default_test_referencing');
-ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
-DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing(
col_1 serial, col_2 int, col_3 int, col_4 int
);
@@ -1276,3 +1277,6 @@ ERROR: cannot create foreign key constraint since Citus does not support ON DEL
-- we no longer need those tables
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2,
set_on_default_test_referenced, set_on_default_test_referencing;
+NOTICE: drop cascades to constraint fk_1350664 on table public.reference_table_1350664
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out
index abebf314e..ac339a620 100644
--- a/src/test/regress/expected/multi_insert_select.out
+++ b/src/test/regress/expected/multi_insert_select.out
@@ -884,7 +884,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
@@ -921,7 +921,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- make things a bit more complicate with IN clauses
@@ -940,7 +940,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- implicit join on non partition column should also not be pushed down,
@@ -959,7 +959,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
RESET client_min_messages;
@@ -981,7 +981,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- foo is not joined on the partition key so the query is not
@@ -1046,7 +1046,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- foo is not joined on the partition key so the query is not
@@ -1437,7 +1437,7 @@ $Q$);
Group Key: remote_scan.id
Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric)
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(11 rows)
-- cannot push down since foo doesn't have en equi join
@@ -1514,7 +1514,7 @@ $Q$);
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(6 rows)
-- join among reference_ids and averages is not on the partition key
@@ -1576,7 +1576,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- Selected value in the WHERE is not partition key, so we cannot use distributed
@@ -3276,7 +3276,7 @@ $$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query.
diff --git a/src/test/regress/expected/multi_insert_select_0.out b/src/test/regress/expected/multi_insert_select_0.out
index ee2341759..a4988bceb 100644
--- a/src/test/regress/expected/multi_insert_select_0.out
+++ b/src/test/regress/expected/multi_insert_select_0.out
@@ -884,7 +884,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
@@ -921,7 +921,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- make things a bit more complicate with IN clauses
@@ -940,7 +940,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- implicit join on non partition column should also not be pushed down,
@@ -959,7 +959,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
RESET client_min_messages;
@@ -981,7 +981,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- foo is not joined on the partition key so the query is not
@@ -1046,7 +1046,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- foo is not joined on the partition key so the query is not
@@ -1437,7 +1437,7 @@ $Q$);
Group Key: remote_scan.id
Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric)
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(11 rows)
-- cannot push down since foo doesn't have en equi join
@@ -1514,7 +1514,7 @@ $Q$);
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(6 rows)
-- join among reference_ids and averages is not on the partition key
@@ -1576,7 +1576,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- Selected value in the WHERE is not partition key, so we cannot use distributed
@@ -3276,7 +3276,7 @@ $$);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
(4 rows)
-- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query.
diff --git a/src/test/regress/expected/multi_insert_select_conflict.out b/src/test/regress/expected/multi_insert_select_conflict.out
index f344a8b79..5c06719d3 100644
--- a/src/test/regress/expected/multi_insert_select_conflict.out
+++ b/src/test/regress/expected/multi_insert_select_conflict.out
@@ -589,8 +589,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
RESET client_min_messages;
DROP SCHEMA on_conflict CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table test_ref_table
+drop cascades to table test_ref_table_1900012
drop cascades to table source_table_3
drop cascades to table source_table_4
drop cascades to table target_table_2
diff --git a/src/test/regress/expected/multi_insert_select_conflict_0.out b/src/test/regress/expected/multi_insert_select_conflict_0.out
index 42b5aed31..4c2add1d7 100644
--- a/src/test/regress/expected/multi_insert_select_conflict_0.out
+++ b/src/test/regress/expected/multi_insert_select_conflict_0.out
@@ -589,8 +589,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
RESET client_min_messages;
DROP SCHEMA on_conflict CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table test_ref_table
+drop cascades to table test_ref_table_1900012
drop cascades to table source_table_3
drop cascades to table source_table_4
drop cascades to table target_table_2
diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out
index efc6954ad..27fdc3980 100644
--- a/src/test/regress/expected/multi_join_pruning.out
+++ b/src/test/regress/expected/multi_join_pruning.out
@@ -140,3 +140,7 @@ DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U
explain statements for distributed queries are not enabled
(3 rows)
+SET client_min_messages TO WARNING;
+DROP TABLE varchar_partitioned_table;
+DROP TABLE array_partitioned_table;
+DROP TABLE composite_partitioned_table;
diff --git a/src/test/regress/expected/multi_level_recursive_queries.out b/src/test/regress/expected/multi_level_recursive_queries.out
index b2bf0a49c..e0a4d44a9 100644
--- a/src/test/regress/expected/multi_level_recursive_queries.out
+++ b/src/test/regress/expected/multi_level_recursive_queries.out
@@ -298,10 +298,5 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.i
DEBUG: generating subplan XXX_1 for subquery SELECT table_6.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6 JOIN multi_recursive.dist0 table_8 USING (id)) WHERE (table_8.id OPERATOR(pg_catalog.<) 0) ORDER BY table_6.id
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.id) AS avg FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 JOIN multi_recursive.dist0 table_9 USING (id))
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA multi_recursive CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table tbl_dist1
-drop cascades to table tbl_ref1
-drop cascades to table dist0
-drop cascades to table dist1
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index 626584de6..e92b9aa1b 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -69,7 +69,7 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword';
-- Show that, with no MX tables, activate node snapshot contains only the delete commands,
-- pg_dist_node entries, pg_dist_object entries and roles.
SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
+ unnest
---------------------------------------------------------------------
ALTER DATABASE regression OWNER TO postgres;
CALL pg_catalog.worker_drop_all_shell_tables(true)
@@ -85,7 +85,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -98,9 +98,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -158,7 +158,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -180,9 +180,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -226,7 +226,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -245,9 +245,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -287,7 +287,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -306,9 +306,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -355,7 +355,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -374,9 +374,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -416,7 +416,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -435,9 +435,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -451,11 +451,11 @@ SELECT unnest(activate_node_snapshot()) order by 1;
(54 rows)
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
--- Ensure that hasmetadata=false for all nodes
+-- Ensure that hasmetadata=false for all nodes except for the coordinator node
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
count
---------------------------------------------------------------------
- 0
+ 1
(1 row)
-- Show that metadata can not be synced on secondary node
@@ -463,7 +463,7 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
master_add_node
---------------------------------------------------------------------
- 4
+ 5
(1 row)
SELECT start_metadata_sync_to_node('localhost', 8888);
@@ -495,7 +495,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node
---------------------------------------------------------------------
- 5
+ 6
(1 row)
\c - - - :master_port
@@ -509,7 +509,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
nodeid | hasmetadata
---------------------------------------------------------------------
- 1 | t
+ 2 | t
(1 row)
-- Check that the metadata has been copied to the worker
@@ -523,11 +523,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(4 rows)
+ 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
+ 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
+ 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
+(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@@ -661,11 +662,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(4 rows)
+ 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
+ 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
+ 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
+(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@@ -1509,7 +1511,7 @@ SELECT create_distributed_table('mx_table', 'a');
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 6
+ 7
(1 row)
\c - mx_user - :worker_1_port
@@ -1620,9 +1622,10 @@ ORDER BY
nodeport;
logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
---------------------------------------------------------------------
- mx_ref | n | t | 1310074 | 100074 | localhost | 57637
- mx_ref | n | t | 1310074 | 100075 | localhost | 57638
-(2 rows)
+ mx_ref | n | t | 1310074 | 100074 | localhost | 57636
+ mx_ref | n | t | 1310074 | 100075 | localhost | 57637
+ mx_ref | n | t | 1310074 | 100076 | localhost | 57638
+(3 rows)
SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
-- make sure we have the pg_dist_colocation record on the worker
@@ -1716,8 +1719,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :worker_1_port
SELECT shardid, nodename, nodeport
@@ -1725,15 +1729,16 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :master_port
SET client_min_messages TO ERROR;
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 7
+ 8
(1 row)
RESET client_min_messages;
@@ -1743,8 +1748,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :worker_1_port
SELECT shardid, nodename, nodeport
@@ -1753,8 +1759,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
-- Get the metadata back into a consistent state
\c - - - :master_port
@@ -1862,10 +1869,6 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT master_add_node('localhost', :master_port, groupid => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
ERROR: disabling the first worker node in the metadata is not allowed
DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
@@ -1918,7 +1921,7 @@ SELECT wait_until_metadata_sync(60000);
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 7
+ 8
(1 row)
CREATE SEQUENCE mx_test_sequence_0;
@@ -1989,7 +1992,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -2025,9 +2028,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -2050,9 +2053,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 1, 100076), (1310075, 0, 5, 100077)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100078), (1310077, 0, 5, 100079), (1310078, 0, 1, 100080), (1310079, 0, 5, 100081)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100088), (1310086, 0, 5, 100089), (1310087, 0, 1, 100090), (1310088, 0, 5, 100091)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out
index 341a0f319..9ab50664c 100644
--- a/src/test/regress/expected/multi_metadata_sync_0.out
+++ b/src/test/regress/expected/multi_metadata_sync_0.out
@@ -69,7 +69,7 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword';
-- Show that, with no MX tables, activate node snapshot contains only the delete commands,
-- pg_dist_node entries, pg_dist_object entries and roles.
SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
+ unnest
---------------------------------------------------------------------
ALTER DATABASE regression OWNER TO postgres;
CALL pg_catalog.worker_drop_all_shell_tables(true)
@@ -85,7 +85,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -98,9 +98,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -158,7 +158,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -180,9 +180,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -226,7 +226,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -245,9 +245,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -287,7 +287,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -306,9 +306,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -355,7 +355,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -374,9 +374,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -416,7 +416,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -435,9 +435,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -451,11 +451,11 @@ SELECT unnest(activate_node_snapshot()) order by 1;
(54 rows)
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
--- Ensure that hasmetadata=false for all nodes
+-- Ensure that hasmetadata=false for all nodes except for the coordinator node
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
count
---------------------------------------------------------------------
- 0
+ 1
(1 row)
-- Show that metadata can not be synced on secondary node
@@ -463,7 +463,7 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
master_add_node
---------------------------------------------------------------------
- 4
+ 5
(1 row)
SELECT start_metadata_sync_to_node('localhost', 8888);
@@ -495,7 +495,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node
---------------------------------------------------------------------
- 5
+ 6
(1 row)
\c - - - :master_port
@@ -509,7 +509,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
nodeid | hasmetadata
---------------------------------------------------------------------
- 1 | t
+ 2 | t
(1 row)
-- Check that the metadata has been copied to the worker
@@ -523,11 +523,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(4 rows)
+ 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
+ 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
+ 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
+(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@@ -661,11 +662,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
- 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(4 rows)
+ 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
+ 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
+ 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
+(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@@ -1509,7 +1511,7 @@ SELECT create_distributed_table('mx_table', 'a');
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 6
+ 7
(1 row)
\c - mx_user - :worker_1_port
@@ -1620,9 +1622,10 @@ ORDER BY
nodeport;
logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
---------------------------------------------------------------------
- mx_ref | n | t | 1310074 | 100074 | localhost | 57637
- mx_ref | n | t | 1310074 | 100075 | localhost | 57638
-(2 rows)
+ mx_ref | n | t | 1310074 | 100074 | localhost | 57636
+ mx_ref | n | t | 1310074 | 100075 | localhost | 57637
+ mx_ref | n | t | 1310074 | 100076 | localhost | 57638
+(3 rows)
SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
-- make sure we have the pg_dist_colocation record on the worker
@@ -1716,8 +1719,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :worker_1_port
SELECT shardid, nodename, nodeport
@@ -1725,15 +1729,16 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :master_port
SET client_min_messages TO ERROR;
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 7
+ 8
(1 row)
RESET client_min_messages;
@@ -1743,8 +1748,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
\c - - - :worker_1_port
SELECT shardid, nodename, nodeport
@@ -1753,8 +1759,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;
shardid | nodename | nodeport
---------------------------------------------------------------------
+ 1310075 | localhost | 57636
1310075 | localhost | 57637
-(1 row)
+(2 rows)
-- Get the metadata back into a consistent state
\c - - - :master_port
@@ -1862,10 +1869,6 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT master_add_node('localhost', :master_port, groupid => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
ERROR: disabling the first worker node in the metadata is not allowed
DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
@@ -1918,7 +1921,7 @@ SELECT wait_until_metadata_sync(60000);
SELECT master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
- 7
+ 8
(1 row)
CREATE SEQUENCE mx_test_sequence_0;
@@ -1989,7 +1992,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE
RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@@ -2025,9 +2028,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
+ UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
+ UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@@ -2050,9 +2053,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 1, 100076), (1310075, 0, 5, 100077)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100078), (1310077, 0, 5, 100079), (1310078, 0, 1, 100080), (1310079, 0, 5, 100081)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100088), (1310086, 0, 5, 100089), (1310087, 0, 1, 100090), (1310088, 0, 5, 100091)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
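-- Illustrative sketch, not part of the patch: the expected-output changes above assume the
-- coordinator is already registered in pg_dist_node (nodeid 1, groupid 0, port 57636,
-- shouldhaveshards = false), which shifts the worker nodeids and adds a group-0 placement
-- for each reference-table shard. A minimal way to reach that state in a fresh cluster
-- would be:
SELECT citus_set_coordinator_host('localhost', 57636);
-- and the resulting node list can then be inspected with:
SELECT nodeid, groupid, nodename, nodeport, shouldhaveshards
FROM pg_dist_node
ORDER BY nodeid;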
diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out
index 99cdc9ce4..5eba6e21d 100644
--- a/src/test/regress/expected/multi_modifying_xacts.out
+++ b/src/test/regress/expected/multi_modifying_xacts.out
@@ -914,7 +914,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count
---------------------------------------------------------------------
- reference_modifying_xacts | 1 | 2
+ reference_modifying_xacts | 1 | 3
(1 row)
-- for the time-being drop the constraint
@@ -1021,7 +1021,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count
---------------------------------------------------------------------
- reference_modifying_xacts | 1 | 2
+ reference_modifying_xacts | 1 | 3
hash_modifying_xacts | 1 | 4
(2 rows)
@@ -1070,7 +1070,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count
---------------------------------------------------------------------
- reference_modifying_xacts | 1 | 2
+ reference_modifying_xacts | 1 | 3
hash_modifying_xacts | 1 | 4
(2 rows)
@@ -1235,7 +1235,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count
---------------------------------------------------------------------
- reference_failure_test | 1 | 2
+ reference_failure_test | 1 | 3
(1 row)
-- any failure rollbacks the transaction
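-- Illustrative sketch, not part of the patch: the reference-table placement counts above go
-- from 2 to 3 because the registered coordinator (group 0) now also holds a placement.
-- Reusing the table name from the test output above, the per-node placements can be listed
-- with:
SELECT nodename, nodeport, count(*) AS placements
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid = 'reference_modifying_xacts'::regclass
GROUP BY nodename, nodeport
ORDER BY nodeport;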
diff --git a/src/test/regress/expected/multi_multiuser_auth.out b/src/test/regress/expected/multi_multiuser_auth.out
index 4b7c6fcc7..8dd9b8ba7 100644
--- a/src/test/regress/expected/multi_multiuser_auth.out
+++ b/src/test/regress/expected/multi_multiuser_auth.out
@@ -15,7 +15,7 @@
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
- 16
+ 17
(1 row)
\gset
diff --git a/src/test/regress/expected/multi_multiuser_master_protocol.out b/src/test/regress/expected/multi_multiuser_master_protocol.out
index 0ea53b339..a6bddb7f2 100644
--- a/src/test/regress/expected/multi_multiuser_master_protocol.out
+++ b/src/test/regress/expected/multi_multiuser_master_protocol.out
@@ -355,9 +355,10 @@ SELECT * FROM run_command_on_placements('multiuser_schema.reference_table', $$ s
ORDER BY nodename, nodeport, shardid;
nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
+ localhost | 57636 | 109094 | t | t
localhost | 57637 | 109094 | t | t
localhost | 57638 | 109094 | t | t
-(2 rows)
+(3 rows)
-- create another table in the schema, verify select is not granted
CREATE TABLE multiuser_schema.another_table(a int, b int);
@@ -483,9 +484,10 @@ ORDER BY nodename, nodeport, shardid;
(6 rows)
DROP SCHEMA multiuser_schema CASCADE;
-NOTICE: drop cascades to 3 other objects
+NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table multiuser_schema.hash_table
drop cascades to table multiuser_schema.reference_table
+drop cascades to table multiuser_schema.reference_table_109094
drop cascades to table multiuser_schema.another_table
DROP SCHEMA multiuser_second_schema CASCADE;
NOTICE: drop cascades to table multiuser_second_schema.hash_table
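-- Illustrative sketch, not part of the patch: run_command_on_placements() now reports a
-- third placement on port 57636 for the reference table, since the coordinator carries a
-- copy of every reference-table shard. The command below is a placeholder, not the exact
-- one the test runs:
SELECT nodename, nodeport, shardid, success
FROM run_command_on_placements('multiuser_schema.reference_table', 'SELECT true')
ORDER BY nodename, nodeport, shardid;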
diff --git a/src/test/regress/expected/multi_name_resolution.out b/src/test/regress/expected/multi_name_resolution.out
index 890c336bf..5c59a10e6 100644
--- a/src/test/regress/expected/multi_name_resolution.out
+++ b/src/test/regress/expected/multi_name_resolution.out
@@ -36,6 +36,7 @@ WHERE bar.id_deep = join_alias.id_deep;
(0 rows)
DROP SCHEMA multi_name_resolution CASCADE;
-NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table namenest1
drop cascades to table namenest2
+drop cascades to table namenest2_2250000000010
diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out
index a531b065f..5b0ad79f4 100644
--- a/src/test/regress/expected/multi_null_minmax_value_pruning.out
+++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out
@@ -104,15 +104,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -129,18 +147,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
-- Next, set the maximum value for another shard to null. Then check that we
@@ -169,15 +195,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -194,18 +238,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
-- Last, set the minimum value to 0 and check that we don't treat it as null. We
@@ -232,15 +284,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -257,18 +327,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
RESET client_min_messages;
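-- Illustrative sketch, not part of the patch: the repartition task counts above grow from 4
-- to 6, presumably because the hash-partition count scales with the number of primary nodes
-- and the coordinator now counts as one. A rough way to see the node count the planner works
-- with (this is an assumption about the mechanism, not something the test asserts):
SELECT count(*) AS primary_node_count
FROM pg_dist_node
WHERE isactive AND noderole = 'primary';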
diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out
index db79e075a..47139614d 100644
--- a/src/test/regress/expected/multi_partitioning.out
+++ b/src/test/regress/expected/multi_partitioning.out
@@ -1952,6 +1952,8 @@ DEBUG: switching to sequential query execution mode
DETAIL: Table "" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
DEBUG: drop cascades to 2 other objects
DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_schema.partitioning_test_1660302
drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_schema.partitioning_test_1660304
@@ -3772,13 +3774,6 @@ BEGIN;
ROLLBACK;
DROP TABLE pi_table;
-- 6) test with citus local table
-select 1 from citus_add_node('localhost', :master_port, groupid=>0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
CREATE TABLE date_partitioned_citus_local_table(
measureid integer,
eventdate date,
@@ -4214,12 +4209,6 @@ DROP TABLE date_partitioned_table_to_exp;
DROP TABLE date_partitioned_citus_local_table CASCADE;
DROP TABLE date_partitioned_citus_local_table_2;
set client_min_messages to notice;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-- d) invalid tables for helper UDFs
CREATE TABLE multiple_partition_column_table(
event_id bigserial,
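-- Illustrative sketch, not part of the patch: the citus_add_node(..., groupid => 0) /
-- citus_remove_node pair is dropped above because the coordinator now stays registered for
-- the whole schedule, which is the prerequisite a citus local table needs. Whether that
-- prerequisite holds can be checked with:
SELECT nodename, nodeport
FROM pg_dist_node
WHERE groupid = 0;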
diff --git a/src/test/regress/expected/multi_poolinfo_usage.out b/src/test/regress/expected/multi_poolinfo_usage.out
index b428409ff..ee98f0df7 100644
--- a/src/test/regress/expected/multi_poolinfo_usage.out
+++ b/src/test/regress/expected/multi_poolinfo_usage.out
@@ -9,7 +9,7 @@ SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
- 16
+ 17
(1 row)
\gset
diff --git a/src/test/regress/expected/multi_read_from_secondaries.out b/src/test/regress/expected/multi_read_from_secondaries.out
index 5c69458e4..9652b6520 100644
--- a/src/test/regress/expected/multi_read_from_secondaries.out
+++ b/src/test/regress/expected/multi_read_from_secondaries.out
@@ -27,9 +27,10 @@ INSERT INTO source_table (a, b) VALUES (10, 10);
SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node ORDER BY 1, 2;
nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster
---------------------------------------------------------------------
- 1 | 1 | localhost | 57637 | default | t | primary | default
- 2 | 2 | localhost | 57638 | default | t | primary | default
-(2 rows)
+ 1 | 0 | localhost | 57636 | default | t | primary | default
+ 2 | 1 | localhost | 57637 | default | t | primary | default
+ 3 | 2 | localhost | 57638 | default | t | primary | default
+(3 rows)
UPDATE pg_dist_node SET noderole = 'secondary';
\c "dbname=regression options='-c\ citus.use_secondary_nodes=always'"
diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out
index 633d00dab..f348430e1 100644
--- a/src/test/regress/expected/multi_real_time_transaction.out
+++ b/src/test/regress/expected/multi_real_time_transaction.out
@@ -668,8 +668,9 @@ SELECT id, pg_advisory_xact_lock(16) FROM test_table ORDER BY id;
END;
DROP SCHEMA multi_real_time_transaction CASCADE;
-NOTICE: drop cascades to 4 other objects
+NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table test_table
drop cascades to table co_test_table
drop cascades to table ref_test_table
+drop cascades to table ref_test_table_1610008
drop cascades to function insert_row_test(name)
diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out
index 98fd168b5..44233266a 100644
--- a/src/test/regress/expected/multi_remove_node_reference_table.out
+++ b/src/test/regress/expected/multi_remove_node_reference_table.out
@@ -218,10 +218,24 @@ WHERE colocationid IN
1 | -1 | 0
(1 row)
+-- test that we cannot remove a node if it has the only placement for a shard
+SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
SELECT master_remove_node('localhost', :worker_1_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.remove_node_reference_table
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
+-- restore the coordinator
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count
@@ -972,12 +986,6 @@ ORDER BY shardid ASC;
(0 rows)
\c - - - :master_port
-SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SELECT citus_disable_node('localhost', :worker_2_port);
citus_disable_node
---------------------------------------------------------------------
@@ -1004,12 +1012,6 @@ SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport = :master_po
t | t
(1 row)
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SELECT
shardid, shardstate, shardlength, nodename, nodeport
FROM
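-- Illustrative sketch, not part of the patch: condensed, the pattern the hunks above add is
-- to drop the coordinator from metadata, attempt the worker removal (which now fails because
-- that worker holds the only remaining placement), and then restore the coordinator:
SELECT master_remove_node('localhost', :master_port);
SELECT master_remove_node('localhost', :worker_1_port); -- expected to ERROR out
SELECT citus_set_coordinator_host('localhost');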
diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out
index 13f569a4e..237fe906b 100644
--- a/src/test/regress/expected/multi_repartition_join_planning.out
+++ b/src/test/regress/expected/multi_repartition_join_planning.out
@@ -7,6 +7,7 @@
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
SET citus.enable_repartition_joins to ON;
+SET citus.shard_replication_factor to 1;
create schema repartition_join;
DROP TABLE IF EXISTS repartition_join.order_line;
NOTICE: table "order_line" does not exist, skipping
@@ -69,15 +70,33 @@ DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -94,34 +113,68 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
-DETAIL: Creating dependency on merge taskId 13
+DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
-DETAIL: Creating dependency on merge taskId 18
+DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
-DETAIL: Creating dependency on merge taskId 23
+DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
-DETAIL: Creating dependency on merge taskId 28
+DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 47
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 54
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
l_partkey | o_orderkey | count
---------------------------------------------------------------------
18 | 12005 | 1
@@ -170,15 +223,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -195,6 +266,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
l_partkey | o_orderkey | count
---------------------------------------------------------------------
(0 rows)
@@ -214,15 +293,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -239,6 +336,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
o_orderkey | o_shippriority | count
---------------------------------------------------------------------
(0 rows)
@@ -260,15 +365,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -285,6 +408,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
o_orderkey | o_shippriority | count
---------------------------------------------------------------------
(0 rows)
@@ -304,15 +435,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -329,6 +478,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
o_orderkey | any_value
---------------------------------------------------------------------
(0 rows)
@@ -346,15 +503,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -371,6 +546,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
s_i_id
---------------------------------------------------------------------
(0 rows)
diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out
index 8c0a26800..b5c571f2a 100644
--- a/src/test/regress/expected/multi_repartition_join_pruning.out
+++ b/src/test/regress/expected/multi_repartition_join_pruning.out
@@ -17,15 +17,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -42,18 +60,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
SELECT
@@ -66,15 +92,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -91,6 +135,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
count
---------------------------------------------------------------------
2985
@@ -110,15 +162,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -135,18 +205,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
SELECT
@@ -160,15 +238,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -185,6 +281,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
count
---------------------------------------------------------------------
0
@@ -204,15 +308,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -229,18 +351,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
SELECT
@@ -254,15 +384,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -279,6 +427,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
count
---------------------------------------------------------------------
0
@@ -298,15 +454,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -323,18 +497,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
SELECT
@@ -347,15 +529,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -372,6 +572,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
count
---------------------------------------------------------------------
125
@@ -391,15 +599,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -416,18 +642,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
(10 rows)
SELECT
@@ -441,15 +675,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -466,6 +718,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
count
---------------------------------------------------------------------
0
diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out
index 3fbe9121b..713a68e6b 100644
--- a/src/test/regress/expected/multi_repartition_join_task_assignment.out
+++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out
@@ -30,15 +30,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -55,6 +73,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
+DEBUG: assigned task to node localhost:xxxxx
+DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
@@ -88,15 +116,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 2
@@ -113,6 +159,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 24
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: assigned task to node localhost:xxxxx
+DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
@@ -143,15 +199,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -168,6 +242,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 18
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 24
+DEBUG: assigned task to node localhost:xxxxx
+DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out
index 437e188ee..35d3bd80f 100644
--- a/src/test/regress/expected/multi_repartition_udt.out
+++ b/src/test/regress/expected/multi_repartition_udt.out
@@ -155,14 +155,14 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
- Task Count: 4
+ Task Count: 6
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
- Merge Task Count: 4
+ Merge Task Count: 6
-> MapMergeJob
Map Task Count: 5
- Merge Task Count: 4
+ Merge Task Count: 6
(9 rows)
SELECT * FROM repartition_udt JOIN repartition_udt_other
diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out
index e16726171..3d8d8a787 100644
--- a/src/test/regress/expected/multi_replicate_reference_table.out
+++ b/src/test/regress/expected/multi_replicate_reference_table.out
@@ -284,7 +284,7 @@ DROP TABLE replicate_reference_table_rollback;
SELECT count(*) FROM pg_dist_node;
count
---------------------------------------------------------------------
- 1
+ 2
(1 row)
-- test whether we can create distributed objects on a single worker node
@@ -376,12 +376,6 @@ SELECT citus_add_node('localhost', :worker_2_port);
(1 row)
-- required for create_distributed_table_concurrently
-SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SET citus.shard_replication_factor TO 1;
CREATE TABLE distributed_table_cdtc(column1 int primary key);
SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1');
@@ -391,12 +385,6 @@ SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1'
(1 row)
RESET citus.shard_replication_factor;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
SELECT
shardid, shardstate, shardlength, nodename, nodeport
FROM
@@ -712,12 +700,22 @@ SELECT master_remove_node('localhost', :worker_2_port);
CREATE TABLE ref_table_1(id int primary key, v int);
CREATE TABLE ref_table_2(id int primary key, v int references ref_table_1(id));
CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id));
-SELECT create_reference_table('ref_table_1'),
- create_reference_table('ref_table_2'),
- create_reference_table('ref_table_3');
- create_reference_table | create_reference_table | create_reference_table
+SELECT create_reference_table('ref_table_1');
+ create_reference_table
---------------------------------------------------------------------
- | |
+
+(1 row)
+
+SELECT create_reference_table('ref_table_2');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_reference_table('ref_table_3');
+ create_reference_table
+---------------------------------------------------------------------
+
(1 row)
-- status before master_add_node
@@ -795,7 +793,7 @@ WHERE
ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport
---------------------------------------------------------------------
- 1370019 | 1 | 0 | localhost | 57637
+ 1370021 | 1 | 0 | localhost | 57637
(1 row)
-- we should see the two shard placements after activation
@@ -820,7 +818,7 @@ WHERE
ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport
---------------------------------------------------------------------
- 1370019 | 1 | 0 | localhost | 57637
+ 1370021 | 1 | 0 | localhost | 57637
(1 row)
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
@@ -850,7 +848,7 @@ HINT: Add the target node via SELECT citus_add_node('localhost', 57638);
SELECT citus_add_secondary_node('localhost', :worker_2_port, 'localhost', :worker_1_port);
citus_add_secondary_node
---------------------------------------------------------------------
- 1370014
+ 1370013
(1 row)
SELECT citus_copy_shard_placement(
@@ -1139,8 +1137,10 @@ select 1 FROM master_add_node('localhost', :worker_2_port);
BEGIN;
DROP TABLE test;
CREATE TABLE test (x int, y int references ref(a));
-SELECT create_distributed_table('test','x');
ERROR: canceling the transaction since it was involved in a distributed deadlock
+DETAIL: When adding a foreign key from a local table to a reference table, Citus applies a conversion to all the local tables in the foreign key graph
+SELECT create_distributed_table('test','x');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
END;
-- verify the split fails if we still need to replicate reference tables
SELECT citus_remove_node('localhost', :worker_2_port);
@@ -1158,7 +1158,7 @@ SELECT create_distributed_table('test','x');
SELECT citus_add_node('localhost', :worker_2_port);
citus_add_node
---------------------------------------------------------------------
- 1370022
+ 1370020
(1 row)
SELECT
@@ -1194,7 +1194,7 @@ errors_received := 0;
RAISE '(%/1) failed to add node', errors_received;
END;
$$;
-ERROR: (1/1) failed to add node
+ERROR: (0/1) failed to add node
-- drop unnecassary tables
DROP TABLE initially_not_replicated_reference_table;
-- reload pg_dist_shard_placement table
diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out
index a2f840aa6..edfc728db 100644
--- a/src/test/regress/expected/multi_router_planner.out
+++ b/src/test/regress/expected/multi_router_planner.out
@@ -794,15 +794,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -819,6 +837,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
RESET citus.enable_non_colocated_router_query_pushdown;
@@ -1517,15 +1543,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -1542,6 +1586,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
SELECT a.author_id as first_author, b.word_count as second_word_count
@@ -1652,11 +1704,6 @@ DETAIL: A command for a distributed function is run. To make sure subsequent co
SELECT 1 FROM authors_reference r JOIN (
SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid
) num_db ON (r.id = num_db.datid) LIMIT 1;
-DEBUG: found no worker with all shard placements
-DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM multi_router_planner.number1() s(datid)
-DEBUG: Creating router plan
-DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) num_db ON ((r.id OPERATOR(pg_catalog.=) num_db.datid))) LIMIT 1
DEBUG: Creating router plan
?column?
---------------------------------------------------------------------
@@ -1666,11 +1713,6 @@ DEBUG: Creating router plan
CREATE VIEW num_db AS
SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid;
SELECT 1 FROM authors_reference r JOIN num_db ON (r.id = num_db.datid) LIMIT 1;
-DEBUG: found no worker with all shard placements
-DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM multi_router_planner.number1() s(datid)
-DEBUG: Creating router plan
-DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) num_db ON ((r.id OPERATOR(pg_catalog.=) num_db.datid))) LIMIT 1
DEBUG: Creating router plan
?column?
---------------------------------------------------------------------
@@ -1679,9 +1721,6 @@ DEBUG: Creating router plan
-- with a CTE in a view
WITH cte AS MATERIALIZED (SELECT * FROM num_db)
SELECT 1 FROM authors_reference r JOIN cte ON (r.id = cte.datid) LIMIT 1;
-DEBUG: found no worker with all shard placements
-DEBUG: generating subplan XXX_1 for CTE cte: SELECT datid FROM (SELECT s.datid FROM (multi_router_planner.number1() s(datid) LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))) num_db
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) cte ON ((r.id OPERATOR(pg_catalog.=) cte.datid))) LIMIT 1
DEBUG: Creating router plan
?column?
---------------------------------------------------------------------
@@ -1897,15 +1936,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -1922,6 +1979,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1935,15 +2000,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -1960,6 +2043,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -1993,15 +2084,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -2018,6 +2127,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
@@ -2030,15 +2147,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -2055,6 +2190,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id
---------------------------------------------------------------------
(0 rows)
diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out
index b4003f258..9128491c9 100644
--- a/src/test/regress/expected/multi_schema_support.out
+++ b/src/test/regress/expected/multi_schema_support.out
@@ -1153,7 +1153,12 @@ SELECT create_reference_table('schema_with_user.test_table');
SET citus.next_shard_id TO 1197000;
-- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock
DROP OWNED BY "test-user" CASCADE;
-NOTICE: drop cascades to table schema_with_user.test_table
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to table schema_with_user.test_table
+drop cascades to table schema_with_user.test_table_1190039
+NOTICE: schema "schema_with_user" does not exist, skipping
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
DROP USER "test-user";
DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text);
-- test run_command_on_* UDFs with schema
diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out
index c22e8109b..57305befa 100644
--- a/src/test/regress/expected/multi_sequence_default.out
+++ b/src/test/regress/expected/multi_sequence_default.out
@@ -9,13 +9,6 @@ SET citus.shard_replication_factor TO 1;
CREATE SCHEMA sequence_default;
SET search_path = sequence_default, public;
-- test both distributed and citus local tables
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-- Cannot add a column involving DEFAULT nextval('..') because the table is not empty
CREATE SEQUENCE seq_0;
CREATE SEQUENCE seq_0_local_table;
@@ -891,10 +884,4 @@ DROP TABLE test_seq_dist;
DROP TABLE sequence_default.seq_test_7_par;
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA sequence_default CASCADE;
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
SET search_path TO public;
diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out
index 646c42599..d48f935c6 100644
--- a/src/test/regress/expected/multi_simple_queries.out
+++ b/src/test/regress/expected/multi_simple_queries.out
@@ -507,15 +507,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -532,6 +550,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- but they can be executed via repartition join planner
@@ -545,15 +571,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -570,6 +614,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 12
first_author | second_word_count
---------------------------------------------------------------------
10 | 19519
@@ -655,15 +707,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
@@ -680,6 +750,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 18
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- system columns from shard tables can be queried and retrieved
diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out
index 97036b1db..2ff8d9c4b 100644
--- a/src/test/regress/expected/multi_size_queries.out
+++ b/src/test/regress/expected/multi_size_queries.out
@@ -75,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'),
citus_table_size('supplier');
citus_table_size | citus_table_size | citus_table_size
---------------------------------------------------------------------
- 548864 | 548864 | 442368
+ 548864 | 548864 | 655360
(1 row)
CREATE INDEX index_1 on customer_copy_hash(c_custkey);
@@ -104,19 +104,19 @@ VACUUM (FULL) supplier;
SELECT citus_table_size('supplier');
citus_table_size
---------------------------------------------------------------------
- 376832
+ 565248
(1 row)
SELECT citus_relation_size('supplier');
citus_relation_size
---------------------------------------------------------------------
- 376832
+ 565248
(1 row)
SELECT citus_total_relation_size('supplier');
citus_total_relation_size
---------------------------------------------------------------------
- 376832
+ 565248
(1 row)
CREATE INDEX index_2 on supplier(s_suppkey);
@@ -124,19 +124,19 @@ VACUUM (FULL) supplier;
SELECT citus_table_size('supplier');
citus_table_size
---------------------------------------------------------------------
- 376832
+ 565248
(1 row)
SELECT citus_relation_size('supplier');
citus_relation_size
---------------------------------------------------------------------
- 376832
+ 565248
(1 row)
SELECT citus_total_relation_size('supplier');
citus_total_relation_size
---------------------------------------------------------------------
- 458752
+ 688128
(1 row)
-- Test inside the transaction
diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out
index 4a2f68162..2db4a7797 100644
--- a/src/test/regress/expected/multi_table_ddl.out
+++ b/src/test/regress/expected/multi_table_ddl.out
@@ -78,6 +78,12 @@ SELECT * FROM pg_dist_shard_placement;
DROP EXTENSION citus;
CREATE EXTENSION citus;
-- re-add the nodes to the cluster
+SELECT 1 FROM citus_set_coordinator_host('localhost');
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_tenant_isolation.out b/src/test/regress/expected/multi_tenant_isolation.out
index b370ba6c6..5af7acac8 100644
--- a/src/test/regress/expected/multi_tenant_isolation.out
+++ b/src/test/regress/expected/multi_tenant_isolation.out
@@ -986,20 +986,25 @@ SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with =>
(1 row)
-CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id));
+CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id));
+ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
+ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
+CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_2(id);
INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
@@ -1159,7 +1164,7 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
count
---------------------------------------------------------------------
- 2
+ 3
(1 row)
\c - mx_isolation_role_ent - :master_port
diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
index dbd15b056..3ec16e6ee 100644
--- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
+++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
@@ -1275,3 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out
index 509a87acf..85144944d 100644
--- a/src/test/regress/expected/multi_transaction_recovery.out
+++ b/src/test/regress/expected/multi_transaction_recovery.out
@@ -1,16 +1,5 @@
-- Tests for prepared transaction recovery
SET citus.next_shard_id TO 1220000;
--- reference tables can have placements on the coordinator. Add it so
--- verify we recover transactions which do DML on coordinator placements
--- properly.
-SET client_min_messages TO ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-RESET client_min_messages;
-- enforce 1 connection per placement since
-- the tests are prepared for that
SET citus.force_max_query_parallelization TO ON;
@@ -516,9 +505,3 @@ DROP TABLE test_recovery;
DROP TABLE test_recovery_single;
DROP TABLE test_2pcskip;
DROP TABLE test_reference;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out
index dca2466d6..761275dff 100644
--- a/src/test/regress/expected/multi_transactional_drop_shards.out
+++ b/src/test/regress/expected/multi_transactional_drop_shards.out
@@ -374,9 +374,10 @@ ORDER BY
shardid, nodename, nodeport;
shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
+ 1410006 | 1 | localhost | 57636
1410006 | 1 | localhost | 57637
1410006 | 1 | localhost | 57638
-(2 rows)
+(3 rows)
-- verify table is not dropped
\dt transactional_drop_reference
@@ -516,7 +517,7 @@ SET citus.override_table_visibility TO false;
(8 rows)
\ds transactional_drop_serial_column2_seq
- List of relations
+ List of relations
Schema | Name | Type | Owner
---------------------------------------------------------------------
public | transactional_drop_serial_column2_seq | sequence | postgres
@@ -670,13 +671,6 @@ ORDER BY
\c - - - :master_port
SET client_min_messages TO WARNING;
--- try using the coordinator as a worker and then dropping the table
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
CREATE TABLE citus_local (id serial, k int);
SELECT create_distributed_table('citus_local', 'id');
create_distributed_table
@@ -686,12 +680,6 @@ SELECT create_distributed_table('citus_local', 'id');
INSERT INTO citus_local (k) VALUES (2);
DROP TABLE citus_local;
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-- clean the workspace
DROP TABLE transactional_drop_shards, transactional_drop_reference;
-- test DROP TABLE as a non-superuser in a transaction block
diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out
index 9b47237ea..f08f0c3c7 100644
--- a/src/test/regress/expected/multi_truncate.out
+++ b/src/test/regress/expected/multi_truncate.out
@@ -432,8 +432,12 @@ CREATE TABLE dist(id int, ref_id int REFERENCES ref(id));
INSERT INTO dist SELECT x,x FROM generate_series(1,10000) x;
-- test that we do not cascade truncates to local referencing tables
SELECT truncate_local_data_after_distributing_table('ref');
-ERROR: cannot truncate a table referenced in a foreign key constraint by a local table
-DETAIL: Table "dist" references "ref"
+NOTICE: truncate cascades to table "dist"
+ truncate_local_data_after_distributing_table
+---------------------------------------------------------------------
+
+(1 row)
+
-- test that we allow distributing tables that have foreign keys to reference tables
SELECT create_distributed_table('dist','id');
NOTICE: Copying data from local table...
@@ -461,11 +465,12 @@ NOTICE: truncate cascades to table "dist"
(1 row)
SELECT * FROM table_sizes;
- name | has_data
+ name | has_data
---------------------------------------------------------------------
- dist | f
- ref | f
-(2 rows)
+ dist | f
+ ref | f
+ ref_1210032 | t
+(3 rows)
ROLLBACK;
-- the following should truncate dist table only
@@ -477,11 +482,12 @@ SELECT truncate_local_data_after_distributing_table('dist');
(1 row)
SELECT * FROM table_sizes;
- name | has_data
+ name | has_data
---------------------------------------------------------------------
- dist | f
- ref | t
-(2 rows)
+ dist | f
+ ref | f
+ ref_1210032 | t
+(3 rows)
ROLLBACK;
DROP TABLE ref, dist;
diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out
index 93021a067..b82e54f16 100644
--- a/src/test/regress/expected/multi_utilities.out
+++ b/src/test/regress/expected/multi_utilities.out
@@ -370,6 +370,8 @@ NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
VACUUM distributed_vacuum_table;
NOTICE: issuing VACUUM multi_utilities.distributed_vacuum_table_970001
@@ -382,12 +384,16 @@ NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_vacuum_table should propagate
VACUUM local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (disable_page_skipping) aggressively process pages of the relation, it does not respect visibility map
VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table;
VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table;
@@ -440,6 +446,8 @@ NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- give enough time for stats to be updated.(updated per 500ms by default)
select pg_sleep(1);
pg_sleep
@@ -499,6 +507,8 @@ NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
ANALYZE distributed_analyze_table;
NOTICE: issuing ANALYZE multi_utilities.distributed_analyze_table_970003
@@ -511,12 +521,16 @@ NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_analyze_table should propagate
ANALYZE local_analyze_table, reference_analyze_table;
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF;
ANALYZE distributed_analyze_table;
diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out
index e8ebe3f3f..8999038ec 100644
--- a/src/test/regress/expected/pg12.out
+++ b/src/test/regress/expected/pg12.out
@@ -404,13 +404,6 @@ where val = 'asdf';
3
(1 row)
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
BEGIN;
CREATE TABLE generated_stored_col_test (x int, y int generated always as (x+1) stored);
SELECT citus_add_local_table_to_metadata('generated_stored_col_test');
@@ -639,12 +632,6 @@ NOTICE: renaming the new table to test_pg12.generated_stored_ref
(4 rows)
ROLLBACK;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
CREATE TABLE superuser_columnar_table (a int) USING columnar;
CREATE USER read_access;
SET ROLE read_access;
diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out
index ae5fe8bdc..e4f94c053 100644
--- a/src/test/regress/expected/pg14.out
+++ b/src/test/regress/expected/pg14.out
@@ -1332,12 +1332,6 @@ set client_min_messages to error;
drop schema pg14 cascade;
create schema pg14;
set search_path to pg14;
-select 1 from citus_add_node('localhost',:master_port,groupid=>0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-- test adding foreign table to metadata with the guc
-- will test truncating foreign tables later
CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
@@ -1505,9 +1499,3 @@ set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
reset client_min_messages;
-select 1 from citus_remove_node('localhost',:master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out
index 4d1040a7e..68c4c4466 100644
--- a/src/test/regress/expected/pg15.out
+++ b/src/test/regress/expected/pg15.out
@@ -218,6 +218,9 @@ BEGIN;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to column col_3 of table generated_stored_ref
drop cascades to column col_5 of table generated_stored_ref
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to column col_3 of table generated_stored_ref_960016
+drop cascades to column col_5 of table generated_stored_ref_960016
ALTER TABLE generated_stored_ref DROP COLUMN col_4;
-- show that undistribute_table works fine
SELECT undistribute_table('generated_stored_ref');
@@ -269,15 +272,6 @@ CREATE TABLE tbl2
-- on local tables works fine
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
--- add coordinator node as a worker
-SET client_min_messages to ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-RESET client_min_messages;
-- one table is Citus local table, fails
SELECT citus_add_local_table_to_metadata('tbl1');
citus_add_local_table_to_metadata
@@ -398,12 +392,6 @@ SET search_path TO pg15;
SET client_min_messages to ERROR;
DROP TABLE FKTABLE_local, PKTABLE_local;
RESET client_min_messages;
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SELECT create_distributed_table('tbl1', 'x');
create_distributed_table
---------------------------------------------------------------------
@@ -880,8 +868,8 @@ SELECT create_reference_table('FKTABLE');
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
- FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
+ FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
(2 rows)
\c - - - :worker_1_port
@@ -1274,6 +1262,7 @@ SELECT create_reference_table('set_on_default_test_referenced');
(1 row)
+-- should error since col_3 defaults to a sequence
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
@@ -1281,10 +1270,7 @@ CREATE TABLE set_on_default_test_referencing(
ON DELETE SET DEFAULT (col_1)
ON UPDATE SET DEFAULT
);
--- should error since col_3 defaults to a sequence
-SELECT create_reference_table('set_on_default_test_referencing');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
-DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
@@ -1447,12 +1433,6 @@ NOTICE: renaming the new table to pg15.foreign_table_test
(1 row)
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
DROP SERVER foreign_server CASCADE;
NOTICE: drop cascades to 2 other objects
-- PG15 now supports specifying oid on CREATE DATABASE
diff --git a/src/test/regress/expected/pgmerge.out b/src/test/regress/expected/pgmerge.out
index 6bdb7f771..7742610f4 100644
--- a/src/test/regress/expected/pgmerge.out
+++ b/src/test/regress/expected/pgmerge.out
@@ -15,13 +15,6 @@ SET search_path TO pgmerge_schema;
SET citus.use_citus_managed_tables to true;
\set SHOW_CONTEXT errors
SET citus.next_shard_id TO 4001000;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
CREATE USER regress_merge_privs;
CREATE USER regress_merge_no_privs;
DROP TABLE IF EXISTS target;
@@ -2133,9 +2126,3 @@ drop cascades to table source2
drop cascades to function merge_trigfunc()
DROP USER regress_merge_privs;
DROP USER regress_merge_no_privs;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/propagate_foreign_servers.out b/src/test/regress/expected/propagate_foreign_servers.out
index 551d1dde7..076973da3 100644
--- a/src/test/regress/expected/propagate_foreign_servers.out
+++ b/src/test/regress/expected/propagate_foreign_servers.out
@@ -30,13 +30,6 @@ CREATE FOREIGN TABLE foreign_table (
)
SERVER foreign_server_dependent_schema
OPTIONS (schema_name 'test_dependent_schema', table_name 'foreign_table_test');
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId=>0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-- verify that the aggregate is propagated to the new node
SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%propagate_foreign_server.array_agg%';$$);
run_command_on_workers
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index a267cbe71..7fc75637d 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -2,13 +2,6 @@ CREATE SCHEMA publication;
CREATE SCHEMA "publication-1";
SET search_path TO publication;
SET citus.shard_replication_factor TO 1;
--- for citus_add_local_table_to_metadata / create_distributed_table_concurrently
-SELECT citus_set_coordinator_host('localhost', :master_port);
- citus_set_coordinator_host
----------------------------------------------------------------------
-
-(1 row)
-
CREATE OR REPLACE FUNCTION activate_node_snapshot()
RETURNS text[]
LANGUAGE C STRICT
@@ -264,7 +257,6 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
\q
\endif
-- recreate a mixed publication
@@ -371,9 +363,3 @@ DROP PUBLICATION pubpartitioned;
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out
index 617950a76..02978ff65 100644
--- a/src/test/regress/expected/publication_0.out
+++ b/src/test/regress/expected/publication_0.out
@@ -2,13 +2,6 @@ CREATE SCHEMA publication;
CREATE SCHEMA "publication-1";
SET search_path TO publication;
SET citus.shard_replication_factor TO 1;
--- for citus_add_local_table_to_metadata / create_distributed_table_concurrently
-SELECT citus_set_coordinator_host('localhost', :master_port);
- citus_set_coordinator_host
----------------------------------------------------------------------
-
-(1 row)
-
CREATE OR REPLACE FUNCTION activate_node_snapshot()
RETURNS text[]
LANGUAGE C STRICT
@@ -264,10 +257,4 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
\q
diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out
index ff04ad50e..992b91f9f 100644
--- a/src/test/regress/expected/query_single_shard_table.out
+++ b/src/test/regress/expected/query_single_shard_table.out
@@ -2,13 +2,6 @@ CREATE SCHEMA query_single_shard_table;
SET search_path TO query_single_shard_table;
SET citus.next_shard_id TO 1620000;
SET citus.shard_count TO 32;
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int);
CREATE TABLE nullkey_c1_t2(a int, b int);
@@ -1879,9 +1872,3 @@ DEBUG: Creating router plan
SET client_min_messages TO ERROR;
DROP SCHEMA query_single_shard_table CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
diff --git a/src/test/regress/expected/recurring_outer_join.out b/src/test/regress/expected/recurring_outer_join.out
index aa8cb906d..4ff353838 100644
--- a/src/test/regress/expected/recurring_outer_join.out
+++ b/src/test/regress/expected/recurring_outer_join.out
@@ -2,14 +2,6 @@ CREATE SCHEMA recurring_outer_join;
SET search_path TO recurring_outer_join;
SET citus.next_shard_id TO 1520000;
SET citus.shard_count TO 32;
--- idempotently add node to allow this test to run without add_coordinator
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
SET client_min_messages TO DEBUG1;
CREATE TABLE dist_1 (a int, b int);
SELECT create_distributed_table('dist_1', 'a');
@@ -2012,9 +2004,3 @@ DEBUG: performing repartitioned INSERT ... SELECT
ROLLBACK;
SET client_min_messages TO ERROR;
DROP SCHEMA recurring_outer_join CASCADE;
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
diff --git a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out
index 4532b3bb2..029d7b451 100644
--- a/src/test/regress/expected/recursive_dml_with_different_planners_executors.out
+++ b/src/test/regress/expected/recursive_dml_with_different_planners_executors.out
@@ -71,10 +71,6 @@ UPDATE distributed_table SET dept = foo.max_dept FROM
) as foo WHERE foo.max_dept >= dept and tenant_id = '8';
DEBUG: generating subplan XXX_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))))
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text))
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_dml_with_different_planner_executors CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table distributed_table
-drop cascades to table second_distributed_table
-drop cascades to table reference_table
SET search_path TO public;
diff --git a/src/test/regress/expected/recursive_relation_planning_restriction_pushdown.out b/src/test/regress/expected/recursive_relation_planning_restriction_pushdown.out
index 6a41c735a..26c4e09b4 100644
--- a/src/test/regress/expected/recursive_relation_planning_restriction_pushdown.out
+++ b/src/test/regress/expected/recursive_relation_planning_restriction_pushdown.out
@@ -491,18 +491,14 @@ SELECT MAX(x) FROM (
UNION ALL
SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE b > 0) AS s1 WHERE false
) as res;
-DEBUG: Wrapping relation "tbl2" to a subquery
-DEBUG: generating subplan XXX_1 for subquery SELECT b FROM push_down_filters.tbl2 WHERE (b OPERATOR(pg_catalog.>) 0)
-DEBUG: Wrapping relation "tbl2" to a subquery
-DEBUG: generating subplan XXX_2 for subquery SELECT b FROM push_down_filters.tbl2 WHERE false
-DEBUG: generating subplan XXX_3 for subquery SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT tbl2_1.b FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE true UNION ALL SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT tbl2_1.b FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE false
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) res
max
---------------------------------------------------------------------
1
(1 row)
DROP TABLE tbl1, tbl2;
+CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
+PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE table tbl2(a int, b int, d int);
CREATE table tbl1(a int, b int, c int);
INSERT INTO tbl1 VALUES (1,1,1);
@@ -563,12 +559,6 @@ SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE tbl2.b > 0) AS s1 WHERE true
UNION ALL
SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE tbl2.b > 0) AS s1 WHERE false
) as res;
-DEBUG: Wrapping relation "tbl2" to a subquery
-DEBUG: generating subplan XXX_1 for subquery SELECT b FROM push_down_filters.tbl2 WHERE (b OPERATOR(pg_catalog.>) 0)
-DEBUG: Wrapping relation "tbl2" to a subquery
-DEBUG: generating subplan XXX_2 for subquery SELECT b FROM push_down_filters.tbl2 WHERE false
-DEBUG: generating subplan XXX_3 for subquery SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT NULL::integer AS a, tbl2_1.b, NULL::integer AS d FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE true UNION ALL SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT NULL::integer AS a, tbl2_1.b, NULL::integer AS d FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE false
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) res
max
---------------------------------------------------------------------
1
@@ -577,4 +567,4 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max
\set VERBOSITY terse
RESET client_min_messages;
DROP SCHEMA push_down_filters CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 8 other objects
diff --git a/src/test/regress/expected/recursive_view_local_table.out b/src/test/regress/expected/recursive_view_local_table.out
index b4ef802b4..dd11b103a 100644
--- a/src/test/regress/expected/recursive_view_local_table.out
+++ b/src/test/regress/expected/recursive_view_local_table.out
@@ -152,8 +152,11 @@ SELECT ref_table.* FROM ref_table JOIN (SELECT * FROM recursive_defined_non_recu
(3 rows)
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a);
-ERROR: direct joins between distributed and local tables are not supported
-HINT: Use CTE's or subqueries to select from local tables and use them in joins
+ a | b
+---------------------------------------------------------------------
+ 1 | 1
+(1 row)
+
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a) AND false;
a | b
---------------------------------------------------------------------
@@ -196,9 +199,5 @@ SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM recursive_defined_
---------------------------------------------------------------------
(0 rows)
+SET client_min_messages TO WARNING;
DROP SCHEMA postgres_local_table CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table local_table
-drop cascades to view recursive_view
-drop cascades to view recursive_defined_non_recursive_view
-drop cascades to table ref_table
diff --git a/src/test/regress/expected/relation_access_tracking.out b/src/test/regress/expected/relation_access_tracking.out
index 0b8c1d05b..052c456e5 100644
--- a/src/test/regress/expected/relation_access_tracking.out
+++ b/src/test/regress/expected/relation_access_tracking.out
@@ -1020,20 +1020,6 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row)
COMMIT;
+SET client_min_messages TO WARNING;
SET search_path TO 'public';
DROP SCHEMA access_tracking CASCADE;
-NOTICE: drop cascades to 14 other objects
-DETAIL: drop cascades to function access_tracking.relation_select_access_mode(oid)
-drop cascades to function access_tracking.relation_dml_access_mode(oid)
-drop cascades to function access_tracking.relation_ddl_access_mode(oid)
-drop cascades to function access_tracking.distributed_relation(text)
-drop cascades to function access_tracking.relation_access_mode_to_text(text,integer)
-drop cascades to view access_tracking.relation_accesses
-drop cascades to table access_tracking.table_1
-drop cascades to table access_tracking.table_2
-drop cascades to table access_tracking.table_4
-drop cascades to table access_tracking.table_5
-drop cascades to table access_tracking.table_6
-drop cascades to table access_tracking.table_7
-drop cascades to table access_tracking.partitioning_test
-drop cascades to table access_tracking.table_3
diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out
index e59a1f89e..0226a7cd0 100644
--- a/src/test/regress/expected/remove_coordinator.out
+++ b/src/test/regress/expected/remove_coordinator.out
@@ -5,3 +5,10 @@ SELECT master_remove_node('localhost', :master_port);
(1 row)
+-- restore coordinator for the rest of the tests
+SELECT citus_set_coordinator_host('localhost', :master_port);
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/remove_coordinator_from_metadata.out b/src/test/regress/expected/remove_coordinator_from_metadata.out
new file mode 100644
index 000000000..5b062ed6a
--- /dev/null
+++ b/src/test/regress/expected/remove_coordinator_from_metadata.out
@@ -0,0 +1,6 @@
+SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/replicated_table_disable_node.out b/src/test/regress/expected/replicated_table_disable_node.out
index 60de41f08..be1ad92b3 100644
--- a/src/test/regress/expected/replicated_table_disable_node.out
+++ b/src/test/regress/expected/replicated_table_disable_node.out
@@ -38,7 +38,7 @@ SELECT count(*) FROM pg_dist_placement p JOIN pg_dist_node n USING(groupid)
AND p.shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
- 3
+ 4
(1 row)
\c - - - :worker_1_port
@@ -47,7 +47,7 @@ SELECT count(*) FROM pg_dist_placement p JOIN pg_dist_node n USING(groupid)
AND p.shardid IN (101500, 101501, 101502);
count
---------------------------------------------------------------------
- 3
+ 4
(1 row)
SET search_path TO disable_node_with_replicated_tables;
diff --git a/src/test/regress/expected/run_command_on_all_nodes.out b/src/test/regress/expected/run_command_on_all_nodes.out
index 76c42ad23..e95989d84 100644
--- a/src/test/regress/expected/run_command_on_all_nodes.out
+++ b/src/test/regress/expected/run_command_on_all_nodes.out
@@ -1,5 +1,11 @@
CREATE SCHEMA run_command_on_all_nodes;
SET search_path TO run_command_on_all_nodes;
+SELECT master_remove_node('localhost', :master_port);
+ master_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
-- check coordinator isn't in metadata
SELECT count(*) != 0 AS "Coordinator is in Metadata"
FROM pg_dist_node
@@ -205,3 +211,9 @@ DROP SCHEMA run_command_on_all_nodes CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table run_command_on_all_nodes.tbl
drop cascades to table run_command_on_all_nodes.test
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/sequential_modifications.out b/src/test/regress/expected/sequential_modifications.out
index e5092ae56..c0acde1da 100644
--- a/src/test/regress/expected/sequential_modifications.out
+++ b/src/test/regress/expected/sequential_modifications.out
@@ -22,7 +22,7 @@ $$
DECLARE
result bool;
BEGIN
- SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary') as s2 INTO result;
+ SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary' AND groupid <> 0 ) as s2 INTO result;
RETURN result;
END;
$$
@@ -669,13 +669,14 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
ABORT;
SET search_path TO 'public';
DROP SCHEMA test_seq_ddl CASCADE;
-NOTICE: drop cascades to 11 other objects
+NOTICE: drop cascades to 12 other objects
DETAIL: drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_worker_count()
drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_placement_count()
drop cascades to function test_seq_ddl.no_distributed_2pcs()
drop cascades to function test_seq_ddl.set_local_multi_shard_modify_mode_to_sequential()
drop cascades to table test_seq_ddl.test_table
drop cascades to table test_seq_ddl.ref_test
+drop cascades to table test_seq_ddl.ref_test_16004
drop cascades to table test_seq_ddl.test_table_rep_2
drop cascades to table test_seq_ddl.test_seq_truncate
drop cascades to table test_seq_ddl.test_seq_truncate_rep_2
diff --git a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out
index 92cde1148..db9b36506 100644
--- a/src/test/regress/expected/set_operation_and_local_tables.out
+++ b/src/test/regress/expected/set_operation_and_local_tables.out
@@ -321,15 +321,33 @@ DEBUG: push down of limit count: 2
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -346,6 +364,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 2
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
@@ -360,9 +386,5 @@ DEBUG: Creating router plan
1
(2 rows)
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_set_local CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table test
-drop cascades to table ref
-drop cascades to table local_test
diff --git a/src/test/regress/expected/set_operations.out b/src/test/regress/expected/set_operations.out
index 8580fd51a..a0dad36a8 100644
--- a/src/test/regress/expected/set_operations.out
+++ b/src/test/regress/expected/set_operations.out
@@ -916,15 +916,33 @@ DEBUG: push down of limit count: 0
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -941,6 +959,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 0
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test
@@ -957,15 +983,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -982,6 +1026,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test
@@ -1098,12 +1150,5 @@ DEBUG: Creating router plan
2 | 2
(2 rows)
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_union CASCADE;
-NOTICE: drop cascades to 6 other objects
-DETAIL: drop cascades to table test
-drop cascades to table ref
-drop cascades to table test_not_colocated
-drop cascades to view set_view_recursive
-drop cascades to view set_view_pushdown
-drop cascades to view set_view_recursive_second
diff --git a/src/test/regress/expected/shard_move_constraints.out b/src/test/regress/expected/shard_move_constraints.out
index 931e55644..72b49f262 100644
--- a/src/test/regress/expected/shard_move_constraints.out
+++ b/src/test/regress/expected/shard_move_constraints.out
@@ -472,12 +472,13 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
(1 row)
DROP SCHEMA "shard Move Fkeys Indexes" CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to extension btree_gist
drop cascades to table "shard Move Fkeys Indexes".sensors
drop cascades to table "shard Move Fkeys Indexes".colocated_dist_table
drop cascades to table "shard Move Fkeys Indexes".colocated_partitioned_table
drop cascades to table "shard Move Fkeys Indexes".reference_table
+drop cascades to table "shard Move Fkeys Indexes".reference_table_8970028
drop cascades to table "shard Move Fkeys Indexes".index_backed_rep_identity
drop cascades to table "shard Move Fkeys Indexes".multiple_unique_keys
DROP ROLE mx_rebalancer_role_ent;
diff --git a/src/test/regress/expected/shard_move_constraints_blocking.out b/src/test/regress/expected/shard_move_constraints_blocking.out
index 82d925821..5f1b91cb6 100644
--- a/src/test/regress/expected/shard_move_constraints_blocking.out
+++ b/src/test/regress/expected/shard_move_constraints_blocking.out
@@ -358,10 +358,11 @@ ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child;
-- cleanup
\c - postgres - :master_port
DROP SCHEMA "blocking shard Move Fkeys Indexes" CASCADE;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 6 other objects
DETAIL: drop cascades to table "blocking shard Move Fkeys Indexes".sensors
drop cascades to table "blocking shard Move Fkeys Indexes".colocated_dist_table
drop cascades to table "blocking shard Move Fkeys Indexes".colocated_partitioned_table
drop cascades to table "blocking shard Move Fkeys Indexes".reference_table
+drop cascades to table "blocking shard Move Fkeys Indexes".reference_table_8970028
drop cascades to table "blocking shard Move Fkeys Indexes".index_backed_rep_identity
DROP ROLE mx_rebalancer_blocking_role_ent;
diff --git a/src/test/regress/expected/single_hash_repartition_join.out b/src/test/regress/expected/single_hash_repartition_join.out
index 31a5c7e9f..7f7586355 100644
--- a/src/test/regress/expected/single_hash_repartition_join.out
+++ b/src/test/regress/expected/single_hash_repartition_join.out
@@ -196,15 +196,33 @@ DETAIL: Creating dependency on merge taskId 20
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
@@ -221,6 +239,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 24
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 29
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 34
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- single hash repartitioning is not supported between different column types
diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out
index 4360bb69e..c15e9b9d7 100644
--- a/src/test/regress/expected/subquery_and_cte.out
+++ b/src/test/regress/expected/subquery_and_cte.out
@@ -718,15 +718,6 @@ END;
$$;
ERROR: (3/3) failed to execute one of the tasks
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
-SET client_min_messages TO DEFAULT;
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_and_ctes CASCADE;
-NOTICE: drop cascades to 8 other objects
-DETAIL: drop cascades to table users_table
-drop cascades to table events_table
-drop cascades to table users_table_local
-drop cascades to table dist_table
-drop cascades to function func()
-drop cascades to table ref_table_1
-drop cascades to table ref_table_2
-drop cascades to table dist
SET search_path TO public;
diff --git a/src/test/regress/expected/subquery_append.out b/src/test/regress/expected/subquery_append.out
index 493c0bc37..381c467a7 100644
--- a/src/test/regress/expected/subquery_append.out
+++ b/src/test/regress/expected/subquery_append.out
@@ -224,7 +224,5 @@ SELECT count(*) FROM append_table WHERE extra = 1;
UPDATE append_table a sET extra = 1 FROM append_table b WHERE a.key = b.key;
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
END;
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_append CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table append_table
-drop cascades to table ref_table
diff --git a/src/test/regress/expected/subquery_basics.out b/src/test/regress/expected/subquery_basics.out
index 7a4fb77b7..9e2b226eb 100644
--- a/src/test/regress/expected/subquery_basics.out
+++ b/src/test/regress/expected/subquery_basics.out
@@ -579,6 +579,7 @@ DEBUG: Wrapping relation "dist" "d1" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT id FROM public.dist d1 WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (SELECT dist.id FROM public.dist WHERE (dist.id OPERATOR(pg_catalog.>) d1.id) GROUP BY dist.id) AS id FROM (public.ref FULL JOIN (SELECT d1_1.id, NULL::integer AS value FROM (SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) d1_1) d1 USING (id))
ERROR: correlated subqueries are not supported when the FROM clause contains a reference table
+SET client_min_messages TO WARNING;
DROP TABLE dist;
DROP TABLE ref;
DROP TABLE local;
diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out
index 32354e329..3de55b3aa 100644
--- a/src/test/regress/expected/subquery_view.out
+++ b/src/test/regress/expected/subquery_view.out
@@ -584,16 +584,11 @@ EXPLAIN (COSTS OFF) WITH cte AS (
FROM pg_stat_activity
) SELECT * FROM reference_table JOIN cte USING (text_col);
$Q$);
- coordinator_plan_with_subplans
+ coordinator_plan_with_subplans
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
- -> Distributed Subplan XXX_1
- -> Function Scan on pg_stat_get_activity s
- -> Distributed Subplan XXX_2
- -> Custom Scan (Citus Adaptive)
- Task Count: 1
Task Count: 1
-(7 rows)
+(2 rows)
CREATE OR REPLACE VIEW view_on_views AS SELECT pg_stat_activity.application_name, pg_locks.pid FROM pg_stat_activity, pg_locks;
SELECT public.coordinator_plan_with_subplans($Q$
@@ -602,35 +597,12 @@ EXPLAIN (COSTS OFF) WITH cte AS (
FROM view_on_views
) SELECT * FROM reference_table JOIN cte USING (text_col);
$Q$);
- coordinator_plan_with_subplans
+ coordinator_plan_with_subplans
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
- -> Distributed Subplan XXX_1
- -> Nested Loop
- -> Function Scan on pg_stat_get_activity s
- -> Function Scan on pg_lock_status l
Task Count: 1
-(6 rows)
+(2 rows)
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_view CASCADE;
-NOTICE: drop cascades to 19 other objects
-DETAIL: drop cascades to table users_table_local
-drop cascades to table events_table_local
-drop cascades to view view_without_subquery
-drop cascades to view view_without_subquery_second
-drop cascades to view subquery_limit
-drop cascades to view subquery_non_p_key_group_by
-drop cascades to view final_query_router
-drop cascades to view final_query_realtime
-drop cascades to view subquery_in_where
-drop cascades to view subquery_from_from_where
-drop cascades to view subquery_from_from_where_local_table
-drop cascades to view repartition_view
-drop cascades to view all_executors_view
-drop cascades to view subquery_and_ctes
-drop cascades to view subquery_and_ctes_second
-drop cascades to view deep_subquery
-drop cascades to view result_of_view_is_also_recursively_planned
-drop cascades to table reference_table
-drop cascades to view view_on_views
SET search_path TO public;
diff --git a/src/test/regress/expected/tableam.out b/src/test/regress/expected/tableam.out
index 36f3729dd..47f4c241d 100644
--- a/src/test/regress/expected/tableam.out
+++ b/src/test/regress/expected/tableam.out
@@ -96,9 +96,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
select * from test_ref;
WARNING: fake_scan_getnextslot
-DETAIL: from localhost:xxxxx
WARNING: fake_scan_getnextslot
-DETAIL: from localhost:xxxxx
a
---------------------------------------------------------------------
1
@@ -109,6 +107,7 @@ WARNING: fake_tuple_insert
DETAIL: from localhost:xxxxx
WARNING: fake_tuple_insert
DETAIL: from localhost:xxxxx
+WARNING: fake_tuple_insert
-- we should error on following, since this AM is append only
SET client_min_messages TO ERROR;
delete from test_ref;
@@ -292,4 +291,4 @@ ERROR: specifying a table access method is not supported on a partitioned table
ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
NOTICE: Citus does not propagate adding/dropping member objects
drop schema test_tableam cascade;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 6 other objects
diff --git a/src/test/regress/expected/union_pushdown.out b/src/test/regress/expected/union_pushdown.out
index cbee11f8e..040535b75 100644
--- a/src/test/regress/expected/union_pushdown.out
+++ b/src/test/regress/expected/union_pushdown.out
@@ -1469,14 +1469,5 @@ $$);
f
(1 row)
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA union_pushdown CASCADE;
-NOTICE: drop cascades to 8 other objects
-DETAIL: drop cascades to table users_table_part
-drop cascades to table events_table_part
-drop cascades to table events_table_ref
-drop cascades to table events_table_local
-drop cascades to table test_a
-drop cascades to table test_b
-drop cascades to type comp_type
-drop cascades to view v
diff --git a/src/test/regress/expected/values.out b/src/test/regress/expected/values.out
index ad5f8a911..575fd6866 100644
--- a/src/test/regress/expected/values.out
+++ b/src/test/regress/expected/values.out
@@ -636,9 +636,5 @@ DEBUG: CTE cte_1 is going to be inlined via distributed planning
(1 row)
COMMIT;
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA values_subquery CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table test_values
-drop cascades to table test_values_ref
-drop cascades to function fixed_volatile_value()
diff --git a/src/test/regress/expected/with_dml.out b/src/test/regress/expected/with_dml.out
index f2743a8d9..4f2170082 100644
--- a/src/test/regress/expected/with_dml.out
+++ b/src/test/regress/expected/with_dml.out
@@ -176,9 +176,5 @@ WITH ids_to_delete AS (
SELECT id FROM reference_table
)
DELETE FROM reference_table WHERE id = ANY(SELECT id FROM ids_to_delete);
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA with_dml CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table distributed_table
-drop cascades to table second_distributed_table
-drop cascades to table reference_table
diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out
index df1c80625..0ef716cac 100644
--- a/src/test/regress/expected/with_executors.out
+++ b/src/test/regress/expected/with_executors.out
@@ -427,7 +427,5 @@ WHERE
4365606
(1 row)
+SET client_min_messages TO WARNING;
DROP SCHEMA with_executors CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table local_table
-drop cascades to table ref_table
diff --git a/src/test/regress/expected/with_join.out b/src/test/regress/expected/with_join.out
index 23cafd2cc..c5df67ca7 100644
--- a/src/test/regress/expected/with_join.out
+++ b/src/test/regress/expected/with_join.out
@@ -385,9 +385,12 @@ join cte_1 ON cte_1.col1=d1.distrib_col;
RESET client_min_messages;
DROP SCHEMA with_join CASCADE;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table reference_table
+drop cascades to table reference_table_1501000
drop cascades to table distributed_1
drop cascades to table distributed_2
drop cascades to table reference_1
drop cascades to table reference_2
+drop cascades to table reference_1_1501009
+drop cascades to table reference_2_1501010
diff --git a/src/test/regress/expected/with_modifying.out b/src/test/regress/expected/with_modifying.out
index 997c62f93..70418251b 100644
--- a/src/test/regress/expected/with_modifying.out
+++ b/src/test/regress/expected/with_modifying.out
@@ -1083,4 +1083,4 @@ WITH mb AS (DELETE FROM modify_table WHERE id = 3 RETURNING NULL, NULL) SELECT *
\set VERBOSITY terse
DROP SCHEMA with_modifying CASCADE;
-NOTICE: drop cascades to 9 other objects
+NOTICE: drop cascades to 10 other objects
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index 5a8a6b635..b9a1db0e2 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -374,3 +374,4 @@ test: ensure_no_intermediate_data_leak
test: ensure_no_shared_connection_leak
test: check_mx
+test: check_cluster_state
diff --git a/src/test/regress/multi_mx_schedule b/src/test/regress/multi_mx_schedule
index 181f9288f..682379b78 100644
--- a/src/test/regress/multi_mx_schedule
+++ b/src/test/regress/multi_mx_schedule
@@ -17,6 +17,7 @@ test: multi_extension
test: multi_test_helpers multi_test_helpers_superuser
test: multi_mx_node_metadata
test: multi_cluster_management
+test: remove_coordinator_from_metadata
test: multi_mx_function_table_reference
test: multi_test_catalog_views
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 67fb48fa2..527dec8f7 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -128,3 +128,4 @@ test: check_mx
test: generated_identity
test: drop_database
+test: check_cluster_state
diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule
index 2510260fb..b47acd828 100644
--- a/src/test/regress/split_schedule
+++ b/src/test/regress/split_schedule
@@ -2,6 +2,7 @@
# Include tests from 'minimal_schedule' for setup.
test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
test: multi_cluster_management
+test: remove_coordinator_from_metadata
test: multi_test_catalog_views
test: tablespace
# Helpers for foreign key catalogs.
diff --git a/src/test/regress/sql/add_coordinator.sql b/src/test/regress/sql/add_coordinator.sql
index 2dba78064..81b77bfcd 100644
--- a/src/test/regress/sql/add_coordinator.sql
+++ b/src/test/regress/sql/add_coordinator.sql
@@ -3,6 +3,8 @@
--
-- node trying to add itself without specifying groupid => 0 should error out
+-- first remove the coordinator for testing master_add_node for coordinator
+SELECT master_remove_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
diff --git a/src/test/regress/sql/alter_distributed_table.sql b/src/test/regress/sql/alter_distributed_table.sql
index 348aba7b1..0577421de 100644
--- a/src/test/regress/sql/alter_distributed_table.sql
+++ b/src/test/regress/sql/alter_distributed_table.sql
@@ -195,14 +195,18 @@ SELECT COUNT(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid::r
-- test references
CREATE TABLE referenced_dist_table (a INT UNIQUE);
CREATE TABLE referenced_ref_table (a INT UNIQUE);
-CREATE TABLE table_with_references (a1 INT UNIQUE REFERENCES referenced_dist_table(a), a2 INT REFERENCES referenced_ref_table(a));
-CREATE TABLE referencing_dist_table (a INT REFERENCES table_with_references(a1));
+CREATE TABLE table_with_references (a1 INT UNIQUE, a2 INT);
+CREATE TABLE referencing_dist_table (a INT);
SELECT create_distributed_table('referenced_dist_table', 'a', colocate_with:='none');
SELECT create_reference_table('referenced_ref_table');
SELECT create_distributed_table('table_with_references', 'a1', colocate_with:='referenced_dist_table');
SELECT create_distributed_table('referencing_dist_table', 'a', colocate_with:='referenced_dist_table');
+ALTER TABLE table_with_references ADD FOREIGN KEY (a1) REFERENCES referenced_dist_table(a);
+ALTER TABLE table_with_references ADD FOREIGN KEY (a2) REFERENCES referenced_ref_table(a);
+ALTER TABLE referencing_dist_table ADD FOREIGN KEY (a) REFERENCES table_with_references(a1);
+
SET client_min_messages TO WARNING;
SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1,2;
@@ -477,3 +481,4 @@ RESET search_path;
DROP SCHEMA alter_distributed_table CASCADE;
DROP SCHEMA schema_to_test_alter_dist_table CASCADE;
+DROP USER alter_dist_table_test_user;
diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql
index 87055e364..d7720cfda 100644
--- a/src/test/regress/sql/alter_table_set_access_method.sql
+++ b/src/test/regress/sql/alter_table_set_access_method.sql
@@ -278,4 +278,3 @@ select alter_table_set_access_method('view_test_view','columnar');
SET client_min_messages TO WARNING;
DROP SCHEMA alter_table_set_access_method CASCADE;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/check_cluster_state.sql b/src/test/regress/sql/check_cluster_state.sql
new file mode 100644
index 000000000..bcdd8f1a7
--- /dev/null
+++ b/src/test/regress/sql/check_cluster_state.sql
@@ -0,0 +1 @@
+SELECT count(*) >= 1 as coordinator_exists FROM pg_dist_node WHERE groupid = 0 AND isactive;
diff --git a/src/test/regress/sql/citus_local_dist_joins.sql b/src/test/regress/sql/citus_local_dist_joins.sql
index c68fbbfd8..2a7bbacf9 100644
--- a/src/test/regress/sql/citus_local_dist_joins.sql
+++ b/src/test/regress/sql/citus_local_dist_joins.sql
@@ -2,8 +2,6 @@ CREATE SCHEMA citus_local_dist_joins;
SET search_path TO citus_local_dist_joins;
SET client_min_messages to ERROR;
-SELECT master_add_node('localhost', :master_port, groupId => 0) AS coordinator_nodeid \gset
-
CREATE TABLE citus_local(key int, value text);
SELECT citus_add_local_table_to_metadata('citus_local');
@@ -273,6 +271,5 @@ RESET citus.local_table_join_policy;
SET client_min_messages to ERROR;
DROP TABLE citus_local;
-SELECT master_remove_node('localhost', :master_port);
\set VERBOSITY terse
DROP SCHEMA citus_local_dist_joins CASCADE;
diff --git a/src/test/regress/sql/distributed_functions.sql b/src/test/regress/sql/distributed_functions.sql
index 18198a217..9eb2fe730 100644
--- a/src/test/regress/sql/distributed_functions.sql
+++ b/src/test/regress/sql/distributed_functions.sql
@@ -696,7 +696,7 @@ DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;
-- clear objects
-SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
+SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
-- This is hacky, but we should clean-up the resources as below
\c - - - :worker_1_port
@@ -710,7 +710,8 @@ TRUNCATE pg_dist_node;
SET client_min_messages TO ERROR;
DROP USER functionuser;
DROP ROLE r1;
+
SELECT 1 FROM run_command_on_workers($$DROP USER functionuser$$);
-- sync metadata again
-SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
+SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
diff --git a/src/test/regress/sql/foreign_key_restriction_enforcement.sql b/src/test/regress/sql/foreign_key_restriction_enforcement.sql
index cdef1c798..f6c7bd68b 100644
--- a/src/test/regress/sql/foreign_key_restriction_enforcement.sql
+++ b/src/test/regress/sql/foreign_key_restriction_enforcement.sql
@@ -680,6 +680,8 @@ ROLLBACK;
-- this fails since we're trying to switch to sequential mode after
-- already executing a parallel query
BEGIN;
+ SELECT master_remove_node('localhost', :master_port);
+
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
@@ -697,6 +699,8 @@ ROLLBACK;
-- same test as the above, but this time using
-- sequential mode, succeeds
BEGIN;
+ SELECT master_remove_node('localhost', :master_port);
+
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql
index cd718d17a..d3e4a74d4 100644
--- a/src/test/regress/sql/function_propagation.sql
+++ b/src/test/regress/sql/function_propagation.sql
@@ -565,8 +565,6 @@ BEGIN;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction_for_local_table'::regproc::oid;
CREATE TABLE citus_local_table_to_test_func(l1 int DEFAULT func_in_transaction_for_local_table());
- SET LOCAL client_min_messages TO WARNING;
- SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
SELECT citus_add_local_table_to_metadata('citus_local_table_to_test_func');
-- Function should be marked as distributed after distributing the table that depends on it
diff --git a/src/test/regress/sql/grant_on_foreign_server_propagation.sql b/src/test/regress/sql/grant_on_foreign_server_propagation.sql
index d2ecd482b..df0d01f62 100644
--- a/src/test/regress/sql/grant_on_foreign_server_propagation.sql
+++ b/src/test/regress/sql/grant_on_foreign_server_propagation.sql
@@ -6,6 +6,7 @@ CREATE SCHEMA "grant on server";
SET search_path TO "grant on server";
-- remove one of the worker nodes to test adding a new node later
+SELECT 1 FROM citus_remove_node('localhost', :master_port);
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
select 1 from citus_add_node('localhost',:master_port,groupId=>0);
@@ -133,5 +134,3 @@ SET client_min_messages TO ERROR;
DROP SERVER "Foreign Server" CASCADE;
DROP SCHEMA "grant on server" CASCADE;
DROP ROLE role_test_servers, role_test_servers_2, ownerrole;
-
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/insert_select_single_shard_table.sql b/src/test/regress/sql/insert_select_single_shard_table.sql
index 6593ab90b..3ea036772 100644
--- a/src/test/regress/sql/insert_select_single_shard_table.sql
+++ b/src/test/regress/sql/insert_select_single_shard_table.sql
@@ -4,9 +4,6 @@ SET search_path TO insert_select_single_shard_table;
SET citus.next_shard_id TO 1820000;
SET citus.shard_count TO 32;
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
-
SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int);
@@ -472,5 +469,3 @@ SELECT * FROM upsert_test_3 ORDER BY key_1, key_2;
SET client_min_messages TO WARNING;
DROP SCHEMA insert_select_single_shard_table CASCADE;
-
-SELECT citus_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/intermediate_results.sql b/src/test/regress/sql/intermediate_results.sql
index 44eadf0e5..4cd54b29b 100644
--- a/src/test/regress/sql/intermediate_results.sql
+++ b/src/test/regress/sql/intermediate_results.sql
@@ -337,3 +337,5 @@ COMMIT;
SET client_min_messages TO ERROR;
DROP SCHEMA other_schema CASCADE;
DROP SCHEMA intermediate_results CASCADE;
+DROP OWNED BY some_other_user;
+DROP USER some_other_user;
diff --git a/src/test/regress/sql/limit_intermediate_size.sql b/src/test/regress/sql/limit_intermediate_size.sql
index 5b013bf75..8f64c31fd 100644
--- a/src/test/regress/sql/limit_intermediate_size.sql
+++ b/src/test/regress/sql/limit_intermediate_size.sql
@@ -17,7 +17,7 @@ cte2 AS MATERIALIZED (
SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
-SET citus.max_intermediate_result_size TO 9;
+SET citus.max_intermediate_result_size TO 17;
WITH cte AS MATERIALIZED
(
SELECT
diff --git a/src/test/regress/sql/local_table_join.sql b/src/test/regress/sql/local_table_join.sql
index 96b51ff69..8d0d7d332 100644
--- a/src/test/regress/sql/local_table_join.sql
+++ b/src/test/regress/sql/local_table_join.sql
@@ -60,11 +60,16 @@ CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM distributed_table;
SET client_min_messages TO DEBUG1;
--- the user doesn't allow local / distributed table joinn
+-- the user doesn't allow local / distributed table join
+
+SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
+
SET citus.local_table_join_policy TO 'never';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
SELECT count(*) FROM postgres_table JOIN reference_table USING(key);
+SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
+
-- the user prefers local table recursively planned
SET citus.local_table_join_policy TO 'prefer-local';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
@@ -466,6 +471,7 @@ SELECT create_distributed_table('table2', 'a');
SELECT 1 AS res FROM table2 RIGHT JOIN (SELECT 1 FROM table1, table2) AS sub1 ON false;
ROLLBACK;
+SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
BEGIN;
SELECT create_reference_table('table1');
SELECT 1 AS res FROM table2 RIGHT JOIN (SELECT 1 FROM table1, table2) AS sub1 ON false;
@@ -476,6 +482,7 @@ SELECT create_reference_table('table2');
SELECT 1 AS res FROM table2 RIGHT JOIN (SELECT 1 FROM table1, table2) AS sub1 ON false;
ROLLBACK;
+SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
RESET client_min_messages;
\set VERBOSITY terse
diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql
index e78b0a393..3f8e048ca 100644
--- a/src/test/regress/sql/logical_replication.sql
+++ b/src/test/regress/sql/logical_replication.sql
@@ -15,8 +15,6 @@ SELECT oid AS postgres_oid FROM pg_roles where rolname = 'postgres' \gset
SELECT create_distributed_table('dist', 'id');
INSERT INTO dist SELECT generate_series(1, 100);
-SELECT 1 from citus_add_node('localhost', :master_port, groupId := 0);
-
-- Create a publication and subscription (including replication slot) manually.
-- This allows us to test the cleanup logic at the start of the shard move.
\c - - - :worker_1_port
@@ -55,8 +53,6 @@ SET search_path TO logical_replication;
select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-SELECT citus_remove_node('localhost', :master_port);
-
-- the subscription is still there, as there is no cleanup record for it
-- we have created it manually
SELECT count(*) from pg_subscription;
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index f10ab6c99..911642812 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -21,7 +21,6 @@ SET citus.next_shard_id TO 4000000;
SET citus.explain_all_tasks TO true;
SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
CREATE TABLE source
(
@@ -2166,4 +2165,3 @@ SET search_path TO merge_schema;
DROP SERVER foreign_server CASCADE;
DROP FUNCTION merge_when_and_write();
DROP SCHEMA merge_schema CASCADE;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql b/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql
index fe8bb4b20..700e37f6e 100644
--- a/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql
+++ b/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql
@@ -620,10 +620,6 @@ DROP TABLE AT_AddConstNoName.dist_partitioned_table;
-- Test "ADD PRIMARY KEY"
\c - - :master_host :master_port
-SET client_min_messages to ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
-RESET client_min_messages;
-
CREATE TABLE AT_AddConstNoName.citus_local_table(id int, other_column int);
SELECT citus_add_local_table_to_metadata('AT_AddConstNoName.citus_local_table');
@@ -821,8 +817,6 @@ SELECT con.conname
WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC;
\c - - :master_host :master_port
-SELECT 1 FROM master_remove_node('localhost', :master_port);
-
-- Test with unusual table and column names
CREATE TABLE AT_AddConstNoName."2nd table" ( "2nd id" INTEGER, "3rd id" INTEGER);
SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id');
diff --git a/src/test/regress/sql/multi_alter_table_add_foreign_key_without_name.sql b/src/test/regress/sql/multi_alter_table_add_foreign_key_without_name.sql
index 330ac0c45..03df65709 100644
--- a/src/test/regress/sql/multi_alter_table_add_foreign_key_without_name.sql
+++ b/src/test/regress/sql/multi_alter_table_add_foreign_key_without_name.sql
@@ -69,14 +69,14 @@ ALTER TABLE referencing_table ADD FOREIGN KEY (id) REFERENCES referenced_table(i
DROP TABLE referencing_table;
DROP TABLE referenced_table;
--- test foreign constraint creation is not supported when one of the tables is not a citus table
+-- test foreign constraint creation is supported when coordinator is in metadata
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
CREATE TABLE reference_table(id int, referencing_column int);
SELECT create_reference_table('reference_table');
ALTER TABLE reference_table ADD FOREIGN KEY (referencing_column) REFERENCES referenced_local_table(id);
DROP TABLE referenced_local_table;
-DROP TABLE reference_table;
+DROP TABLE reference_table CASCADE;
-- test foreign constraint with correct conditions
CREATE TABLE referenced_table(id int PRIMARY KEY, test_column int);
@@ -352,7 +352,6 @@ DROP TABLE dist_table CASCADE;
DROP TABLE reference_table CASCADE;
-- test ADD FOREIGN KEY from citus local to reference table
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
CREATE TABLE citus_local_table(l1 int);
SELECT citus_add_local_table_to_metadata('citus_local_table');
@@ -373,7 +372,6 @@ ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1)
ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE RESTRICT;
DROP TABLE citus_local_table CASCADE;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
RESET SEARCH_PATH;
RESET client_min_messages;
diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql
index 367fa9d58..9ec0eb28e 100644
--- a/src/test/regress/sql/multi_cluster_management.sql
+++ b/src/test/regress/sql/multi_cluster_management.sql
@@ -272,6 +272,7 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
\c - - - :master_port
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
+SELECT citus_set_coordinator_host('localhost');
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
diff --git a/src/test/regress/sql/multi_drop_extension.sql b/src/test/regress/sql/multi_drop_extension.sql
index 29da58dc4..0bb3c3ecd 100644
--- a/src/test/regress/sql/multi_drop_extension.sql
+++ b/src/test/regress/sql/multi_drop_extension.sql
@@ -23,8 +23,6 @@ BEGIN;
SET search_path TO public;
CREATE EXTENSION citus;
- SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
-
create table l1 (a int unique);
SELECT create_reference_table('l1');
@@ -135,6 +133,7 @@ ROLLBACK;
CREATE EXTENSION citus;
-- re-add the nodes to the cluster
+SELECT citus_set_coordinator_host('localhost');
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql
index 931fb02bc..dd4615434 100644
--- a/src/test/regress/sql/multi_explain.sql
+++ b/src/test/regress/sql/multi_explain.sql
@@ -1151,8 +1151,6 @@ EXPLAIN :default_analyze_flags EXECUTE q2('(1)');
CREATE SCHEMA test_auto_explain;
SET search_path TO 'test_auto_explain';
-SELECT citus_set_coordinator_host('localhost');
-
CREATE TABLE test_ref_table (key int PRIMARY KEY);
SELECT create_reference_table('test_ref_table');
@@ -1164,8 +1162,6 @@ set auto_explain.log_analyze to true;
select * from test_ref_table;
DROP SCHEMA test_auto_explain CASCADE;
-select master_remove_node('localhost', :master_port);
-SELECT public.wait_until_metadata_sync(30000);
SET client_min_messages TO ERROR;
DROP SCHEMA multi_explain CASCADE;
diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
index 5f87708a2..d0f789cd9 100644
--- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
+++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
@@ -278,10 +278,6 @@ DROP TABLE dist_partitioned_table;
SET citus.next_shard_id TO 910040;
-- test with citus local table
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0);
-RESET client_min_messages;
-
CREATE TABLE date_partitioned_citus_local_table(
measureid integer,
eventdate date,
@@ -345,4 +341,3 @@ ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE;
SET client_min_messages TO WARNING;
DROP SCHEMA fix_idx_names CASCADE;
-SELECT citus_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/multi_foreign_key.sql b/src/test/regress/sql/multi_foreign_key.sql
index 041202dff..328df1fb0 100644
--- a/src/test/regress/sql/multi_foreign_key.sql
+++ b/src/test/regress/sql/multi_foreign_key.sql
@@ -709,11 +709,6 @@ CREATE TABLE set_on_default_test_referencing(
ON UPDATE SET DEFAULT
);
--- from distributed / reference to reference, fkey exists before calling the UDFs
-SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
-SELECT create_reference_table('set_on_default_test_referencing');
-
-DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing(
col_1 serial, col_2 int, col_3 int, col_4 int
);
diff --git a/src/test/regress/sql/multi_join_pruning.sql b/src/test/regress/sql/multi_join_pruning.sql
index d655f7c52..1b8f22706 100644
--- a/src/test/regress/sql/multi_join_pruning.sql
+++ b/src/test/regress/sql/multi_join_pruning.sql
@@ -66,3 +66,8 @@ EXPLAIN (COSTS OFF)
SELECT count(*)
FROM varchar_partitioned_table table1, varchar_partitioned_table table2
WHERE table1.varchar_column = table2.varchar_column;
+
+SET client_min_messages TO WARNING;
+DROP TABLE varchar_partitioned_table;
+DROP TABLE array_partitioned_table;
+DROP TABLE composite_partitioned_table;
diff --git a/src/test/regress/sql/multi_level_recursive_queries.sql b/src/test/regress/sql/multi_level_recursive_queries.sql
index 29db13b6e..a708dd3dc 100644
--- a/src/test/regress/sql/multi_level_recursive_queries.sql
+++ b/src/test/regress/sql/multi_level_recursive_queries.sql
@@ -170,5 +170,5 @@ SELECT avg(table_5.id) FROM (
) AS table_5 INNER JOIN dist0 AS table_9 USING (id);
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA multi_recursive CASCADE;
diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql
index b03843edc..c1a0a6a9b 100644
--- a/src/test/regress/sql/multi_metadata_sync.sql
+++ b/src/test/regress/sql/multi_metadata_sync.sql
@@ -95,7 +95,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
--- Ensure that hasmetadata=false for all nodes
+-- Ensure that hasmetadata=false for all nodes except for the coordinator node
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
-- Show that metadata can not be synced on secondary node
@@ -770,7 +770,6 @@ SELECT create_reference_table('dist_table_2');
ALTER TABLE dist_table_1 ADD COLUMN b int;
-SELECT master_add_node('localhost', :master_port, groupid => 0);
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
SELECT master_remove_node('localhost', :worker_1_port);
diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql
index 85a3ece66..6fbd92638 100644
--- a/src/test/regress/sql/multi_partitioning.sql
+++ b/src/test/regress/sql/multi_partitioning.sql
@@ -1786,7 +1786,6 @@ ROLLBACK;
DROP TABLE pi_table;
-- 6) test with citus local table
-select 1 from citus_add_node('localhost', :master_port, groupid=>0);
CREATE TABLE date_partitioned_citus_local_table(
measureid integer,
eventdate date,
@@ -1938,8 +1937,6 @@ DROP TABLE date_partitioned_citus_local_table CASCADE;
DROP TABLE date_partitioned_citus_local_table_2;
set client_min_messages to notice;
-SELECT citus_remove_node('localhost', :master_port);
-
-- d) invalid tables for helper UDFs
CREATE TABLE multiple_partition_column_table(
event_id bigserial,
diff --git a/src/test/regress/sql/multi_remove_node_reference_table.sql b/src/test/regress/sql/multi_remove_node_reference_table.sql
index fbc9f9524..cc8a67239 100644
--- a/src/test/regress/sql/multi_remove_node_reference_table.sql
+++ b/src/test/regress/sql/multi_remove_node_reference_table.sql
@@ -107,8 +107,13 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);
+-- test that we cannot remove a node if it has the only placement for a shard
+SELECT master_remove_node('localhost', :master_port);
SELECT master_remove_node('localhost', :worker_1_port);
+-- restore the coordinator
+SELECT citus_set_coordinator_host('localhost');
+
\c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@@ -574,7 +579,6 @@ WHERE
ORDER BY shardid ASC;
\c - - - :master_port
-SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync();
@@ -584,8 +588,6 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
-- never mark coordinator metadatasynced = false
SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport = :master_port;
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
-
SELECT
shardid, shardstate, shardlength, nodename, nodeport
diff --git a/src/test/regress/sql/multi_repartition_join_planning.sql b/src/test/regress/sql/multi_repartition_join_planning.sql
index 30cfc7156..c2f379d23 100644
--- a/src/test/regress/sql/multi_repartition_join_planning.sql
+++ b/src/test/regress/sql/multi_repartition_join_planning.sql
@@ -9,6 +9,7 @@
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
SET citus.enable_repartition_joins to ON;
+SET citus.shard_replication_factor to 1;
create schema repartition_join;
DROP TABLE IF EXISTS repartition_join.order_line;
diff --git a/src/test/regress/sql/multi_replicate_reference_table.sql b/src/test/regress/sql/multi_replicate_reference_table.sql
index 39aaf44c6..4d5594126 100644
--- a/src/test/regress/sql/multi_replicate_reference_table.sql
+++ b/src/test/regress/sql/multi_replicate_reference_table.sql
@@ -238,14 +238,12 @@ SELECT create_reference_table('replicate_reference_table_cdtc');
SELECT citus_add_node('localhost', :worker_2_port);
-- required for create_distributed_table_concurrently
-SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
SET citus.shard_replication_factor TO 1;
CREATE TABLE distributed_table_cdtc(column1 int primary key);
SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1');
RESET citus.shard_replication_factor;
-SELECT citus_remove_node('localhost', :master_port);
SELECT
shardid, shardstate, shardlength, nodename, nodeport
@@ -456,9 +454,9 @@ CREATE TABLE ref_table_1(id int primary key, v int);
CREATE TABLE ref_table_2(id int primary key, v int references ref_table_1(id));
CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id));
-SELECT create_reference_table('ref_table_1'),
- create_reference_table('ref_table_2'),
- create_reference_table('ref_table_3');
+SELECT create_reference_table('ref_table_1');
+SELECT create_reference_table('ref_table_2');
+SELECT create_reference_table('ref_table_3');
-- status before master_add_node
SELECT
diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql
index 3b1dd188b..b41aba577 100644
--- a/src/test/regress/sql/multi_sequence_default.sql
+++ b/src/test/regress/sql/multi_sequence_default.sql
@@ -12,7 +12,6 @@ SET search_path = sequence_default, public;
-- test both distributed and citus local tables
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
-- Cannot add a column involving DEFAULT nextval('..') because the table is not empty
CREATE SEQUENCE seq_0;
CREATE SEQUENCE seq_0_local_table;
@@ -451,5 +450,4 @@ DROP TABLE test_seq_dist;
DROP TABLE sequence_default.seq_test_7_par;
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA sequence_default CASCADE;
-SELECT master_remove_node('localhost', :master_port);
SET search_path TO public;
diff --git a/src/test/regress/sql/multi_table_ddl.sql b/src/test/regress/sql/multi_table_ddl.sql
index fc6539ac9..ee826add0 100644
--- a/src/test/regress/sql/multi_table_ddl.sql
+++ b/src/test/regress/sql/multi_table_ddl.sql
@@ -58,6 +58,7 @@ DROP EXTENSION citus;
CREATE EXTENSION citus;
-- re-add the nodes to the cluster
+SELECT 1 FROM citus_set_coordinator_host('localhost');
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
diff --git a/src/test/regress/sql/multi_tenant_isolation.sql b/src/test/regress/sql/multi_tenant_isolation.sql
index 52ce044dc..c3e51b6cc 100644
--- a/src/test/regress/sql/multi_tenant_isolation.sql
+++ b/src/test/regress/sql/multi_tenant_isolation.sql
@@ -497,12 +497,19 @@ SELECT create_reference_table('test_reference_table_fkey');
CREATE TABLE test_colocated_table_1(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_colocated_table_1(id));
SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with => 'NONE');
-CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id));
+CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1');
-CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id));
+ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
+ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
+
+CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1');
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
+ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_2(id);
+
INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
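The hunks above switch from declaring foreign keys inline in CREATE TABLE to attaching them with ALTER TABLE once every table involved has been distributed. A minimal sketch of that ordering, with hypothetical table names and no claim about the exact constraint set the test uses:

    CREATE TABLE ref_parent(id int PRIMARY KEY);
    SELECT create_reference_table('ref_parent');

    CREATE TABLE dist_child(id int PRIMARY KEY, parent_id int);
    SELECT create_distributed_table('dist_child', 'id');

    -- add the constraint only after both tables are managed by Citus
    ALTER TABLE dist_child ADD FOREIGN KEY (parent_id) REFERENCES ref_parent(id);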
diff --git a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
index f74835108..1299c9282 100644
--- a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
+++ b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
@@ -607,3 +607,6 @@ TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
+
+SELECT citus_set_coordinator_host('localhost');
+
diff --git a/src/test/regress/sql/multi_transaction_recovery.sql b/src/test/regress/sql/multi_transaction_recovery.sql
index 333807267..b1072fe6b 100644
--- a/src/test/regress/sql/multi_transaction_recovery.sql
+++ b/src/test/regress/sql/multi_transaction_recovery.sql
@@ -1,13 +1,6 @@
-- Tests for prepared transaction recovery
SET citus.next_shard_id TO 1220000;
--- reference tables can have placements on the coordinator. Add it so
--- verify we recover transactions which do DML on coordinator placements
--- properly.
-SET client_min_messages TO ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
-RESET client_min_messages;
-
-- enforce 1 connection per placement since
-- the tests are prepared for that
SET citus.force_max_query_parallelization TO ON;
@@ -264,5 +257,3 @@ DROP TABLE test_recovery;
DROP TABLE test_recovery_single;
DROP TABLE test_2pcskip;
DROP TABLE test_reference;
-
-SELECT 1 FROM master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/multi_transactional_drop_shards.sql b/src/test/regress/sql/multi_transactional_drop_shards.sql
index dd7ba43a5..3ab463b7f 100644
--- a/src/test/regress/sql/multi_transactional_drop_shards.sql
+++ b/src/test/regress/sql/multi_transactional_drop_shards.sql
@@ -379,13 +379,10 @@ ORDER BY
\c - - - :master_port
SET client_min_messages TO WARNING;
--- try using the coordinator as a worker and then dropping the table
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
CREATE TABLE citus_local (id serial, k int);
SELECT create_distributed_table('citus_local', 'id');
INSERT INTO citus_local (k) VALUES (2);
DROP TABLE citus_local;
-SELECT master_remove_node('localhost', :master_port);
-- clean the workspace
DROP TABLE transactional_drop_shards, transactional_drop_reference;
diff --git a/src/test/regress/sql/pg12.sql b/src/test/regress/sql/pg12.sql
index 5624a70eb..a86dbbb42 100644
--- a/src/test/regress/sql/pg12.sql
+++ b/src/test/regress/sql/pg12.sql
@@ -267,8 +267,6 @@ select count(*)
from col_test
where val = 'asdf';
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
-
BEGIN;
CREATE TABLE generated_stored_col_test (x int, y int generated always as (x+1) stored);
SELECT citus_add_local_table_to_metadata('generated_stored_col_test');
@@ -374,8 +372,6 @@ BEGIN;
SELECT * FROM generated_stored_ref;
ROLLBACK;
-SELECT citus_remove_node('localhost', :master_port);
-
CREATE TABLE superuser_columnar_table (a int) USING columnar;
CREATE USER read_access;
diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql
index 77f3e1cc5..be4d2f72d 100644
--- a/src/test/regress/sql/pg14.sql
+++ b/src/test/regress/sql/pg14.sql
@@ -671,8 +671,6 @@ drop schema pg14 cascade;
create schema pg14;
set search_path to pg14;
-select 1 from citus_add_node('localhost',:master_port,groupid=>0);
-
-- test adding foreign table to metadata with the guc
-- will test truncating foreign tables later
CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
@@ -786,4 +784,3 @@ set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
reset client_min_messages;
-select 1 from citus_remove_node('localhost',:master_port);
diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql
index b82b0d745..e29ceff28 100644
--- a/src/test/regress/sql/pg15.sql
+++ b/src/test/regress/sql/pg15.sql
@@ -179,11 +179,6 @@ CREATE TABLE tbl2
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
--- add coordinator node as a worker
-SET client_min_messages to ERROR;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
-RESET client_min_messages;
-
-- one table is Citus local table, fails
SELECT citus_add_local_table_to_metadata('tbl1');
@@ -254,8 +249,6 @@ SET client_min_messages to ERROR;
DROP TABLE FKTABLE_local, PKTABLE_local;
RESET client_min_messages;
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
-
SELECT create_distributed_table('tbl1', 'x');
SELECT create_distributed_table('tbl2', 'x');
@@ -810,6 +803,7 @@ CREATE TABLE set_on_default_test_referenced(
);
SELECT create_reference_table('set_on_default_test_referenced');
+-- should error since col_3 defaults to a sequence
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
@@ -818,10 +812,6 @@ CREATE TABLE set_on_default_test_referencing(
ON UPDATE SET DEFAULT
);
--- should error since col_3 defaults to a sequence
-SELECT create_reference_table('set_on_default_test_referencing');
-
-DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
@@ -921,7 +911,6 @@ SELECT * FROM foreign_table WHERE c1::text LIKE 'foo' LIMIT 1; -- ERROR; cast no
RESET citus.use_citus_managed_tables;
SELECT undistribute_table('foreign_table');
SELECT undistribute_table('foreign_table_test');
-SELECT 1 FROM citus_remove_node('localhost', :master_port);
DROP SERVER foreign_server CASCADE;
-- PG15 now supports specifying oid on CREATE DATABASE
diff --git a/src/test/regress/sql/pgmerge.sql b/src/test/regress/sql/pgmerge.sql
index 9b828f27e..86dc15040 100644
--- a/src/test/regress/sql/pgmerge.sql
+++ b/src/test/regress/sql/pgmerge.sql
@@ -19,8 +19,6 @@ SET citus.use_citus_managed_tables to true;
SET citus.next_shard_id TO 4001000;
-SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
-
CREATE USER regress_merge_privs;
CREATE USER regress_merge_no_privs;
DROP TABLE IF EXISTS target;
@@ -1374,4 +1372,3 @@ REVOKE ALL ON SCHEMA pgmerge_schema FROM regress_merge_no_privs;
DROP SCHEMA pgmerge_schema CASCADE;
DROP USER regress_merge_privs;
DROP USER regress_merge_no_privs;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/propagate_foreign_servers.sql b/src/test/regress/sql/propagate_foreign_servers.sql
index 32cba12ef..a9f93a702 100644
--- a/src/test/regress/sql/propagate_foreign_servers.sql
+++ b/src/test/regress/sql/propagate_foreign_servers.sql
@@ -29,8 +29,6 @@ CREATE FOREIGN TABLE foreign_table (
SERVER foreign_server_dependent_schema
OPTIONS (schema_name 'test_dependent_schema', table_name 'foreign_table_test');
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupId=>0);
-
-- verify that the aggregate is propagated to the new node
SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%propagate_foreign_server.array_agg%';$$);
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 3fd6128b8..488c0408c 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -3,9 +3,6 @@ CREATE SCHEMA "publication-1";
SET search_path TO publication;
SET citus.shard_replication_factor TO 1;
--- for citus_add_local_table_to_metadata / create_distributed_table_concurrently
-SELECT citus_set_coordinator_host('localhost', :master_port);
-
CREATE OR REPLACE FUNCTION activate_node_snapshot()
RETURNS text[]
LANGUAGE C STRICT
@@ -187,8 +184,6 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
-
-SELECT citus_remove_node('localhost', :master_port);
\q
\endif
@@ -265,5 +260,3 @@ DROP PUBLICATION pubpartitioned;
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
-
-SELECT citus_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/query_single_shard_table.sql b/src/test/regress/sql/query_single_shard_table.sql
index 0a05558af..f1a04c9e3 100644
--- a/src/test/regress/sql/query_single_shard_table.sql
+++ b/src/test/regress/sql/query_single_shard_table.sql
@@ -4,9 +4,6 @@ SET search_path TO query_single_shard_table;
SET citus.next_shard_id TO 1620000;
SET citus.shard_count TO 32;
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
-
SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int);
@@ -1175,5 +1172,3 @@ LIMIT 10;
SET client_min_messages TO ERROR;
DROP SCHEMA query_single_shard_table CASCADE;
-
-SELECT citus_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/recurring_outer_join.sql b/src/test/regress/sql/recurring_outer_join.sql
index e26df4b86..595d734ec 100644
--- a/src/test/regress/sql/recurring_outer_join.sql
+++ b/src/test/regress/sql/recurring_outer_join.sql
@@ -4,12 +4,7 @@ SET search_path TO recurring_outer_join;
SET citus.next_shard_id TO 1520000;
SET citus.shard_count TO 32;
--- idempotently add node to allow this test to run without add_coordinator
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
-
SET client_min_messages TO DEBUG1;
-
CREATE TABLE dist_1 (a int, b int);
SELECT create_distributed_table('dist_1', 'a');
INSERT INTO dist_1 VALUES
@@ -1026,5 +1021,3 @@ ROLLBACK;
SET client_min_messages TO ERROR;
DROP SCHEMA recurring_outer_join CASCADE;
-
-SELECT master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/recursive_dml_with_different_planners_executors.sql b/src/test/regress/sql/recursive_dml_with_different_planners_executors.sql
index 179fcb198..e5fb8a6c5 100644
--- a/src/test/regress/sql/recursive_dml_with_different_planners_executors.sql
+++ b/src/test/regress/sql/recursive_dml_with_different_planners_executors.sql
@@ -60,7 +60,6 @@ UPDATE distributed_table SET dept = foo.max_dept FROM
(SELECT tenant_id FROM second_distributed_table WHERE dept IN (1, 2, 3, 4))
) as foo WHERE foo.max_dept >= dept and tenant_id = '8';
-
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_dml_with_different_planner_executors CASCADE;
SET search_path TO public;
diff --git a/src/test/regress/sql/recursive_view_local_table.sql b/src/test/regress/sql/recursive_view_local_table.sql
index c33a95e99..e813dc455 100644
--- a/src/test/regress/sql/recursive_view_local_table.sql
+++ b/src/test/regress/sql/recursive_view_local_table.sql
@@ -51,5 +51,5 @@ SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM recursive_defined_
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM recursive_defined_non_recursive_view l WHERE l.c = ref_table.a) AND false;
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM recursive_defined_non_recursive_view l WHERE l.c = ref_table.a AND false);
-
+SET client_min_messages TO WARNING;
DROP SCHEMA postgres_local_table CASCADE;
diff --git a/src/test/regress/sql/relation_access_tracking.sql b/src/test/regress/sql/relation_access_tracking.sql
index 3a4581e59..f0f132e6b 100644
--- a/src/test/regress/sql/relation_access_tracking.sql
+++ b/src/test/regress/sql/relation_access_tracking.sql
@@ -583,5 +583,6 @@ BEGIN;
SELECT * FROM relation_accesses WHERE table_name IN ('table_3') ORDER BY 1;
COMMIT;
+SET client_min_messages TO WARNING;
SET search_path TO 'public';
DROP SCHEMA access_tracking CASCADE;
diff --git a/src/test/regress/sql/remove_coordinator.sql b/src/test/regress/sql/remove_coordinator.sql
index 2db26d4d7..b0df327d1 100644
--- a/src/test/regress/sql/remove_coordinator.sql
+++ b/src/test/regress/sql/remove_coordinator.sql
@@ -1,2 +1,5 @@
-- removing coordinator from pg_dist_node should update pg_dist_colocation
SELECT master_remove_node('localhost', :master_port);
+
+-- restore coordinator for the rest of the tests
+SELECT citus_set_coordinator_host('localhost', :master_port);
diff --git a/src/test/regress/sql/remove_coordinator_from_metadata.sql b/src/test/regress/sql/remove_coordinator_from_metadata.sql
new file mode 100644
index 000000000..8ec16cfaf
--- /dev/null
+++ b/src/test/regress/sql/remove_coordinator_from_metadata.sql
@@ -0,0 +1 @@
+SELECT master_remove_node('localhost', :master_port);
diff --git a/src/test/regress/sql/run_command_on_all_nodes.sql b/src/test/regress/sql/run_command_on_all_nodes.sql
index 0004a74e7..9b7b083af 100644
--- a/src/test/regress/sql/run_command_on_all_nodes.sql
+++ b/src/test/regress/sql/run_command_on_all_nodes.sql
@@ -1,6 +1,8 @@
CREATE SCHEMA run_command_on_all_nodes;
SET search_path TO run_command_on_all_nodes;
+SELECT master_remove_node('localhost', :master_port);
+
-- check coordinator isn't in metadata
SELECT count(*) != 0 AS "Coordinator is in Metadata"
FROM pg_dist_node
@@ -85,3 +87,6 @@ SELECT success, result FROM run_command_on_all_nodes($$select count(*) from run_
SELECT success, result FROM run_command_on_all_nodes($$create index on run_command_on_all_nodes.test (x)$$);
DROP SCHEMA run_command_on_all_nodes CASCADE;
+
+SELECT citus_set_coordinator_host('localhost');
+
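This file illustrates the wrap pattern several tests now rely on: since the coordinator stays registered in metadata by default, a test that needs a worker-only pg_dist_node removes the coordinator up front and re-registers it at the end. A minimal sketch of that wrapper, assuming the usual :master_port psql variable:

    SELECT master_remove_node('localhost', :master_port);
    -- ... assertions that must not see the coordinator in pg_dist_node ...
    SELECT citus_set_coordinator_host('localhost', :master_port);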
diff --git a/src/test/regress/sql/sequential_modifications.sql b/src/test/regress/sql/sequential_modifications.sql
index 79e0a1211..3d56a20ee 100644
--- a/src/test/regress/sql/sequential_modifications.sql
+++ b/src/test/regress/sql/sequential_modifications.sql
@@ -25,7 +25,7 @@ $$
DECLARE
result bool;
BEGIN
- SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary') as s2 INTO result;
+ SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary' AND groupid <> 0 ) as s2 INTO result;
RETURN result;
END;
$$
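The tweak above makes the 2PC bookkeeping ignore the coordinator, which now sits in pg_dist_node as group 0. A quick way to see which primaries the helper counts as workers after this change, assuming the standard pg_dist_node columns:

    SELECT nodename, nodeport
    FROM pg_dist_node
    WHERE noderole = 'primary' AND groupid <> 0;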
diff --git a/src/test/regress/sql/set_operation_and_local_tables.sql b/src/test/regress/sql/set_operation_and_local_tables.sql
index cbc024729..ab044d019 100644
--- a/src/test/regress/sql/set_operation_and_local_tables.sql
+++ b/src/test/regress/sql/set_operation_and_local_tables.sql
@@ -97,6 +97,5 @@ SELECT * FROM ((SELECT x FROM test) UNION (SELECT x FROM (SELECT x FROM local_te
-- repartition is recursively planned before the set operation
(SELECT x FROM test) INTERSECT (SELECT t1.x FROM test as t1, test as t2 WHERE t1.x = t2.y LIMIT 2) INTERSECT (((SELECT x FROM local_test) UNION ALL (SELECT x FROM test)) INTERSECT (SELECT i FROM generate_series(0, 100) i)) ORDER BY 1 DESC;
-
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_set_local CASCADE;
diff --git a/src/test/regress/sql/set_operations.sql b/src/test/regress/sql/set_operations.sql
index 9f66f4dfb..633b5c0b5 100644
--- a/src/test/regress/sql/set_operations.sql
+++ b/src/test/regress/sql/set_operations.sql
@@ -200,5 +200,5 @@ SELECT * FROM set_view_recursive_second ORDER BY 1,2;
SELECT * FROM (SELECT * FROM test UNION SELECT * FROM test_not_colocated) u ORDER BY 1,2;
SELECT * FROM (SELECT * FROM test UNION ALL SELECT * FROM test_not_colocated) u ORDER BY 1,2;
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_union CASCADE;
diff --git a/src/test/regress/sql/subquery_and_cte.sql b/src/test/regress/sql/subquery_and_cte.sql
index 47bfe7184..1644c5fcc 100644
--- a/src/test/regress/sql/subquery_and_cte.sql
+++ b/src/test/regress/sql/subquery_and_cte.sql
@@ -510,7 +510,6 @@ RAISE '(%/3) failed to execute one of the tasks', errors_received;
END;
$$;
-SET client_min_messages TO DEFAULT;
-
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_and_ctes CASCADE;
SET search_path TO public;
diff --git a/src/test/regress/sql/subquery_append.sql b/src/test/regress/sql/subquery_append.sql
index 4210f61ef..ed456fc19 100644
--- a/src/test/regress/sql/subquery_append.sql
+++ b/src/test/regress/sql/subquery_append.sql
@@ -87,4 +87,5 @@ SELECT count(*) FROM append_table WHERE extra = 1;
UPDATE append_table a sET extra = 1 FROM append_table b WHERE a.key = b.key;
END;
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_append CASCADE;
diff --git a/src/test/regress/sql/subquery_basics.sql b/src/test/regress/sql/subquery_basics.sql
index cfc02521f..0acd584fe 100644
--- a/src/test/regress/sql/subquery_basics.sql
+++ b/src/test/regress/sql/subquery_basics.sql
@@ -391,6 +391,7 @@ WHERE
-- sublinks in the targetlist are not supported
SELECT (SELECT id FROM dist WHERE dist.id > d1.id GROUP BY id) FROM ref FULL JOIN dist d1 USING (id);
+SET client_min_messages TO WARNING;
DROP TABLE dist;
DROP TABLE ref;
DROP TABLE local;
diff --git a/src/test/regress/sql/subquery_view.sql b/src/test/regress/sql/subquery_view.sql
index 8f57ef5a3..23732d7e8 100644
--- a/src/test/regress/sql/subquery_view.sql
+++ b/src/test/regress/sql/subquery_view.sql
@@ -443,5 +443,6 @@ EXPLAIN (COSTS OFF) WITH cte AS (
) SELECT * FROM reference_table JOIN cte USING (text_col);
$Q$);
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_view CASCADE;
SET search_path TO public;
diff --git a/src/test/regress/sql/union_pushdown.sql b/src/test/regress/sql/union_pushdown.sql
index 884d93600..57099f060 100644
--- a/src/test/regress/sql/union_pushdown.sql
+++ b/src/test/regress/sql/union_pushdown.sql
@@ -1109,6 +1109,5 @@ SELECT k, COUNT(*) FROM v GROUP BY k ORDER BY k;
$$);
-
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA union_pushdown CASCADE;
diff --git a/src/test/regress/sql/values.sql b/src/test/regress/sql/values.sql
index 4a5bb8352..6ff2282dc 100644
--- a/src/test/regress/sql/values.sql
+++ b/src/test/regress/sql/values.sql
@@ -393,5 +393,5 @@ BEGIN;
COMMIT;
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA values_subquery CASCADE;
diff --git a/src/test/regress/sql/with_dml.sql b/src/test/regress/sql/with_dml.sql
index 8602a961b..40f9fe3ed 100644
--- a/src/test/regress/sql/with_dml.sql
+++ b/src/test/regress/sql/with_dml.sql
@@ -157,5 +157,5 @@ WITH ids_to_delete AS (
)
DELETE FROM reference_table WHERE id = ANY(SELECT id FROM ids_to_delete);
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA with_dml CASCADE;
diff --git a/src/test/regress/sql/with_executors.sql b/src/test/regress/sql/with_executors.sql
index dec5fcd9b..897c79cdc 100644
--- a/src/test/regress/sql/with_executors.sql
+++ b/src/test/regress/sql/with_executors.sql
@@ -334,4 +334,5 @@ FROM
WHERE
users_table.user_id = cte_merge.u_id;
+SET client_min_messages TO WARNING;
DROP SCHEMA with_executors CASCADE;