diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out
index e239a8c78..e77ce2df1 100644
--- a/src/test/regress/expected/multi_replicate_reference_table.out
+++ b/src/test/regress/expected/multi_replicate_reference_table.out
@@ -669,7 +669,7 @@ ORDER BY shardid, nodeport;
 (0 rows)
 
 -- verify constraints have been created on the new node
-SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';');
+SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname similar to ''ref_table%\d'';');
  run_command_on_workers
 ---------------------------------------------------------------------
  (localhost,57637,t,2)
@@ -950,12 +950,6 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 (1 row)
 
 SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
- 
-(1 row)
-
 SELECT master_copy_shard_placement(
   :ref_table_shard,
   'localhost', :worker_1_port,
@@ -1027,13 +1021,6 @@ WHERE ref_table.a = dist_table.a;
 
 \c - - - :master_port
 SET search_path TO replicate_reference_table;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE:  dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
- 
-(1 row)
-
 --
 -- The following case used to get stuck on create_distributed_table() instead
 -- of detecting the distributed deadlock.
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index 707e5fe1b..44b5f44e9 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -48,8 +48,6 @@ test: multi_read_from_secondaries
 # ----------
 # multi_citus_tools tests utility functions written for citus tools
 # ----------
-test: check_mx
-test: turn_mx_off
 test: multi_citus_tools
 
 # ----------
@@ -57,7 +55,6 @@ test: multi_citus_tools
 # multi_remove_node_reference_table tests metadata changes after master_remove_node
 # ----------
 test: multi_replicate_reference_table
-test: turn_mx_on
 test: multi_remove_node_reference_table
 
 # ----------
diff --git a/src/test/regress/sql/multi_replicate_reference_table.sql b/src/test/regress/sql/multi_replicate_reference_table.sql
index 497f38619..121e35c0f 100644
--- a/src/test/regress/sql/multi_replicate_reference_table.sql
+++ b/src/test/regress/sql/multi_replicate_reference_table.sql
@@ -451,7 +451,7 @@ WHERE
 ORDER BY shardid, nodeport;
 
 -- verify constraints have been created on the new node
-SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';');
+SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname similar to ''ref_table%\d'';');
 
 DROP TABLE ref_table_1, ref_table_2, ref_table_3;
 
@@ -591,7 +591,6 @@ SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 
 SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 
 SELECT master_copy_shard_placement(
   :ref_table_shard,
@@ -637,8 +636,6 @@ WHERE ref_table.a = dist_table.a;
 
 SET search_path TO replicate_reference_table;
 
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-
 --
 -- The following case used to get stuck on create_distributed_table() instead
 -- of detecting the distributed deadlock.
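Side note on the constraint-name check changed above: LIKE only understands % and _ as wildcards, so 'ref_table%' matches any constraint name starting with ref_table, whereas SIMILAR TO accepts SQL regular-expression syntax, and 'ref_table%\d' only matches names ending in a digit (i.e. the shard-suffixed foreign keys), assuming the \d class escape is passed through to the regex engine as the updated test relies on. A minimal sketch of the difference, runnable in plain psql; the constraint names are hypothetical, not taken from the test suite:

-- Illustrative only: names below are made up.
SELECT 'ref_table_fkey_1380001' LIKE 'ref_table%';          -- true
SELECT 'ref_table_fkey_backup'  LIKE 'ref_table%';          -- true (would be over-counted)
SELECT 'ref_table_fkey_1380001' SIMILAR TO 'ref_table%\d';  -- true (trailing digit)
SELECT 'ref_table_fkey_backup'  SIMILAR TO 'ref_table%\d';  -- false (no trailing digit)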