Turn metadata sync on in multi_replicate_reference_table and multi_citus_tools

pull/5544/head
Halil Ozan Akgul 2021-12-17 09:31:28 +03:00
parent cc4c83b1e5
commit df8d0f3db1
3 changed files with 2 additions and 21 deletions

View File

@@ -669,7 +669,7 @@ ORDER BY shardid, nodeport;
(0 rows)
-- verify constraints have been created on the new node
-SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';');
+SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname similar to ''ref_table%\d'';');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
@@ -950,12 +950,6 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
(1 row)
SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
SELECT master_copy_shard_placement(
:ref_table_shard,
'localhost', :worker_1_port,
@@ -1027,13 +1021,6 @@ WHERE ref_table.a = dist_table.a;
\c - - - :master_port
SET search_path TO replicate_reference_table;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
--
-- The following case used to get stuck on create_distributed_table() instead
-- of detecting the distributed deadlock.

View File

@@ -48,8 +48,6 @@ test: multi_read_from_secondaries
# ----------
# multi_citus_tools tests utility functions written for citus tools
# ----------
-test: check_mx
-test: turn_mx_off
test: multi_citus_tools
# ----------
@@ -57,7 +55,6 @@ test: multi_citus_tools
# multi_remove_node_reference_table tests metadata changes after master_remove_node
# ----------
test: multi_replicate_reference_table
-test: turn_mx_on
test: multi_remove_node_reference_table
# ----------

View File

@@ -451,7 +451,7 @@ WHERE
ORDER BY shardid, nodeport;
-- verify constraints have been created on the new node
-SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname like ''ref_table%'';');
+SELECT run_command_on_workers('select count(*) from pg_constraint where contype=''f'' AND conname similar to ''ref_table%\d'';');
DROP TABLE ref_table_1, ref_table_2, ref_table_3;
@@ -591,7 +591,6 @@ SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT master_copy_shard_placement(
:ref_table_shard,
@@ -637,8 +636,6 @@ WHERE ref_table.a = dist_table.a;
SET search_path TO replicate_reference_table;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
--
-- The following case used to get stuck on create_distributed_table() instead
-- of detecting the distributed deadlock.