From 10a11d79d37cd8955a8930e0e34c697f72bd80a2 Mon Sep 17 00:00:00 2001
From: naisila
Date: Fri, 10 Nov 2023 15:14:40 +0300
Subject: [PATCH] citus_non_blocking_split_shard_cleanup,
 citus_non_blocking_split_shards and logical_replication are not flaky

These tests were suspected of being flaky, and "-- try to get flaky
result" statements had been added to them while investigating. The tests
turned out not to be flaky, so drop those leftover statements. Also check
for leftover replication objects only on the workers
(run_command_on_workers instead of run_command_on_all_nodes) and adjust
the expected output accordingly.
---
 ...citus_non_blocking_split_shard_cleanup.out |  2 --
 .../citus_non_blocking_split_shards.out       | 33 +++++++++----------
 .../regress/expected/logical_replication.out  |  2 --
 ...citus_non_blocking_split_shard_cleanup.sql |  2 --
 .../sql/citus_non_blocking_split_shards.sql   |  6 ++--
 src/test/regress/sql/logical_replication.sql  |  2 --
 6 files changed, 18 insertions(+), 29 deletions(-)

diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
index f25cb7953..a559ec442 100644
--- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
+++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out
@@ -113,8 +113,6 @@ SELECT public.wait_for_resource_cleanup();
 
 (1 row)
 
--- try to get flaky result
-SET search_path TO "citus_split_test_schema";
 \c - - - :worker_2_port
 SET search_path TO "citus_split_test_schema";
 -- Replication slots should be cleaned up
diff --git a/src/test/regress/expected/citus_non_blocking_split_shards.out b/src/test/regress/expected/citus_non_blocking_split_shards.out
index 4d9b28339..1f42eab61 100644
--- a/src/test/regress/expected/citus_non_blocking_split_shards.out
+++ b/src/test/regress/expected/citus_non_blocking_split_shards.out
@@ -611,29 +611,26 @@ SELECT COUNT(*) FROM colocated_dist_table;
 --BEGIN : Cleanup
 \c - postgres - :master_port
 -- make sure we don't have any replication objects leftover on the workers
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_replication_slots$$);
- result
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_replication_slots$$);
+ run_command_on_workers
 ---------------------------------------------------------------------
- 0
- 0
- 0
-(3 rows)
+ (localhost,57637,t,0)
+ (localhost,57638,t,0)
+(2 rows)
 
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_publication$$);
- result
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_publication$$);
+ run_command_on_workers
 ---------------------------------------------------------------------
- 0
- 0
- 0
-(3 rows)
+ (localhost,57637,t,0)
+ (localhost,57638,t,0)
+(2 rows)
 
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_subscription$$);
- result
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_subscription$$);
+ run_command_on_workers
 ---------------------------------------------------------------------
- 0
- 0
- 0
-(3 rows)
+ (localhost,57637,t,0)
+ (localhost,57638,t,0)
+(2 rows)
 
 ALTER SYSTEM RESET citus.defer_shard_delete_interval;
 SELECT pg_reload_conf();
diff --git a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out
index 11a3d24ed..b5a36125a 100644
--- a/src/test/regress/expected/logical_replication.out
+++ b/src/test/regress/expected/logical_replication.out
@@ -93,8 +93,6 @@ SELECT public.wait_for_resource_cleanup();
 
 (1 row)
 
--- try to get flaky result
-SET search_path TO logical_replication;
 -- the subscription is still there, as there is no cleanup record for it
 -- we have created it manually
 SELECT subname from pg_subscription;
diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
index 3d77be874..480d81b88 100644
--- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
+++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
@@ -80,8 +80,6 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
     'force_logical');
 
 SELECT public.wait_for_resource_cleanup();
 
--- try to get flaky result
-SET search_path TO "citus_split_test_schema";
 \c - - - :worker_2_port
 SET search_path TO "citus_split_test_schema";
diff --git a/src/test/regress/sql/citus_non_blocking_split_shards.sql b/src/test/regress/sql/citus_non_blocking_split_shards.sql
index b54ff1363..88eef3ca3 100644
--- a/src/test/regress/sql/citus_non_blocking_split_shards.sql
+++ b/src/test/regress/sql/citus_non_blocking_split_shards.sql
@@ -311,9 +311,9 @@ SELECT COUNT(*) FROM colocated_dist_table;
 --BEGIN : Cleanup
 \c - postgres - :master_port
 -- make sure we don't have any replication objects leftover on the workers
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_replication_slots$$);
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_publication$$);
-SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_subscription$$);
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_replication_slots$$);
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_publication$$);
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_subscription$$);
 
 ALTER SYSTEM RESET citus.defer_shard_delete_interval;
 SELECT pg_reload_conf();
diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql
index b2d36db9f..a85c70b08 100644
--- a/src/test/regress/sql/logical_replication.sql
+++ b/src/test/regress/sql/logical_replication.sql
@@ -54,8 +54,6 @@ SET search_path TO logical_replication;
 
 select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
 
 SELECT public.wait_for_resource_cleanup();
 
--- try to get flaky result
-SET search_path TO logical_replication;
 -- the subscription is still there, as there is no cleanup record for it
 -- we have created it manually