From d3401b26a33545406e3a477524f6563392942378 Mon Sep 17 00:00:00 2001 From: naisila Date: Mon, 13 Nov 2023 11:02:18 +0300 Subject: [PATCH] Test whether flaky citus_split_shard_by_split_points_negative + cleanup citus_split_shard_by_split_points_negative leaves some resources behind. If I run the test twice in a row on my local machine, I get the following error in the second test run: ```diff -- UDF fails where source shard cannot be split further i.e min and max range is equal. -- Create a Shard where range cannot be split further SELECT isolate_tenant_to_new_shard('table_to_split', 1, shard_transfer_mode => 'block_writes'); - isolate_tenant_to_new_shard ---------------------------------------------------------------------- - 49761305 -(1 row) - +ERROR: could not open relation with OID 0 +CONTEXT: while executing command on localhost:57637 SELECT citus_split_shard_by_split_points( 49761305, ARRAY['-1073741826'], ARRAY[:worker_1_node, :worker_2_node]); -ERROR: Cannot split shard id "49761305" as min/max range are equal: ('-1905060026', '-1905060026'). +ERROR: could not find valid entry for shard 49761305 ``` We need a resource cleanup here, so I added that line. 
--- ...s_split_shard_by_split_points_negative.out | 30 +++++++++++++++++++ ...s_split_shard_by_split_points_negative.sql | 6 ++++ 2 files changed, 36 insertions(+) diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out index 85b1fc3ee..5e8919c21 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out @@ -136,3 +136,33 @@ DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range drop cascades to table citus_split_shard_by_split_points_negative.table_to_split drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2 --END : Cleanup +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_replication_slots$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_publication$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_subscription$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql index fe37777c7..b7da24f79 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql @@ -114,3 +114,9 @@ SELECT citus_split_shard_by_split_points( \c - postgres - :master_port DROP SCHEMA 
"citus_split_shard_by_split_points_negative" CASCADE; --END : Cleanup + +SELECT public.wait_for_resource_cleanup(); + +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_replication_slots$$); +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_publication$$); +SELECT result FROM run_command_on_all_nodes($$SELECT count(*) FROM pg_subscription$$);