Fix flaky cleanup tests (#6530)

We are seeing flakiness in our test schedule because the objects left over
from shard moves/splits vary between runs. With this commit we stop logging
cleanup object counts: every call to citus_cleanup_orphaned_resources() in
the tests is wrapped in SET client_min_messages TO WARNING / RESET
client_min_messages so the nondeterministic "cleaned up N orphaned
resources" notices no longer reach the expected output, the pg_dist_cleanup
queries in the failure tests gain an ORDER BY object_name so their row
order is stable, and the split tests now assert that no replication objects
are left behind on the workers.

fixes: #6534
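
A minimal sketch of the suppression pattern this commit applies around every
deferred cleanup in the regression tests (the call and column names are taken
from the diff below; the surrounding test setup is elided):

-- The number of orphaned resources varies from run to run, so the
-- NOTICE that reports it must not reach the expected output.
SET client_min_messages TO WARNING;
CALL pg_catalog.citus_cleanup_orphaned_resources();
RESET client_min_messages;

Where the failure tests inspect pg_dist_cleanup directly, the row order is
made deterministic instead:

SELECT operation_id, object_type, object_name, node_group_id, policy_type
FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;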
pull/6535/head
Ahmet Gedemenli 2022-12-02 12:39:36 +03:00 committed by GitHub
parent d4394b2e2d
commit 3b24c47470
10 changed files with 93 additions and 37 deletions
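
The split tests also gain an end-of-test check that no logical replication
objects survive cleanup on the workers; the queries below are the ones added
in the diff, and per the expected output each returns (localhost,<port>,t,0)
for both workers, i.e. a count of zero:

-- make sure we don't have any replication objects leftover on the workers
SELECT run_command_on_workers($$SELECT count(*) FROM pg_replication_slots$$);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_publication$$);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_subscription$$);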


@@ -525,8 +525,9 @@ NOTICE: cleaned up 11 orphaned resources
 -- END: Split a partition table directly
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
-NOTICE: cleaned up 11 orphaned resources
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- BEGIN: Validate Shard Info and Data
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport


@@ -47,6 +47,11 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 (1 row)
+-- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
+-- END: Perform deferred cleanup.
 \c - - - :worker_1_port
 SET search_path TO "citus_split_test_schema";
 SET citus.show_shards_for_app_name_prefixes = '*';


@@ -237,8 +237,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 (1 row)
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
-NOTICE: cleaned up 3 orphaned resources
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- Perform 3 way split
 SELECT pg_catalog.citus_split_shard_by_split_points(
@@ -253,8 +254,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 -- END : Split two shards : One with move and One without move.
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
-NOTICE: cleaned up 3 orphaned resources
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- BEGIN : Move a shard post split.
 SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
@@ -474,7 +476,9 @@ ERROR: cannot use logical replication to transfer shards of the relation table_
 DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
 HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
 FROM pg_dist_shard AS shard
@@ -522,8 +526,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 (1 row)
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
-NOTICE: cleaned up 3 orphaned resources
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
 FROM pg_dist_shard AS shard
@@ -580,6 +585,28 @@ SELECT COUNT(*) FROM colocated_dist_table;
 -- END: Validate Data Count
 --BEGIN : Cleanup
 \c - postgres - :master_port
+-- make sure we don't have any replication objects leftover on the workers
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_replication_slots$$);
+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,0)
+(localhost,57638,t,0)
+(2 rows)
+
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_publication$$);
+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,0)
+(localhost,57638,t,0)
+(2 rows)
+
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_subscription$$);
+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,0)
+(localhost,57638,t,0)
+(2 rows)
+
 ALTER SYSTEM RESET citus.defer_shard_delete_interval;
 SELECT pg_reload_conf();
 pg_reload_conf


@@ -325,8 +325,9 @@ SELECT run_command_on_workers($$DROP SUBSCRIPTION IF EXISTS citus_shard_move_sub
 -- cleanup leftovers
 -- verify we don't see any error for already dropped subscription
+SET client_min_messages TO WARNING;
 CALL citus_cleanup_orphaned_resources();
-NOTICE: cleaned up 3 orphaned resources
+RESET client_min_messages;
 -- cancellation on dropping subscription
 SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").cancel(' || :pid || ')');
 mitmproxy


@@ -52,11 +52,11 @@ WARNING: failed to clean up 2 orphaned shards out of 5 after a citus_split_shar
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 (3 rows)
@@ -101,7 +101,7 @@ CONTEXT: while executing command on localhost:xxxxx
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 3 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
@@ -155,11 +155,11 @@ NOTICE: cleaned up 3 orphaned resources
 ERROR: Failed to run worker_split_shard_replication_setup UDF. It should successfully execute for splitting a shard in a non-blocking way. Please retry.
 RESET client_min_messages;
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 (4 rows)
@@ -207,7 +207,7 @@ ERROR: Failed to run worker_split_shard_replication_setup UDF. It should succes
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 4 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
@@ -266,11 +266,11 @@ WARNING: failed to clean up 2 orphaned shards out of 7 after a citus_split_shar
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
@@ -319,7 +319,7 @@ CONTEXT: while executing command on localhost:xxxxx
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 5 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
@@ -378,17 +378,17 @@ WARNING: failed to clean up 2 orphaned shards out of 12 after a citus_split_sha
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
-777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 777 | 2 | citus_shard_split_subscription_xxxxxxx | 2 | 0
+777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 (8 rows)
 -- we need to allow connection so that we can connect to proxy
@@ -437,7 +437,7 @@ CONTEXT: while executing command on localhost:xxxxx
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 8 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
@@ -496,17 +496,17 @@ WARNING: failed to clean up 2 orphaned shards out of 12 after a citus_split_sha
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
-777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0
+777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
-777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 777 | 2 | citus_shard_split_subscription_xxxxxxx | 2 | 0
+777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 (8 rows)
 -- we need to allow connection so that we can connect to proxy
@@ -555,7 +555,7 @@ CONTEXT: while executing command on localhost:xxxxx
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 8 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)
@@ -615,7 +615,7 @@ WARNING: connection to the remote node localhost:xxxxx failed with the followin
 ERROR: connection not open
 CONTEXT: while executing command on localhost:xxxxx
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1
@@ -624,8 +624,8 @@ CONTEXT: while executing command on localhost:xxxxx
 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
 777 | 3 | citus_shard_split_slot_xxxxxxx_xxxxxxx | 2 | 0
-777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 777 | 2 | citus_shard_split_subscription_xxxxxxx | 2 | 0
+777 | 5 | citus_shard_split_subscription_role_10 | 2 | 0
 (8 rows)
 SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' order by relname;
@@ -678,7 +678,7 @@ CONTEXT: while executing command on localhost:xxxxx
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 8 orphaned resources
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 operation_id | object_type | object_name | node_group_id | policy_type
 ---------------------------------------------------------------------
 (0 rows)


@@ -244,7 +244,9 @@ CALL pg_catalog.citus_cleanup_orphaned_resources();
 -- END: Split a partition table directly
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- BEGIN: Validate Shard Info and Data


@@ -46,6 +46,11 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 ARRAY[:worker_2_node, :worker_2_node],
 'force_logical');
+-- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
+-- END: Perform deferred cleanup.
 \c - - - :worker_1_port
 SET search_path TO "citus_split_test_schema";


@@ -149,7 +149,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 'force_logical');
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- Perform 3 way split
@@ -161,7 +163,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 -- END : Split two shards : One with move and One without move.
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 -- BEGIN : Move a shard post split.
@@ -263,7 +267,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 ARRAY[:worker_1_node, :worker_2_node]);
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
@@ -288,7 +294,9 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
 'auto');
 -- BEGIN: Perform deferred cleanup.
+SET client_min_messages TO WARNING;
 CALL pg_catalog.citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- END: Perform deferred cleanup.
 SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
@@ -308,6 +316,11 @@ SELECT COUNT(*) FROM colocated_dist_table;
 --BEGIN : Cleanup
 \c - postgres - :master_port
+-- make sure we don't have any replication objects leftover on the workers
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_replication_slots$$);
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_publication$$);
+SELECT run_command_on_workers($$SELECT count(*) FROM pg_subscription$$);
 ALTER SYSTEM RESET citus.defer_shard_delete_interval;
 SELECT pg_reload_conf();
 DROP SCHEMA "citus_split_test_schema" CASCADE;


@@ -134,7 +134,9 @@ SELECT citus.mitmproxy('conn.allow()');
 SELECT run_command_on_workers($$DROP SUBSCRIPTION IF EXISTS citus_shard_move_subscription_10$$);
 -- cleanup leftovers
 -- verify we don't see any error for already dropped subscription
+SET client_min_messages TO WARNING;
 CALL citus_cleanup_orphaned_resources();
+RESET client_min_messages;
 -- cancellation on dropping subscription
 SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").cancel(' || :pid || ')');


@@ -40,7 +40,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 ARRAY[:worker_1_node, :worker_2_node],
 'force_logical');
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -60,7 +60,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -92,7 +92,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 RESET client_min_messages;
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -111,7 +111,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -138,7 +138,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 ARRAY[:worker_1_node, :worker_2_node],
 'force_logical');
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -157,7 +157,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -184,7 +184,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 ARRAY[:worker_1_node, :worker_2_node],
 'force_logical');
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -203,7 +203,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -230,7 +230,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 ARRAY[:worker_1_node, :worker_2_node],
 'force_logical');
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -249,7 +249,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;
@@ -277,7 +277,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 'force_logical');
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' order by relname;
 -- we need to allow connection so that we can connect to proxy
 SELECT citus.mitmproxy('conn.allow()');
@@ -297,7 +297,7 @@ SELECT create_distributed_table('table_to_split', 'id');
 \c - postgres - :master_port
 CALL pg_catalog.citus_cleanup_orphaned_resources();
 SELECT operation_id, object_type, object_name, node_group_id, policy_type
-FROM pg_dist_cleanup where operation_id = 777;
+FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name;
 \c - - - :worker_2_proxy_port
 SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog;