diff --git a/src/test/regress/expected/background_rebalance.out b/src/test/regress/expected/background_rebalance.out
index 8843654d6..3822ac678 100644
--- a/src/test/regress/expected/background_rebalance.out
+++ b/src/test/regress/expected/background_rebalance.out
@@ -120,7 +120,7 @@ SELECT pg_sleep(1);
 -- failing move due to a stopped rebalance, first clean orphans to make the error stable
 SET client_min_messages TO WARNING;
-CALL citus_cleanup_orphaned_shards();
+CALL citus_cleanup_orphaned_resources();
 RESET client_min_messages;
 SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
 WARNING: shard is already present on node localhost:xxxxx
@@ -212,3 +212,4 @@ SELECT citus_rebalance_stop();
 RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
+DROP USER non_super_user_rebalance;
diff --git a/src/test/regress/expected/cpu_priority.out b/src/test/regress/expected/cpu_priority.out
index 0c2412fdd..ad05e09f5 100644
--- a/src/test/regress/expected/cpu_priority.out
+++ b/src/test/regress/expected/cpu_priority.out
@@ -132,8 +132,12 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 (1 row)
-CALL citus_cleanup_orphaned_shards();
-NOTICE: cleaned up 4 orphaned shards
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
 -- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
 -- in their CREATE SUBSCRIPTION commands.
 SELECT pg_catalog.citus_split_shard_by_split_points(
diff --git a/src/test/regress/expected/single_node_enterprise.out b/src/test/regress/expected/single_node_enterprise.out
index 6f828830e..f8d725beb 100644
--- a/src/test/regress/expected/single_node_enterprise.out
+++ b/src/test/regress/expected/single_node_enterprise.out
@@ -487,11 +487,14 @@ SET search_path TO single_node_ent;
 -- Cleanup
 RESET citus.log_remote_commands;
 SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
 DROP SCHEMA single_node_ent CASCADE;
 DROP OWNED BY full_access_single_node;
 DROP OWNED BY read_access_single_node;
+DROP OWNED BY no_access_single_node;
 DROP ROLE full_access_single_node;
 DROP ROLE read_access_single_node;
+DROP ROLE no_access_single_node;
 -- remove the nodes for next tests
 SELECT 1 FROM master_remove_node('localhost', :master_port);
 ?column?
diff --git a/src/test/regress/sql/background_rebalance.sql b/src/test/regress/sql/background_rebalance.sql
index 6528c71b7..58cdcb123 100644
--- a/src/test/regress/sql/background_rebalance.sql
+++ b/src/test/regress/sql/background_rebalance.sql
@@ -43,7 +43,7 @@ SELECT pg_sleep(1);
 -- failing move due to a stopped rebalance, first clean orphans to make the error stable
 SET client_min_messages TO WARNING;
-CALL citus_cleanup_orphaned_shards();
+CALL citus_cleanup_orphaned_resources();
 RESET client_min_messages;
 SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
@@ -80,3 +80,4 @@
 RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
+DROP USER non_super_user_rebalance;
diff --git a/src/test/regress/sql/cpu_priority.sql b/src/test/regress/sql/cpu_priority.sql
index 921934f88..beb156fa8 100644
--- a/src/test/regress/sql/cpu_priority.sql
+++ b/src/test/regress/sql/cpu_priority.sql
@@ -71,7 +71,7 @@ SET citus.cpu_priority_for_logical_replication_senders = 15;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
 SET citus.max_high_priority_background_processes = 3;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-CALL citus_cleanup_orphaned_shards();
+SELECT public.wait_for_resource_cleanup();
 -- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
 -- in their CREATE SUBSCRIPTION commands.
diff --git a/src/test/regress/sql/single_node_enterprise.sql b/src/test/regress/sql/single_node_enterprise.sql
index 249a13cd2..fb6e47b9a 100644
--- a/src/test/regress/sql/single_node_enterprise.sql
+++ b/src/test/regress/sql/single_node_enterprise.sql
@@ -310,12 +310,15 @@ SET search_path TO single_node_ent;
 -- Cleanup
 RESET citus.log_remote_commands;
 SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
 DROP SCHEMA single_node_ent CASCADE;
 DROP OWNED BY full_access_single_node;
 DROP OWNED BY read_access_single_node;
+DROP OWNED BY no_access_single_node;
 DROP ROLE full_access_single_node;
 DROP ROLE read_access_single_node;
+DROP ROLE no_access_single_node;
 -- remove the nodes for next tests
 SELECT 1 FROM master_remove_node('localhost', :master_port);