mirror of https://github.com/citusdata/citus.git

Fix some flaky tests (#6587)

Fix for some simple flakiness. All `DROP USER` and cleanup function calls.

pull/6589/head
parent f824c996b3
commit 0c74e4cc0f
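The pattern repeated across the hunks below: a rebalance or shard move can leave orphaned shard placements behind, and any later command whose WARNING/ERROR text depends on that leftover state becomes order-dependent. Draining the orphans first makes the output deterministic. A minimal sketch of the idiom, assuming a running Citus cluster (the shard id and the :worker_*_port psql variables are the test harness's placeholders):

    -- Drain leftovers quietly, then run the command whose output the test asserts on.
    SET client_min_messages TO WARNING;       -- suppress run-to-run NOTICE noise
    CALL citus_cleanup_orphaned_shards();     -- cleans up orphaned shard placements
    CALL citus_cleanup_orphaned_resources();  -- cleans up remaining move/split resources
    RESET client_min_messages;
    SELECT citus_move_shard_placement(85674000,
        'localhost', :worker_1_port, 'localhost', :worker_2_port,
        shard_transfer_mode => 'block_writes');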
@@ -120,7 +120,7 @@ SELECT pg_sleep(1);
-- failing move due to a stopped rebalance, first clean orphans to make the error stable
SET client_min_messages TO WARNING;
CALL citus_cleanup_orphaned_shards();
CALL citus_cleanup_orphaned_resources();
RESET client_min_messages;
SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
WARNING: shard is already present on node localhost:xxxxx

@@ -212,3 +212,4 @@ SELECT citus_rebalance_stop();
RESET ROLE;
SET client_min_messages TO WARNING;
DROP SCHEMA background_rebalance CASCADE;
DROP USER non_super_user_rebalance;

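Dropping the test role at the end of the file is what the commit message's `DROP USER` refers to: a role that survives one run makes the next run's CREATE USER fail, which shows up as flakiness when schedules are repeated. A sketch of the pairing, assuming the role is created at the top of the same test file:

    CREATE USER non_super_user_rebalance;
    -- ... test body runs statements as that user via SET ROLE ...
    SET client_min_messages TO WARNING;   -- hide DROP ... CASCADE notices
    DROP SCHEMA background_rebalance CASCADE;
    DROP USER non_super_user_rebalance;   -- without this, a re-run fails at CREATE USER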
@@ -132,8 +132,12 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
(1 row)

CALL citus_cleanup_orphaned_shards();
NOTICE: cleaned up 4 orphaned shards
SELECT public.wait_for_resource_cleanup();
 wait_for_resource_cleanup
---------------------------------------------------------------------

(1 row)

-- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
-- in their CREATE SUBSCRIPTION commands.
SELECT pg_catalog.citus_split_shard_by_split_points(

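public.wait_for_resource_cleanup() is a regression-test helper; blocking until background cleanup finishes removes the race in which a later command's NOTICE output depends on whether cleanup has already run. Its real definition lives in the test harness; a plausible reimplementation, for illustration only, could poll Citus's pg_dist_cleanup queue:

    -- Illustrative only: loop until no cleanup records remain (or give up).
    CREATE OR REPLACE FUNCTION public.wait_for_resource_cleanup()
    RETURNS void AS $$
    DECLARE
        tries int := 0;
    BEGIN
        WHILE EXISTS (SELECT 1 FROM pg_dist_cleanup) AND tries < 100 LOOP
            PERFORM pg_sleep(0.1);   -- wait for the maintenance daemon
            tries := tries + 1;
        END LOOP;
    END;
    $$ LANGUAGE plpgsql;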
@@ -487,11 +487,14 @@ SET search_path TO single_node_ent;
-- Cleanup
RESET citus.log_remote_commands;
SET client_min_messages TO WARNING;
CALL pg_catalog.citus_cleanup_orphaned_resources();
DROP SCHEMA single_node_ent CASCADE;
DROP OWNED BY full_access_single_node;
DROP OWNED BY read_access_single_node;
DROP OWNED BY no_access_single_node;
DROP ROLE full_access_single_node;
DROP ROLE read_access_single_node;
DROP ROLE no_access_single_node;
-- remove the nodes for next tests
SELECT 1 FROM master_remove_node('localhost', :master_port);
 ?column?

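The ordering in that cleanup block matters: plain Postgres refuses to drop a role that still owns objects or holds privileges, so each DROP ROLE is preceded by DROP OWNED BY. A self-contained example with a hypothetical role:

    CREATE ROLE demo_role LOGIN;        -- hypothetical role for illustration
    CREATE TABLE owned_by_demo (a int);
    ALTER TABLE owned_by_demo OWNER TO demo_role;
    -- DROP ROLE demo_role;             -- would fail: role still owns objects
    DROP OWNED BY demo_role;            -- drops its objects, revokes its grants
    DROP ROLE demo_role;                -- now succeeds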
@@ -43,7 +43,7 @@ SELECT pg_sleep(1);
-- failing move due to a stopped rebalance, first clean orphans to make the error stable
SET client_min_messages TO WARNING;
CALL citus_cleanup_orphaned_shards();
CALL citus_cleanup_orphaned_resources();
RESET client_min_messages;
SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');

@@ -80,3 +80,4 @@ RESET ROLE;
SET client_min_messages TO WARNING;
DROP SCHEMA background_rebalance CASCADE;
DROP USER non_super_user_rebalance;

@@ -71,7 +71,7 @@ SET citus.cpu_priority_for_logical_replication_senders = 15;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
SET citus.max_high_priority_background_processes = 3;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
CALL citus_cleanup_orphaned_shards();
SELECT public.wait_for_resource_cleanup();

-- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
-- in their CREATE SUBSCRIPTION commands.

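These hunks repeat the same shard move under different settings so the expected output captures the effect of citus.max_high_priority_background_processes, which (per its name) bounds how many background processes may run at the elevated citus.cpu_priority_for_logical_replication_senders priority. A quick way to sanity-check such GUCs interactively (SHOW is standard Postgres; the GUC names are taken verbatim from the hunk):

    SET citus.cpu_priority_for_logical_replication_senders = 15;
    SET citus.max_high_priority_background_processes = 3;
    SHOW citus.cpu_priority_for_logical_replication_senders;  -- 15
    SHOW citus.max_high_priority_background_processes;        -- 3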
@@ -310,12 +310,15 @@ SET search_path TO single_node_ent;
-- Cleanup
RESET citus.log_remote_commands;
SET client_min_messages TO WARNING;
CALL pg_catalog.citus_cleanup_orphaned_resources();
DROP SCHEMA single_node_ent CASCADE;

DROP OWNED BY full_access_single_node;
DROP OWNED BY read_access_single_node;
DROP OWNED BY no_access_single_node;
DROP ROLE full_access_single_node;
DROP ROLE read_access_single_node;
DROP ROLE no_access_single_node;

-- remove the nodes for next tests
SELECT 1 FROM master_remove_node('localhost', :master_port);
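
The trailing master_remove_node keeps test files independent: this single-node test adds the coordinator itself as a node, and later files expect to start from an empty pg_dist_node. A node that still has shard placements cannot be removed, which is why the cleanup above runs first. A sketch, assuming the harness's :master_port psql variable:

    SELECT 1 FROM master_remove_node('localhost', :master_port);
    SELECT count(*) FROM pg_dist_node;  -- expect 0 once all nodes are removed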