Fix some flaky tests (#6587)

Fix for some simple flakiness.
All changes are `DROP USER` statements and cleanup function calls.
Ahmet Gedemenli 2022-12-29 10:19:09 +03:00 committed by GitHub
parent f824c996b3
commit 0c74e4cc0f
6 changed files with 17 additions and 5 deletions
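
In short, each flaky file now cleans up orphaned resources before any statement whose expected output depends on leftover shards, and drops every user it created at the end. A minimal sketch of that pattern (the schema and role names below are placeholders, not names from the regression suite):

-- Sketch only; flaky_schema and flaky_user are illustrative placeholders.
SET client_min_messages TO WARNING;       -- hide NOTICEs whose count or wording can vary
CALL citus_cleanup_orphaned_resources();  -- drain leftovers from earlier tests deterministically
RESET client_min_messages;
-- ... statements whose expected error/output would otherwise depend on orphan state ...
-- end-of-file cleanup so reruns and later test files start from a known state
SET client_min_messages TO WARNING;
DROP SCHEMA flaky_schema CASCADE;
DROP USER flaky_user;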


@@ -120,7 +120,7 @@ SELECT pg_sleep(1);
 -- failing move due to a stopped rebalance, first clean orphans to make the error stable
 SET client_min_messages TO WARNING;
-CALL citus_cleanup_orphaned_shards();
+CALL citus_cleanup_orphaned_resources();
 RESET client_min_messages;
 SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
 WARNING:  shard is already present on node localhost:xxxxx
@@ -212,3 +212,4 @@ SELECT citus_rebalance_stop();
 RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
+DROP USER non_super_user_rebalance;


@@ -132,8 +132,12 @@ DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
 (1 row)
-CALL citus_cleanup_orphaned_shards();
-NOTICE:  cleaned up 4 orphaned shards
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
 -- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
 -- in their CREATE SUBSCRIPTION commands.
 SELECT pg_catalog.citus_split_shard_by_split_points(
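
The old expected output pinned a NOTICE with an exact count ("cleaned up 4 orphaned shards"), which presumably varies with how much cleanup the maintenance daemon has already done; waiting for cleanup to finish avoids asserting on that count. public.wait_for_resource_cleanup() is a helper defined elsewhere in the regression suite; a rough sketch of what such a helper could look like, assuming pending records live in the pg_dist_cleanup catalog (this is not the suite's actual definition):

-- Hypothetical stand-in, NOT the suite's wait_for_resource_cleanup() definition.
CREATE OR REPLACE FUNCTION public.wait_for_resource_cleanup_sketch()
RETURNS void AS $$
BEGIN
    -- poll until the deferred-cleanup queue is drained by the maintenance daemon
    WHILE EXISTS (SELECT 1 FROM pg_dist_cleanup) LOOP
        PERFORM pg_sleep(0.1);
    END LOOP;
END;
$$ LANGUAGE plpgsql;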


@@ -487,11 +487,14 @@ SET search_path TO single_node_ent;
 -- Cleanup
 RESET citus.log_remote_commands;
 SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
 DROP SCHEMA single_node_ent CASCADE;
 DROP OWNED BY full_access_single_node;
 DROP OWNED BY read_access_single_node;
+DROP OWNED BY no_access_single_node;
 DROP ROLE full_access_single_node;
 DROP ROLE read_access_single_node;
+DROP ROLE no_access_single_node;
 -- remove the nodes for next tests
 SELECT 1 FROM master_remove_node('localhost', :master_port);
  ?column?
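
The ordering of the added cleanup lines matters: orphaned resources are removed before the schema is dropped, and each role's objects go through DROP OWNED BY before DROP ROLE, since PostgreSQL refuses to drop a role that still owns objects or holds privileges. A minimal sketch of that ordering with placeholder names:

-- Illustrative only; demo_schema and demo_role are placeholders.
CALL pg_catalog.citus_cleanup_orphaned_resources();  -- nothing left over to reference the schema
DROP SCHEMA demo_schema CASCADE;
DROP OWNED BY demo_role;  -- drop objects and revoke privileges owned by the role first
DROP ROLE demo_role;      -- now the role itself can be dropped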


@@ -43,7 +43,7 @@ SELECT pg_sleep(1);
 -- failing move due to a stopped rebalance, first clean orphans to make the error stable
 SET client_min_messages TO WARNING;
-CALL citus_cleanup_orphaned_shards();
+CALL citus_cleanup_orphaned_resources();
 RESET client_min_messages;
 SELECT citus_move_shard_placement(85674000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode => 'block_writes');
@@ -80,3 +80,4 @@ RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
+DROP USER non_super_user_rebalance;


@@ -71,7 +71,7 @@ SET citus.cpu_priority_for_logical_replication_senders = 15;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
 SET citus.max_high_priority_background_processes = 3;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-CALL citus_cleanup_orphaned_shards();
+SELECT public.wait_for_resource_cleanup();
 -- Make sure shard splits use citus.cpu_priority_for_logical_replication_senders
 -- in their CREATE SUBSCRIPTION commands.


@@ -310,12 +310,15 @@ SET search_path TO single_node_ent;
 -- Cleanup
 RESET citus.log_remote_commands;
 SET client_min_messages TO WARNING;
+CALL pg_catalog.citus_cleanup_orphaned_resources();
 DROP SCHEMA single_node_ent CASCADE;
 DROP OWNED BY full_access_single_node;
 DROP OWNED BY read_access_single_node;
+DROP OWNED BY no_access_single_node;
 DROP ROLE full_access_single_node;
 DROP ROLE read_access_single_node;
+DROP ROLE no_access_single_node;
 -- remove the nodes for next tests
 SELECT 1 FROM master_remove_node('localhost', :master_port);