Random tests refactoring (#7342)

While investigating replication slot leftovers
in PR https://github.com/citusdata/citus/pull/7338,
I ran into the following refactorings and cleanups
that can be done in our test suite:

- Add a separate test to remove non-default nodes
- Remove coordinator removal from the `add_coordinator` test;
  use the `remove_coordinator_from_metadata` test where needed
- Don't print nodeids in the `multi_multiuser_auth` and
  `multi_poolinfo_usage` tests
- Use `startswith` when checking for isolation or failure tests
- Add the corresponding dependencies in `run_test.py` for running
  flaky test schedules
Naisila Puka 2023-11-14 12:49:15 +03:00 committed by GitHub
parent e4ac3e6d9a
commit cdef2d5224
15 changed files with 60 additions and 132 deletions


@@ -135,20 +135,10 @@ DEPS = {
),
"alter_role_propagation": TestDeps("minimal_schedule"),
"background_rebalance": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=3,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
),
"background_rebalance_parallel": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=6,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
),
"function_propagation": TestDeps("minimal_schedule"),
"citus_shards": TestDeps("minimal_schedule"),
@@ -165,30 +155,17 @@ DEPS = {
),
"schema_based_sharding": TestDeps("minimal_schedule"),
"multi_sequence_default": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
"multi_table_ddl",
],
None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
),
"grant_on_schema_propagation": TestDeps("minimal_schedule"),
"propagate_extension_commands": TestDeps("minimal_schedule"),
"multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
"multi_mx_node_metadata": TestDeps(
None,
[
"multi_extension",
"multi_test_helpers",
"multi_test_helpers_superuser",
],
None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
),
"multi_mx_function_table_reference": TestDeps(
None,
[
"multi_cluster_management",
"remove_coordinator_from_metadata",
],
["multi_cluster_management", "remove_coordinator_from_metadata"],
# because it queries node group id and it changes as we add / remove nodes
repeatable=False,
),
@@ -201,15 +178,25 @@ DEPS = {
],
),
"metadata_sync_helpers": TestDeps(
None,
[
"multi_mx_node_metadata",
"multi_cluster_management",
],
None, ["multi_mx_node_metadata", "multi_cluster_management"]
),
"multi_utilities": TestDeps(
"multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
"multi_tenant_isolation_nonblocking": TestDeps(
"minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
),
"remove_non_default_nodes": TestDeps(
None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
),
"citus_split_shard_columnar_partitioned": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"]
),
"add_coordinator": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
),
"multi_multiuser_auth": TestDeps(
"minimal_schedule",
["multi_data_types"],
["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
repeatable=False,
),
}
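For context, the `DEPS` entries above drive how `run_test.py` assembles a one-off schedule when a single (possibly flaky) test is run on its own. The snippet below is only a minimal sketch of that idea, not the actual implementation: the `TestDeps` fields mirror the ones visible in the diff, while `build_schedule_lines` is a hypothetical helper added for illustration.

# Minimal sketch, not run_test.py itself: turning a DEPS entry into a
# throwaway schedule. TestDeps fields follow the diff above; the helper
# name build_schedule_lines is hypothetical.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class TestDeps:
    schedule: Optional[str]                                 # base schedule to run first, if any
    extra_tests: List[str] = field(default_factory=list)    # tests that must run before this one
    repeatable: bool = True                                  # safe to run multiple times in one cluster?
    worker_count: int = 2                                    # workers the test expects

def build_schedule_lines(test_name: str, deps: TestDeps, repeat: int = 2) -> List[str]:
    lines = [f"test: {dep}" for dep in deps.extra_tests]
    # Non-repeatable tests (e.g. ones that add/remove nodes or change group ids) run only once.
    lines += [f"test: {test_name}"] * (repeat if deps.repeatable else 1)
    return lines

# One of the entries added in this commit:
deps = TestDeps(None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False)
print("\n".join(build_schedule_lines("remove_non_default_nodes", deps)))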
@@ -303,9 +290,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
worker_count = needed_worker_count(test_name, dependencies)
# find suitable make recipe
if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
"isolation"
):
make_recipe = "check-isolation-custom-schedule"
elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
"failure"
):
make_recipe = "check-failure-custom-schedule"
else:
make_recipe = "check-custom-schedule"
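The switch from a substring check to `startswith` matters because a regular test can contain the word "isolation" or "failure" in the middle of its name. A quick illustration in plain Python, outside of run_test.py:

# "multi_tenant_isolation_nonblocking" is a regular SQL test (it gets a DEPS
# entry with minimal_schedule above), yet a substring check would route it
# to the isolation recipe.
test_name = "multi_tenant_isolation_nonblocking"
print("isolation" in test_name)            # True  -> wrong recipe with the old check
print(test_name.startswith("isolation"))   # False -> falls through to check-custom-schedule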
@@ -418,10 +409,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
if "upgrade_columnar_before" not in before_tests:
before_tests.append("upgrade_columnar_before")
return TestDeps(
default_base_schedule(test_schedule, args),
before_tests,
)
return TestDeps(default_base_schedule(test_schedule, args), before_tests)
# before_ tests leave stuff around on purpose for the after tests. So they
# are not repeatable by definition.


@@ -2,13 +2,6 @@
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);


@@ -12,19 +12,9 @@
\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
-- alice is a superuser so she can update own password
CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;


@@ -6,19 +6,9 @@
-- Test of ability to override host/port for a node
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
CREATE TABLE lotsa_connections (id integer, name text);
SELECT create_distributed_table('lotsa_connections', 'id');


@@ -1275,9 +1275,3 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)


@@ -0,0 +1,13 @@
-- The default nodes for the citus test suite are coordinator and 2 worker nodes
-- Which we identify with master_port, worker_1_port, worker_2_port.
-- When needed in some tests, GetLocalNodeId() does not behave correctly,
-- So we remove the non default nodes. This test expects the non default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
any_value
---------------------------------------------------------------------
(1 row)


@@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
SELECT citus_remove_node('localhost', 8887);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9995);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9992);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9998);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9997);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 8888);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,


@@ -295,6 +295,7 @@ test: multi_foreign_key_relation_graph
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: remove_coordinator_from_metadata
test: add_coordinator
test: replicate_reference_tables_to_coordinator
test: citus_local_tables


@@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
# Split tests go here.
test: split_shard
test: worker_split_copy_test
test: remove_non_default_nodes
test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative


@@ -3,8 +3,6 @@
--
-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset


@@ -16,9 +16,9 @@
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
-- alice is a superuser so she can update own password


@@ -7,9 +7,9 @@
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
CREATE TABLE lotsa_connections (id integer, name text);


@@ -607,6 +607,3 @@ TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');


@@ -0,0 +1,8 @@
-- The default nodes for the citus test suite are coordinator and 2 worker nodes
-- Which we identify with master_port, worker_1_port, worker_2_port.
-- When needed in some tests, GetLocalNodeId() does not behave correctly,
-- So we remove the non default nodes. This test expects the non default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);


@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
SELECT citus_remove_node('localhost', 8887);
SELECT citus_remove_node('localhost', 9995);
SELECT citus_remove_node('localhost', 9992);
SELECT citus_remove_node('localhost', 9998);
SELECT citus_remove_node('localhost', 9997);
SELECT citus_remove_node('localhost', 8888);
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,