mirror of https://github.com/citusdata/citus.git
Random tests refactoring (#7342)
While investigating replication slot leftovers in PR https://github.com/citusdata/citus/pull/7338, I ran into the following refactoring/cleanup that can be done in our test suite:

- Add a separate test to remove non-default nodes
- Remove the coordinator removal from the `add_coordinator` test; use the `remove_coordinator_from_metadata` test where needed
- Don't print nodeids in the `multi_multiuser_auth` and `multi_poolinfo_usage` tests
- Use `startswith` when checking for isolation or failure tests
- Add some dependencies accordingly in `run_test.py` for running flaky test schedules
parent e4ac3e6d9a
commit cdef2d5224
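Of the changes listed above, the `startswith` item is the behavioral one: the old recipe selection in `run_test.py` matched the substrings "isolation" and "failure" anywhere in a test name, whereas the new code only treats names that begin with those prefixes as isolation or failure tests. Below is a minimal sketch of that distinction; the `pick_recipe` helper and the sample names are illustrative only, and the real logic (shown in the diff further down) also checks the dependency's base schedule.

# Illustrative sketch only: pick_recipe() and the sample names are hypothetical.
# run_test.py's real selection also looks at dependencies.schedule before falling
# back to the test name, as the diff below shows.

def pick_recipe(test_name: str) -> str:
    # New behavior: only names that start with the keyword count.
    if test_name.startswith("isolation"):
        return "check-isolation-custom-schedule"
    if test_name.startswith("failure"):
        return "check-failure-custom-schedule"
    return "check-custom-schedule"


if __name__ == "__main__":
    # "isolation_select" is routed to the isolation recipe, while a hypothetical
    # name that merely contains the word (e.g. "multi_isolation_helpers") no
    # longer is; under the old substring check it would have been.
    for name in ["isolation_select", "multi_isolation_helpers", "failure_copy", "citus_shards"]:
        print(name, "->", pick_recipe(name))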
@@ -135,20 +135,10 @@ DEPS = {
     ),
     "alter_role_propagation": TestDeps("minimal_schedule"),
     "background_rebalance": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=3,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
     ),
     "background_rebalance_parallel": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=6,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
     ),
     "function_propagation": TestDeps("minimal_schedule"),
     "citus_shards": TestDeps("minimal_schedule"),
@@ -165,30 +155,17 @@ DEPS = {
     ),
     "schema_based_sharding": TestDeps("minimal_schedule"),
     "multi_sequence_default": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-            "multi_table_ddl",
-        ],
+        None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
     ),
     "grant_on_schema_propagation": TestDeps("minimal_schedule"),
     "propagate_extension_commands": TestDeps("minimal_schedule"),
     "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
     "multi_mx_node_metadata": TestDeps(
-        None,
-        [
-            "multi_extension",
-            "multi_test_helpers",
-            "multi_test_helpers_superuser",
-        ],
+        None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
     ),
     "multi_mx_function_table_reference": TestDeps(
         None,
-        [
-            "multi_cluster_management",
-            "remove_coordinator_from_metadata",
-        ],
+        ["multi_cluster_management", "remove_coordinator_from_metadata"],
         # because it queries node group id and it changes as we add / remove nodes
         repeatable=False,
     ),
@@ -201,15 +178,25 @@ DEPS = {
         ],
     ),
     "metadata_sync_helpers": TestDeps(
-        None,
-        [
-            "multi_mx_node_metadata",
-            "multi_cluster_management",
-        ],
+        None, ["multi_mx_node_metadata", "multi_cluster_management"]
     ),
-    "multi_utilities": TestDeps(
+    "multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
+    "multi_tenant_isolation_nonblocking": TestDeps(
+        "minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
+    ),
+    "remove_non_default_nodes": TestDeps(
+        None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
+    ),
+    "citus_split_shard_columnar_partitioned": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"]
+    ),
+    "add_coordinator": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
+    ),
+    "multi_multiuser_auth": TestDeps(
         "minimal_schedule",
-        ["multi_data_types"],
+        ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
+        repeatable=False,
     ),
 }
 
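For readers who do not have `run_test.py` open, each `TestDeps(...)` entry above bundles a base schedule, a list of tests that must run beforehand, and a couple of flags. The following is only a rough approximation inferred from the call sites in this diff; the `TestDepsSketch` name, field names, and defaults are assumptions, not the actual definition in `run_test.py`.

# Rough approximation of the TestDeps container used in the DEPS dict above.
# Everything here is inferred from how TestDeps is called in this diff; the real
# definition in run_test.py may differ.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class TestDepsSketch:
    schedule: Optional[str]  # base schedule to run first, e.g. "minimal_schedule", or None
    extra_tests: List[str] = field(default_factory=list)  # tests that must run before this one
    repeatable: bool = True  # False when rerunning the test against the same cluster is unsafe
    worker_count: int = 2  # number of worker nodes the test expects


# Mirrors one entry from the diff above:
background_rebalance = TestDepsSketch(
    None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
)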
|
@@ -303,9 +290,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
     worker_count = needed_worker_count(test_name, dependencies)
 
     # find suitable make recipe
-    if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
+    if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
+        "isolation"
+    ):
         make_recipe = "check-isolation-custom-schedule"
-    elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
+    elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
+        "failure"
+    ):
         make_recipe = "check-failure-custom-schedule"
     else:
         make_recipe = "check-custom-schedule"
@@ -418,10 +409,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
         if "upgrade_columnar_before" not in before_tests:
             before_tests.append("upgrade_columnar_before")
 
-        return TestDeps(
-            default_base_schedule(test_schedule, args),
-            before_tests,
-        )
+        return TestDeps(default_base_schedule(test_schedule, args), before_tests)
 
     # before_ tests leave stuff around on purpose for the after tests. So they
     # are not repeatable by definition.
@@ -2,13 +2,6 @@
 -- ADD_COORDINATOR
 --
 -- node trying to add itself without specifying groupid => 0 should error out
--- first remove the coordinator to for testing master_add_node for coordinator
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
 SELECT master_add_node('localhost', :master_port);
 ERROR: Node cannot add itself as a worker.
 HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);
@@ -12,19 +12,9 @@
 \set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
 \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
 \set bob_fallback_pw :bob_worker_1_pw
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 -- alice is a superuser so she can update own password
 CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;
@@ -6,19 +6,9 @@
 -- Test of ability to override host/port for a node
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 20000000;
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 CREATE TABLE lotsa_connections (id integer, name text);
 SELECT create_distributed_table('lotsa_connections', 'id');
@@ -1275,9 +1275,3 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
 TRUNCATE TABLE pg_catalog.pg_dist_colocation;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
-SELECT citus_set_coordinator_host('localhost');
- citus_set_coordinator_host
----------------------------------------------------------------------
-
-(1 row)
-
@@ -0,0 +1,13 @@
+-- The default nodes for the citus test suite are coordinator and 2 worker nodes
+-- Which we identify with master_port, worker_1_port, worker_2_port.
+-- When needed in some tests, GetLocalNodeId() does not behave correctly,
+-- So we remove the non default nodes. This tests expects the non default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
+ any_value
+---------------------------------------------------------------------
+
+(1 row)
+
@@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 81060000;
--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9995);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9992);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9998);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9997);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 8888);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
 -- BEGIN: Create distributed table and insert data.
 CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
     l_orderkey bigint not null,
@@ -295,6 +295,7 @@ test: multi_foreign_key_relation_graph
 # Replicating reference tables to coordinator. Add coordinator to pg_dist_node
 # and rerun some of the tests.
 # --------
+test: remove_coordinator_from_metadata
 test: add_coordinator
 test: replicate_reference_tables_to_coordinator
 test: citus_local_tables
@@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
 # Split tests go here.
 test: split_shard
 test: worker_split_copy_test
+test: remove_non_default_nodes
 test: worker_split_binary_copy_test
 test: worker_split_text_copy_test
 test: citus_split_shard_by_split_points_negative
@@ -3,8 +3,6 @@
 --
 
 -- node trying to add itself without specifying groupid => 0 should error out
--- first remove the coordinator to for testing master_add_node for coordinator
-SELECT master_remove_node('localhost', :master_port);
 SELECT master_add_node('localhost', :master_port);
 
 SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
@@ -16,9 +16,9 @@
 \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
 \set bob_fallback_pw :bob_worker_1_pw
 
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 
 -- alice is a superuser so she can update own password
@@ -7,9 +7,9 @@
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 20000000;
 
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 
 CREATE TABLE lotsa_connections (id integer, name text);
@@ -607,6 +607,3 @@ TRUNCATE TABLE pg_catalog.pg_dist_colocation;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
 
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
-
-SELECT citus_set_coordinator_host('localhost');
-
@@ -0,0 +1,8 @@
+-- The default nodes for the citus test suite are coordinator and 2 worker nodes
+-- Which we identify with master_port, worker_1_port, worker_2_port.
+-- When needed in some tests, GetLocalNodeId() does not behave correctly,
+-- So we remove the non default nodes. This tests expects the non default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 81060000;
 
--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
-SELECT citus_remove_node('localhost', 9995);
-SELECT citus_remove_node('localhost', 9992);
-SELECT citus_remove_node('localhost', 9998);
-SELECT citus_remove_node('localhost', 9997);
-SELECT citus_remove_node('localhost', 8888);
-
 -- BEGIN: Create distributed table and insert data.
 CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
     l_orderkey bigint not null,