Make coordinator always in metadata by default in regression tests. (#6847)

DESCRIPTION: Changes the regression test setups to add the coordinator
to metadata by default.

When creating a Citus cluster, the coordinator can be added to metadata
explicitly by running the `citus_set_coordinator_host` function. Adding the
coordinator to metadata allows creating Citus managed local tables.
Other Citus functionality is expected to be unaffected.
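
As a quick illustration (a minimal sketch, not part of this PR; the table
name `my_local_table` is hypothetical), having the coordinator in metadata
is what makes the following possible:

```sql
-- Register the coordinator itself in pg_dist_node.
SELECT citus_set_coordinator_host('localhost');

-- With the coordinator in metadata, a plain local table can be turned
-- into a Citus managed local table.
CREATE TABLE my_local_table (id int);
SELECT citus_add_local_table_to_metadata('my_local_table');
```

The arbitrary-config runs rely on the `citus.use_citus_managed_tables`
setting (visible in the config.py diff below) to do this conversion
automatically.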

This change adds the coordinator to metadata by default when creating
test clusters in regression tests.

There are 3 ways to run commands in a sql file (or a schedule, which is a
sequence of sql files) with Citus regression tests. Below is how this PR
adds the coordinator to metadata for each.

1. `make <schedule_name>`
Changed the sql files (sql/multi_cluster_management.sql and
sql/minimal_cluster_management.sql) that set up the test clusters so that
they call `citus_set_coordinator_host`. This ensures any subsequent tests
will have the coordinator in metadata by default.
 
2. `citus_tests/run_test.py <sql_file_name>`
Changed the python code that sets up the cluster to always call
`citus_set_coordinator_host`.
For the upgrade tests, a version check is included to make sure the
`citus_set_coordinator_host` function is available for the given Citus
version (see the sketch after this list).

3. `make check-arbitrary-configs`
Changed the python code that sets up the cluster to always call
`citus_set_coordinator_host`.
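
The version gate mentioned in item 2 boils down to a catalog query; a
minimal sketch of the check (the test harness runs the same query through
psql, as the diff below shows):

```sql
-- Returns 't' only when the installed Citus major version is >= 11,
-- i.e. when the citus_set_coordinator_host() UDF is available.
SELECT (split_part(extversion, '.', 1)::int >= 11) AS is_11
FROM pg_extension
WHERE extname = 'citus';
```

Gating on the major version keeps the upgrade tests working when the
"before" cluster runs a Citus release that predates the UDF.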

#6864 will be used to track the remaining work, which is to change the
tests where the coordinator is added/removed as a node.
Emel Şimşek 2023-04-17 14:14:37 +03:00 committed by GitHub
parent 8782ea1582
commit 2675a68218
7 changed files with 52 additions and 18 deletions

View File

@@ -25,6 +25,9 @@ import utils
 from psycopg import sql
 from utils import USER
 
+# This SQL returns true ( 't' ) if the Citus version >= 11.0.
+IS_CITUS_VERSION_11_SQL = "SELECT (split_part(extversion, '.', 1)::int >= 11) as is_11 FROM pg_extension WHERE extname = 'citus';"
+
 LINUX = False
 MACOS = False
 FREEBSD = False
@@ -272,9 +275,7 @@ def stop_metadata_to_workers(pg_path, worker_ports, coordinator_port):
 def add_coordinator_to_metadata(pg_path, coordinator_port):
-    command = "SELECT citus_add_node('localhost', {}, groupId := 0)".format(
-        coordinator_port
-    )
+    command = "SELECT citus_set_coordinator_host('localhost');"
     utils.psql(pg_path, coordinator_port, command)
@@ -327,6 +328,10 @@ def stop_databases(
         stop(node_name)
 
 
+def is_citus_set_coordinator_host_udf_exist(pg_path, port):
+    return utils.psql_capture(pg_path, port, IS_CITUS_VERSION_11_SQL) == b" t\n\n"
+
+
 def initialize_citus_cluster(bindir, datadir, settings, config):
     # In case there was a leftover from previous runs, stop the databases
     stop_databases(
@@ -339,11 +344,16 @@ def initialize_citus_cluster(bindir, datadir, settings, config):
         bindir, datadir, config.node_name_to_ports, config.name, config.env_variables
     )
     create_citus_extension(bindir, config.node_name_to_ports.values())
+
+    # In upgrade tests, it is possible that Citus version < 11.0
+    # where the citus_set_coordinator_host UDF does not exist.
+    if is_citus_set_coordinator_host_udf_exist(bindir, config.coordinator_port()):
+        add_coordinator_to_metadata(bindir, config.coordinator_port())
+
     add_workers(bindir, config.worker_ports, config.coordinator_port())
     if not config.is_mx:
         stop_metadata_to_workers(bindir, config.worker_ports, config.coordinator_port())
-    if config.add_coordinator_to_metadata:
-        add_coordinator_to_metadata(bindir, config.coordinator_port())
 
     config.setup_steps()

View File

@@ -110,7 +110,6 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller):
             "max_connections": 1200,
         }
         self.new_settings = {}
-        self.add_coordinator_to_metadata = False
         self.env_variables = {}
         self.skip_tests = []
@@ -166,7 +165,6 @@ class CitusDefaultClusterConfig(CitusBaseClusterConfig):
             "citus.use_citus_managed_tables": True,
         }
         self.settings.update(new_settings)
-        self.add_coordinator_to_metadata = True
         self.skip_tests = [
             # Alter Table statement cannot be run from an arbitrary node so this test will fail
             "arbitrary_configs_alter_table_add_constraint_without_name_create",
@@ -380,4 +378,3 @@ class PGUpgradeConfig(CitusBaseClusterConfig):
         self.old_datadir = self.temp_dir + "/oldData"
         self.new_datadir = self.temp_dir + "/newData"
         self.user = SUPER_USER_NAME
-        self.add_coordinator_to_metadata = True

View File

@@ -96,6 +96,9 @@ DEPS = {
     "multi_cluster_management": TestDeps(
         None, ["multi_test_helpers_superuser"], repeatable=False
     ),
+    "minimal_cluster_management": TestDeps(
+        None, ["multi_test_helpers_superuser"], repeatable=False
+    ),
     "create_role_propagation": TestDeps(None, ["multi_cluster_management"]),
     "single_node_enterprise": TestDeps(None),
     "single_node": TestDeps(None),

View File

@@ -20,6 +20,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_1_port);
        1
 (1 row)
 
+-- make sure coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
 -- Create the same colocation groups as multi_cluster_management.sql
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;

View File

@@ -25,6 +25,13 @@ SELECT citus_is_coordinator();
  t
 (1 row)
 
+-- make sure coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
 -- workers are not coordinator
 SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()');
  result
@@ -144,9 +151,9 @@ SELECT citus_disable_node('localhost', :worker_1_port, synchronous:=true);
 (1 row)
 
 SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$);
-   run_command_on_workers
+    run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57638,t,"{f,t}")
+ (localhost,57638,t,"{t,f,t}")
 (1 row)
 
 SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
@@ -163,9 +170,9 @@ SELECT citus_disable_node('localhost', :worker_2_port, synchronous:=true);
 (1 row)
 
 SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$);
-   run_command_on_workers
+    run_command_on_workers
 ---------------------------------------------------------------------
- (localhost,57637,t,"{t,f}")
+ (localhost,57637,t,"{t,t,f}")
 (1 row)
 
 SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
@@ -248,7 +255,7 @@ ERROR: node at "localhost.noexist:2345" does not exist
 SELECT master_activate_node('localhost', :worker_2_port);
  master_activate_node
 ---------------------------------------------------------------------
-                    3
+                    4
 (1 row)
 
 DROP TABLE test_reference_table, cluster_management_test;
@@ -358,9 +365,10 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_
 SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
  nodename | nodeport | noderole
 ---------------------------------------------------------------------
+ localhost | 57636 | primary
  localhost | 57637 | primary
  localhost | 57640 | secondary
-(2 rows)
+(3 rows)
 
 ABORT;
 \c - postgres - :master_port
@@ -377,7 +385,7 @@ SELECT master_get_active_worker_nodes();
 SELECT * FROM master_add_node('localhost', :worker_2_port);
  master_add_node
 ---------------------------------------------------------------------
-               6
+               7
 (1 row)
 
 ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART WITH 7;
@@ -579,7 +587,7 @@ SELECT
 SELECT count(1) FROM pg_dist_node;
  count
 ---------------------------------------------------------------------
-     0
+     1
 (1 row)
 
 -- check that adding two nodes in the same transaction works
@@ -594,9 +602,10 @@ SELECT
 SELECT * FROM pg_dist_node ORDER BY nodeid;
  nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
+  3 |  0 | localhost | 57636 | default | t | t | primary | default | t | f
  11 |  9 | localhost | 57637 | default | t | t | primary | default | t | t
  12 | 10 | localhost | 57638 | default | t | t | primary | default | t | t
-(2 rows)
+(3 rows)
 
 -- check that mixed add/remove node commands work fine inside transaction
 BEGIN;
@@ -669,7 +678,8 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 ---------------------------------------------------------------------
 
 
-(2 rows)
+
+(3 rows)
 
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
  ?column?

View File

@@ -13,6 +13,9 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14;
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
+
+-- make sure coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
 -- Create the same colocation groups as multi_cluster_management.sql
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;

View File

@@ -13,6 +13,10 @@ RESET citus.metadata_sync_mode;
 -- I am coordinator
 SELECT citus_is_coordinator();
+
+-- make sure coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
+
 -- workers are not coordinator
 SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()');