diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py
index d751bad5a..d0ac688f9 100644
--- a/src/test/regress/citus_tests/common.py
+++ b/src/test/regress/citus_tests/common.py
@@ -25,6 +25,9 @@ import utils
 from psycopg import sql
 from utils import USER
 
+# This SQL returns true ('t') if the Citus version is >= 11.0.
+IS_CITUS_VERSION_11_SQL = "SELECT (split_part(extversion, '.', 1)::int >= 11) as is_11 FROM pg_extension WHERE extname = 'citus';"
+
 LINUX = False
 MACOS = False
 FREEBSD = False
@@ -272,9 +275,7 @@ def stop_metadata_to_workers(pg_path, worker_ports, coordinator_port):
 
 
 def add_coordinator_to_metadata(pg_path, coordinator_port):
-    command = "SELECT citus_add_node('localhost', {}, groupId := 0)".format(
-        coordinator_port
-    )
+    command = "SELECT citus_set_coordinator_host('localhost');"
     utils.psql(pg_path, coordinator_port, command)
 
 
@@ -327,6 +328,10 @@ def stop_databases(
         stop(node_name)
 
 
+def is_citus_set_coordinator_host_udf_exist(pg_path, port):
+    return utils.psql_capture(pg_path, port, IS_CITUS_VERSION_11_SQL) == b" t\n\n"
+
+
 def initialize_citus_cluster(bindir, datadir, settings, config):
     # In case there was a leftover from previous runs, stop the databases
     stop_databases(
@@ -339,11 +344,16 @@ def initialize_citus_cluster(bindir, datadir, settings, config):
         bindir, datadir, config.node_name_to_ports, config.name, config.env_variables
     )
     create_citus_extension(bindir, config.node_name_to_ports.values())
+
+    # In upgrade tests, it is possible that the Citus version is < 11.0,
+    # in which case the citus_set_coordinator_host UDF does not exist.
+    if is_citus_set_coordinator_host_udf_exist(bindir, config.coordinator_port()):
+        add_coordinator_to_metadata(bindir, config.coordinator_port())
+
     add_workers(bindir, config.worker_ports, config.coordinator_port())
     if not config.is_mx:
         stop_metadata_to_workers(bindir, config.worker_ports, config.coordinator_port())
-    if config.add_coordinator_to_metadata:
-        add_coordinator_to_metadata(bindir, config.coordinator_port())
+
     config.setup_steps()
diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py
index f25f0a477..c5b7ce010 100644
--- a/src/test/regress/citus_tests/config.py
+++ b/src/test/regress/citus_tests/config.py
@@ -110,7 +110,6 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller):
             "max_connections": 1200,
         }
         self.new_settings = {}
-        self.add_coordinator_to_metadata = False
         self.env_variables = {}
         self.skip_tests = []
 
@@ -166,7 +165,6 @@ class CitusDefaultClusterConfig(CitusBaseClusterConfig):
             "citus.use_citus_managed_tables": True,
         }
         self.settings.update(new_settings)
-        self.add_coordinator_to_metadata = True
         self.skip_tests = [
             # Alter Table statement cannot be run from an arbitrary node so this test will fail
             "arbitrary_configs_alter_table_add_constraint_without_name_create",
@@ -380,4 +378,3 @@ class PGUpgradeConfig(CitusBaseClusterConfig):
         self.old_datadir = self.temp_dir + "/oldData"
         self.new_datadir = self.temp_dir + "/newData"
         self.user = SUPER_USER_NAME
-        self.add_coordinator_to_metadata = True
diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py
index a34a6e0a7..ca969b9c1 100755
--- a/src/test/regress/citus_tests/run_test.py
+++ b/src/test/regress/citus_tests/run_test.py
@@ -96,6 +96,9 @@ DEPS = {
     "multi_cluster_management": TestDeps(
         None, ["multi_test_helpers_superuser"], repeatable=False
     ),
+    "minimal_cluster_management": TestDeps(
+        None, ["multi_test_helpers_superuser"], repeatable=False
+    ),
"create_role_propagation": TestDeps(None, ["multi_cluster_management"]), "single_node_enterprise": TestDeps(None), "single_node": TestDeps(None), diff --git a/src/test/regress/expected/minimal_cluster_management.out b/src/test/regress/expected/minimal_cluster_management.out index af3ac84f3..d05e83ed5 100644 --- a/src/test/regress/expected/minimal_cluster_management.out +++ b/src/test/regress/expected/minimal_cluster_management.out @@ -20,6 +20,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_1_port); 1 (1 row) +-- make sure coordinator is always in metadata. +SELECT citus_set_coordinator_host('localhost'); + citus_set_coordinator_host +--------------------------------------------------------------------- + +(1 row) + -- Create the same colocation groups as multi_cluster_management.sql SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 08ac5a807..7d5d25d57 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -25,6 +25,13 @@ SELECT citus_is_coordinator(); t (1 row) +-- make sure coordinator is always in metadata. +SELECT citus_set_coordinator_host('localhost'); + citus_set_coordinator_host +--------------------------------------------------------------------- + +(1 row) + -- workers are not coordinator SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()'); result @@ -144,9 +151,9 @@ SELECT citus_disable_node('localhost', :worker_1_port, synchronous:=true); (1 row) SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- - (localhost,57638,t,"{f,t}") + (localhost,57638,t,"{t,f,t}") (1 row) SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); @@ -163,9 +170,9 @@ SELECT citus_disable_node('localhost', :worker_2_port, synchronous:=true); (1 row) SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); - run_command_on_workers + run_command_on_workers --------------------------------------------------------------------- - (localhost,57637,t,"{t,f}") + (localhost,57637,t,"{t,t,f}") (1 row) SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); @@ -248,7 +255,7 @@ ERROR: node at "localhost.noexist:2345" does not exist SELECT master_activate_node('localhost', :worker_2_port); master_activate_node --------------------------------------------------------------------- - 3 + 4 (1 row) DROP TABLE test_reference_table, cluster_management_test; @@ -358,9 +365,10 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_ SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; nodename | nodeport | noderole --------------------------------------------------------------------- + localhost | 57636 | primary localhost | 57637 | primary localhost | 57640 | secondary -(2 rows) +(3 rows) ABORT; \c - postgres - :master_port @@ -377,7 +385,7 @@ SELECT master_get_active_worker_nodes(); SELECT * FROM master_add_node('localhost', :worker_2_port); master_add_node --------------------------------------------------------------------- - 6 + 7 (1 row) ALTER 
@@ -579,7 +587,7 @@ SELECT
 SELECT count(1) FROM pg_dist_node;
  count
 ---------------------------------------------------------------------
-     0
+     1
 (1 row)
 
 -- check that adding two nodes in the same transaction works
@@ -594,9 +602,10 @@ SELECT
 SELECT * FROM pg_dist_node ORDER BY nodeid;
  nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
+      3 |       0 | localhost |    57636 | default  | t           | t        | primary  | default     | t              | f
     11 |       9 | localhost |    57637 | default  | t           | t        | primary  | default     | t              | t
     12 |      10 | localhost |    57638 | default  | t           | t        | primary  | default     | t              | t
-(2 rows)
+(3 rows)
 
 -- check that mixed add/remove node commands work fine inside transaction
 BEGIN;
@@ -669,7 +678,8 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 ---------------------------------------------------------------------
 
 
-(2 rows)
+
+(3 rows)
 
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
  ?column?
diff --git a/src/test/regress/sql/minimal_cluster_management.sql b/src/test/regress/sql/minimal_cluster_management.sql
index 424daccac..30f69d43d 100644
--- a/src/test/regress/sql/minimal_cluster_management.sql
+++ b/src/test/regress/sql/minimal_cluster_management.sql
@@ -13,6 +13,9 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14;
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 
+-- make sure the coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
+
 -- Create the same colocation groups as multi_cluster_management.sql
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;
diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql
index 126d8385e..367fa9d58 100644
--- a/src/test/regress/sql/multi_cluster_management.sql
+++ b/src/test/regress/sql/multi_cluster_management.sql
@@ -13,6 +13,10 @@ RESET citus.metadata_sync_mode;
 
 -- I am coordinator
 SELECT citus_is_coordinator();
+
+-- make sure the coordinator is always in metadata.
+SELECT citus_set_coordinator_host('localhost');
+
 -- workers are not coordinator
 SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()');
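
Note for readers skimming the patch: the core of the common.py change is a version gate. The harness probes the installed Citus extension's major version over psql and calls citus_set_coordinator_host() only when the server is new enough (>= 11.0) to have that UDF; on older servers, as hit by the upgrade tests, the coordinator is simply not registered. Below is a minimal, standalone sketch of that technique, not part of the patch: it assumes psql is on PATH and hard-codes the coordinator port 57636 seen in the expected outputs above, whereas the harness itself goes through utils.psql / utils.psql_capture with an explicit bindir.

    #!/usr/bin/env python3
    import subprocess

    PSQL = "psql"  # assumption: psql on PATH; the harness uses an explicit bindir
    COORDINATOR_PORT = 57636  # coordinator port used in these regression tests

    # Same probe as IS_CITUS_VERSION_11_SQL above: compare the major version.
    IS_CITUS_VERSION_11_SQL = (
        "SELECT (split_part(extversion, '.', 1)::int >= 11) AS is_11 "
        "FROM pg_extension WHERE extname = 'citus';"
    )

    def citus_is_at_least_11(port):
        # -t (tuples only) plus -A (unaligned) makes the output exactly "t" or
        # "f", so there is no need to match psql's aligned output byte-for-byte.
        out = subprocess.run(
            [PSQL, "-p", str(port), "-tA", "-c", IS_CITUS_VERSION_11_SQL],
            check=True, capture_output=True, text=True,
        ).stdout.strip()
        return out == "t"

    def add_coordinator_to_metadata(port):
        # citus_set_coordinator_host() registers the coordinator (group 0) in
        # pg_dist_node, replacing the old citus_add_node(..., groupId := 0) call.
        subprocess.run(
            [PSQL, "-p", str(port), "-c",
             "SELECT citus_set_coordinator_host('localhost');"],
            check=True,
        )

    if __name__ == "__main__":
        # Mirrors initialize_citus_cluster(): register the coordinator only
        # when the UDF exists (Citus >= 11.0).
        if citus_is_at_least_11(COORDINATOR_PORT):
            add_coordinator_to_metadata(COORDINATOR_PORT)

One design observation: the patch's is_citus_set_coordinator_host_udf_exist() compares psql_capture's raw bytes against b" t\n\n", which couples the check to psql's aligned output format; running the probe unaligned, as in the sketch, keeps it independent of column padding.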