From 1383c442ea6aa767fac9d174dbb9f062651f0e25 Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Fri, 7 Jan 2022 12:18:06 +0300 Subject: [PATCH] Update multiple table integration sync --- .../distributed/metadata/node_metadata.c | 2 +- .../expected/multi_cluster_management.out | 662 ++++++++---------- .../regress/sql/multi_cluster_management.sql | 1 - src/test/regress/sql/single_node.sql | 6 - 4 files changed, 310 insertions(+), 361 deletions(-) diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index a8732749f..323f71fc1 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -895,6 +895,7 @@ SetUpDistributedTableWithDependencies(WorkerNode *newWorkerNode) PropagateNodeWideObjects(newWorkerNode); ReplicateAllDependenciesToNode(newWorkerNode->workerName, newWorkerNode->workerPort); + SetUpMultipleDistributedTableIntegrations(newWorkerNode); } if (ReplicateReferenceTablesOnActivate) @@ -1208,7 +1209,6 @@ ActivateNode(char *nodeName, int nodePort) } SetUpDistributedTableWithDependencies(workerNode); - SetUpMultipleDistributedTableIntegrations(workerNode); if (syncMetadata) { diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 78fb46b9e..56e90da08 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -1,86 +1,79 @@ SET citus.next_shard_id TO 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1; -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- Tests functions related to cluster membership -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to add a node that is already in the cluster SELECT * FROM master_add_node('localhost', :worker_1_port); - master_add_node ---------------------------------------------------------------------- + master_add_node +----------------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to remove a node (with no placements) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) -- verify that the node has been deleted SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57637) (1 row) -- try to disable a node with no placements see that node is s=removed SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) SELECT citus_disable_node('localhost', :worker_2_port); - citus_disable_node ---------------------------------------------------------------------- - + citus_disable_node +-------------------- + (1 row) SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync ---------------------------------------------------------------------- - + wait_until_metadata_sync +-------------------------- + (1 row) SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57637) (1 row) @@ -95,14 +88,14 @@ DETAIL: Citus determines the replication model based on the replication factor ROLLBACK; -- check that the rebalancer works even if there are no distributed tables SELECT * FROM get_rebalance_table_shards_plan(); - table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport ---------------------------------------------------------------------- + table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport +------------+---------+------------+------------+------------+------------+------------ (0 rows) SELECT * FROM rebalance_table_shards(); - rebalance_table_shards ---------------------------------------------------------------------- - + rebalance_table_shards +------------------------ + (1 row) -- TODO: Figure out why this is necessary, rebalance_table_shards shouldn't @@ -110,24 +103,22 @@ SELECT * FROM rebalance_table_shards(); TRUNCATE pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; SELECT * FROM citus_activate_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. 
Newly activated nodes will not get these objects created - citus_activate_node ---------------------------------------------------------------------- + citus_activate_node +--------------------- 3 (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- see that there are some active placements in the candidate node SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- 1220001 | 1 | localhost | 57638 1220003 | 1 | localhost | 57638 1220005 | 1 | localhost | 57638 @@ -140,12 +131,12 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -153,27 +144,27 @@ SELECT master_get_active_worker_nodes(); -- insert a row so that citus_disable_node() exercises closing connections CREATE TABLE test_reference_table (y int primary key, name text); SELECT create_reference_table('test_reference_table'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) INSERT INTO test_reference_table VALUES (1, '1'); -- try to remove a node with active placements and reference tables SELECT citus_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -- try to disable a node with active placements -- which should fail because there are some placements -- which are the only placements for a given shard SELECT citus_disable_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation 
complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -182,29 +173,26 @@ SELECT master_get_active_worker_nodes(); SELECT citus_disable_node('localhost.noexist', 2345); ERROR: node at "localhost.noexist:2345" does not exist -- drop the table without leaving a shard placement behind (messes up other tests) +-- TODO: Replication ref table multiple times SELECT master_activate_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - master_activate_node ---------------------------------------------------------------------- + master_activate_node +---------------------- 3 (1 row) DROP TABLE test_reference_table, cluster_management_test; -- create users like this so results of community and enterprise are same -SET citus.enable_object_propagation TO ON; SET client_min_messages TO ERROR; CREATE USER non_super_user; CREATE USER node_metadata_user; SELECT 1 FROM run_command_on_workers('CREATE USER node_metadata_user'); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 1 (2 rows) RESET client_min_messages; -SET citus.enable_object_propagation TO OFF; GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user; @@ -215,37 +203,37 @@ GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user; -- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO node_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +--------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO node_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +--------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO node_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +--------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO node_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +--------------------------- (localhost,57637,t,GRANT) (localhost,57638,t,GRANT) (2 rows) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node 
---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) -- Removing public schema from pg_dist_object because it breaks the next tests @@ -269,51 +257,46 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_ ERROR: permission denied for function master_update_node -- try to manipulate node metadata via privileged user SET ROLE node_metadata_user; -SET citus.enable_object_propagation TO off; -- prevent master activate node to actually connect for this test BEGIN; SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_remove_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port; - master_update_node ---------------------------------------------------------------------- - + master_update_node +-------------------- + (1 row) SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; - nodename | nodeport | noderole ---------------------------------------------------------------------- + nodename | nodeport | noderole +-----------+----------+----------- localhost | 57637 | primary localhost | 57640 | secondary localhost | 57641 | primary @@ -322,21 +305,18 @@ SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; ABORT; \c - postgres - :master_port SET citus.next_shard_id TO 1220000; -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57637) (1 row) -- restore the node for next tests SELECT * FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. 
Newly activated nodes will not get these objects created - master_add_node ---------------------------------------------------------------------- + master_add_node +----------------- 7 (1 row) @@ -344,14 +324,14 @@ ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART WITH 7; ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 6; CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -- mark all placements in the candidate node as inactive @@ -359,15 +339,15 @@ SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group; -- manual updates to pg_dist* tables are not automatically reflected to the workers, so we manually do that too SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=' || :'worker_2_group'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +-------------------------------- (localhost,57637,t,"UPDATE 8") (localhost,57638,t,"UPDATE 8") (2 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- 1220001 | 3 | localhost | 57638 1220003 | 3 | localhost | 57638 1220005 | 3 | localhost | 57638 @@ -380,12 +360,12 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER -- try to remove a node with only inactive placements and see that removal still fails SELECT master_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables SELECT master_get_active_worker_nodes(); - master_get_active_worker_nodes ---------------------------------------------------------------------- + master_get_active_worker_nodes +-------------------------------- (localhost,57638) (localhost,57637) (2 rows) @@ -393,15 +373,15 @@ SELECT master_get_active_worker_nodes(); -- mark all placements in the candidate node as to be deleted UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=:worker_2_group; SELECT run_command_on_workers('UPDATE 
pg_dist_placement SET shardstate=4 WHERE groupid=' || :'worker_2_group'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +-------------------------------- (localhost,57637,t,"UPDATE 8") (localhost,57638,t,"UPDATE 8") (2 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- 1220001 | 4 | localhost | 57638 1220003 | 4 | localhost | 57638 1220005 | 4 | localhost | 57638 @@ -439,15 +419,15 @@ WARNING: could not find any shard placements for shardId 1220017 WARNING: could not find any shard placements for shardId 1220017 WARNING: could not find any shard placements for shardId 1220017 WARNING: could not find any shard placements for shardId 1220017 - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- Check that colocated shards don't get created for shards that are to be deleted SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard ORDER BY shardstate, shardid; - logicalrelid | shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- + logicalrelid | shardid | shardstate | nodename | nodeport +-----------------------------------+---------+------------+-----------+---------- cluster_management_test | 1220000 | 1 | localhost | 57637 cluster_management_test | 1220002 | 1 | localhost | 57637 cluster_management_test | 1220004 | 1 | localhost | 57637 @@ -476,15 +456,15 @@ SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_ -- clean-up SELECT 1 FROM master_add_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group; SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=' || :'worker_2_group'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +-------------------------------- (localhost,57637,t,"UPDATE 8") (localhost,57638,t,"UPDATE 8") (2 rows) @@ -499,19 +479,19 @@ ERROR: node group 6 does not have a primary node -- when there is no node at all in the group we should get a different error DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port; SELECT run_command_on_workers('DELETE FROM pg_dist_node WHERE nodeport=' || :'worker_2_port'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +-------------------------------- (localhost,57637,t,"DELETE 1") (1 row) SELECT * FROM cluster_management_test; -ERROR: there is a shard placement in node group 6 but there are no nodes in that group +NOTICE: there is a shard placement in node group 6 but there are no nodes in that group +ERROR: no active placements were found for shard 1220001 -- clean-up SELECT * INTO old_placements FROM pg_dist_placement WHERE groupid = :worker_2_group; DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group; SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created +WARNING: could not find any shard placements for shardId 1220001 WARNING: could not find any shard placements for shardId 1220001 WARNING: could not find any shard placements for shardId 1220003 WARNING: could not find any shard placements for shardId 1220005 @@ -524,116 +504,110 @@ INSERT INTO pg_dist_placement SELECT * FROM old_placements; SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group; SELECT run_command_on_workers('UPDATE pg_dist_placement SET groupid = ' || :'new_group' || ' WHERE groupid = ' || :'worker_2_group'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +-------------------------------- (localhost,57637,t,"UPDATE 8") (localhost,57638,t,"UPDATE 0") (2 rows) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - + start_metadata_sync_to_node +----------------------------- + (1 row) -- test that you are allowed to remove secondary nodes even if there are placements SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary'); - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx +ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001 DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables SELECT master_remove_node('localhost', 9990); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) -- clean-up DROP TABLE cluster_management_test; -- check that adding/removing nodes are propagated to nodes with metadata SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - + start_metadata_sync_to_node +----------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +-----------+---------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +----------+---------- (0 rows) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that added nodes are not propagated to nodes without metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - + stop_metadata_sync_to_node +---------------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +----------+---------- (0 rows) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that removing two nodes in the same transaction works SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port); - master_remove_node | master_remove_node ---------------------------------------------------------------------- - | + master_remove_node | master_remove_node +--------------------+-------------------- + | (1 row) SELECT count(1) FROM pg_dist_node; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) @@ -641,18 +615,14 @@ SELECT count(1) FROM pg_dist_node; SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - master_add_node | master_add_node ---------------------------------------------------------------------- + master_add_node | master_add_node +-----------------+----------------- 11 | 12 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 11 | 9 | localhost | 57637 | default | t | t | primary | default | t | t 12 | 10 | localhost | 57638 | default | t | t | primary | default | t | t (2 rows) @@ -660,129 +630,116 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; -- check that mixed add/remove node commands work fine inside transaction BEGIN; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +----------+---------- (0 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - + start_metadata_sync_to_node +----------------------------- + (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +-----------+---------- localhost | 57638 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; - nodename | nodeport ---------------------------------------------------------------------- + nodename | nodeport +-----------+---------- localhost | 57638 (1 row) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; - master_remove_node ---------------------------------------------------------------------- - - + master_remove_node +-------------------- + + (2 rows) SELECT 1 FROM master_add_node('localhost', :worker_1_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) -- check that a distributed table can be created after adding a node in a transaction SET citus.shard_count TO 4; SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) CREATE TABLE temp(col1 text, col2 int); SELECT create_distributed_table('temp', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) INSERT INTO temp VALUES ('row1', 1); INSERT INTO temp VALUES ('row2', 2); COMMIT; SELECT col1, col2 FROM temp ORDER BY col1; - col1 | col2 ---------------------------------------------------------------------- + col1 | col2 +------+------ row1 | 1 row2 | 2 (2 rows) @@ -795,8 +752,8 @@ WHERE pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port; - count ---------------------------------------------------------------------- + count +------- 4 (1 row) @@ -807,19 +764,18 @@ DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - + stop_metadata_sync_to_node +---------------------------- + (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - + stop_metadata_sync_to_node +---------------------------- + (1 row) -- check that you can't add a primary to a non-default cluster @@ -832,64 +788,64 @@ ERROR: group 14 already has a primary node -- check that you can add secondaries and unavailable nodes to a group SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); - ?column? ---------------------------------------------------------------------- + ?column? +---------- 1 (1 row) -- add_inactive_node also works with secondaries SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) -- check that you can add a seconary to a non-default cluster, and activate it, and remove it SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); - master_add_inactive_node ---------------------------------------------------------------------- + master_add_inactive_node +-------------------------- 22 (1 row) SELECT master_activate_node('localhost', 9999); - master_activate_node ---------------------------------------------------------------------- + master_activate_node +---------------------- 22 (1 row) SELECT citus_disable_node('localhost', 9999); - citus_disable_node ---------------------------------------------------------------------- - + citus_disable_node +-------------------- + (1 row) SELECT public.wait_until_metadata_sync(); - wait_until_metadata_sync ---------------------------------------------------------------------- - + wait_until_metadata_sync +-------------------------- + (1 row) SELECT master_remove_node('localhost', 9999); - master_remove_node ---------------------------------------------------------------------- - + master_remove_node +-------------------- + (1 row) -- check that you can't manually add two primaries to a group INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole) VALUES ('localhost', 5000, :worker_1_group, 'primary'); ERROR: there cannot be two primary nodes in a group -CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE +CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 10 at RAISE UPDATE pg_dist_node SET noderole = 'primary' WHERE groupid = :worker_1_group AND nodeport = 9998; ERROR: there cannot be two primary nodes in a group -CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE +CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 18 at RAISE -- check that you can't manually add a primary to a non-default cluster INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) VALUES ('localhost', 5000, 1000, 'primary', 'olap'); @@ -902,8 +858,8 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, -- check that you /can/ add a secondary node to a non-default cluster SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); - master_add_node ---------------------------------------------------------------------- + master_add_node +----------------- 25 (1 row) @@ -915,14 +871,14 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole = 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 
'overflow' ); - master_add_node ---------------------------------------------------------------------- + master_add_node +----------------- 26 (1 row) SELECT * FROM pg_dist_node WHERE nodeport=8887; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------+---------+-----------+----------+----------+-------------+----------+-----------+-----------------------------------------------------------------+----------------+------------------ 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t (1 row) @@ -930,22 +886,22 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887; -- them in any of the remaining tests -- master_add_secondary_node lets you skip looking up the groupid SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); - master_add_secondary_node ---------------------------------------------------------------------- + master_add_secondary_node +--------------------------- 27 (1 row) SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); - master_add_secondary_node ---------------------------------------------------------------------- + master_add_secondary_node +--------------------------- 28 (1 row) SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); -ERROR: node at "localhost:xxxxx" does not exist +ERROR: node at "localhost:2000" does not exist SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); - master_add_secondary_node ---------------------------------------------------------------------- + master_add_secondary_node +--------------------------- 29 (1 row) @@ -958,52 +914,52 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port); ERROR: there is already another node with the specified hostname and port -- master_update_node moves a node SELECT master_update_node(:worker_1_node, 'somehost', 9000); - master_update_node ---------------------------------------------------------------------- - + master_update_node +-------------------- + (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------+---------+----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t (1 row) -- cleanup SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port); - master_update_node ---------------------------------------------------------------------- - + master_update_node +-------------------- + (1 row) SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards 
---------------------------------------------------------------------- + nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards +--------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------ 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t (1 row) SET client_min_messages TO ERROR; SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - start_metadata_sync_to_node ---------------------------------------------------------------------- - - + start_metadata_sync_to_node +----------------------------- + + (2 rows) RESET client_min_messages; SET citus.shard_replication_factor TO 1; CREATE TABLE test_dist (x int, y int); SELECT create_distributed_table('test_dist', 'x'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- testing behaviour when setting shouldhaveshards to false on partially empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); - master_set_node_property ---------------------------------------------------------------------- - + master_set_node_property +-------------------------- + (1 row) CREATE TABLE test_dist_colocated (x int, y int); @@ -1011,35 +967,35 @@ CREATE TABLE test_dist_non_colocated (x int, y int); CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist_colocated', 'x'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT create_reference_table('test_ref'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) -- colocated tables should still be placed on shouldhaveshards false nodes for safety SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 2 57638 | 2 (2 rows) @@ -1048,8 +1004,8 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 4 (1 row) @@ -1058,8 +1014,8 @@ WHERE logicalrelid = 
'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 4 (1 row) @@ -1067,8 +1023,8 @@ WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 1 57638 | 1 (2 rows) @@ -1077,9 +1033,9 @@ WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated; -- testing behaviour when setting shouldhaveshards to false on fully empty node SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); - master_set_node_property ---------------------------------------------------------------------- - + master_set_node_property +-------------------------- + (1 row) CREATE TABLE test_dist (x int, y int); @@ -1087,23 +1043,23 @@ CREATE TABLE test_dist_colocated (x int, y int); CREATE TABLE test_dist_non_colocated (x int, y int); CREATE TABLE test_ref (a int, b int); SELECT create_distributed_table('test_dist', 'x'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT create_reference_table('test_ref'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) -- distributed tables should not be placed on nodes with shouldhaveshards false SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 4 (1 row) @@ -1111,16 +1067,16 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 1 57638 | 1 (2 rows) SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); - master_set_node_property ---------------------------------------------------------------------- - + master_set_node_property +-------------------------- + (1 row) -- distributed tables should still not be placed on nodes that were switched to @@ -1128,8 +1084,8 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaves SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + 
nodeport | count +----------+------- 57637 | 4 (1 row) @@ -1137,22 +1093,22 @@ WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport; SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 1 57638 | 1 (2 rows) SELECT create_distributed_table('test_dist_colocated', 'x'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- colocated tables should not be placed on nodedes that were switched to @@ -1160,8 +1116,8 @@ SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 4 (1 row) @@ -1170,8 +1126,8 @@ WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport; - nodeport | count ---------------------------------------------------------------------- + nodeport | count +----------+------- 57637 | 2 57638 | 2 (2 rows) @@ -1181,8 +1137,8 @@ ERROR: only the 'shouldhaveshards' property can be set using this function DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; -- verify that at the end of this file, all primary nodes have metadata synced SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- t (1 row) diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index bc53a7b01..83d6c0f5b 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -79,7 +79,6 @@ SELECT master_get_active_worker_nodes(); SELECT citus_disable_node('localhost.noexist', 2345); -- drop the table without leaving a shard placement behind (messes up other tests) --- TODO: Replication ref table multiple times SELECT master_activate_node('localhost', :worker_2_port); DROP TABLE test_reference_table, cluster_management_test; diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index eb1895feb..0305cbd48 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -4,10 +4,6 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 90630500; -table pg_dist_node; -table pg_dist_partition; -\d - -- Ensure tuple data in explain analyze output is the same on all PG versions SET citus.enable_binary_protocol = TRUE; @@ -82,10 +78,8 @@ SELECT 1 FROM master_remove_node('localhost', :worker_1_port); SELECT 1 FROM citus_set_coordinator_host('127.0.0.1'); -- adding workers with specific IP is ok now -set citus.log_remote_commands to true; SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); SELECT 1 FROM master_remove_node('127.0.0.1', :worker_1_port); -reset citus.log_remote_commands; -- set the coordinator host back to localhost for the remainder of tests SELECT 1 FROM citus_set_coordinator_host('localhost');
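
Note on the functional change (added for readers skimming the expected-output churn above): the only code change in this patch is a relocation inside src/backend/distributed/metadata/node_metadata.c. The SetUpMultipleDistributedTableIntegrations() call is removed from ActivateNode() and is instead invoked from SetUpDistributedTableWithDependencies(), immediately after ReplicateAllDependenciesToNode(). The sketch below is reconstructed solely from the two hunks shown above and is not compilable on its own; the enclosing guard condition and everything the diff elides are placeholders (marked as assumptions), not the actual Citus source.

/*
 * SetUpDistributedTableWithDependencies() as implied by the first hunk.
 * Only the position of SetUpMultipleDistributedTableIntegrations() comes
 * from the patch; the guard name and elided statements are assumptions.
 */
static void
SetUpDistributedTableWithDependencies(WorkerNode *newWorkerNode)
{
	/* guard not shown in the diff; EnableDependencyCreation is an assumed name */
	if (EnableDependencyCreation)
	{
		PropagateNodeWideObjects(newWorkerNode);
		ReplicateAllDependenciesToNode(newWorkerNode->workerName,
									   newWorkerNode->workerPort);

		/* moved here from ActivateNode() by this patch */
		SetUpMultipleDistributedTableIntegrations(newWorkerNode);
	}

	if (ReplicateReferenceTablesOnActivate)
	{
		/* reference-table replication path, unchanged by this patch */
	}
}

Consistent with that relocation, the regression tests above no longer toggle citus.enable_object_propagation off while adding nodes during setup, which is why those SET lines and the "not creating distributed objects on worker" WARNING/DETAIL pairs drop out of the multi_cluster_management expected output, and why single_node.sql loses its temporary catalog dumps and citus.log_remote_commands toggles.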