SET citus.next_shard_id TO 1220000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
-- Tests functions related to cluster membership
-- add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- get the active nodes
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
(2 rows)

-- try to add a node that is already in the cluster
SELECT * FROM master_add_node('localhost', :worker_1_port);
 master_add_node
---------------------------------------------------------------------
               1
(1 row)

-- make sure that when there are no distributed tables, we don't crash
SELECT 1 FROM get_rebalance_table_shards_plan();
 ?column?
---------------------------------------------------------------------
(0 rows)

-- get the active nodes
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
(2 rows)

-- try to remove a node (with no placements)
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

-- verify that the node has been deleted
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57637)
(1 row)

-- try to disable a node with no placements and see that the node is removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT master_disable_node('localhost', :worker_2_port);
 master_disable_node
---------------------------------------------------------------------

(1 row)

SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57637)
(1 row)

-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
-- test warnings on setting the deprecated guc for replication model
BEGIN;
SET citus.replication_model to 'statement';
NOTICE: Setting citus.replication_model has no effect. Please use citus.shard_replication_factor instead.
DETAIL: Citus determines the replication model based on the replication factor and the replication models of the colocated shards. If a colocated table is present, the replication model is inherited. Otherwise 'streaming' replication is preferred if supported by the replication factor.
ROLLBACK;
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 citus_activate_node
---------------------------------------------------------------------
                   3
(1 row)

CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- see that there are some active placements in the candidate node
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename  | nodeport
---------------------------------------------------------------------
 1220001 |          1 | localhost |    57638
 1220003 |          1 | localhost |    57638
 1220005 |          1 | localhost |    57638
 1220007 |          1 | localhost |    57638
 1220009 |          1 | localhost |    57638
 1220011 |          1 | localhost |    57638
 1220013 |          1 | localhost |    57638
 1220015 |          1 | localhost |    57638
(8 rows)

-- try to remove a node with active placements and see that node removal fails
SELECT master_remove_node('localhost', :worker_2_port);
ERROR: cannot remove the primary node of a node group which has shard placements
HINT: To proceed, either drop the distributed tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
(2 rows)

-- insert a row so that master_disable_node() exercises closing connections
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
 create_reference_table
---------------------------------------------------------------------

(1 row)

INSERT INTO test_reference_table VALUES (1, '1');
-- try to remove a node with active placements and reference tables
SELECT citus_remove_node('localhost', :worker_2_port);
ERROR: cannot remove the primary node of a node group which has shard placements
HINT: To proceed, either drop the distributed tables or use undistribute_table() function to convert them to local tables
-- try to disable a node with active placements and see that the node is removed
-- observe that a notification is displayed
SELECT master_disable_node('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
 master_disable_node
---------------------------------------------------------------------

(1 row)

SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57637)
(1 row)

-- try to disable a node which does not exist and see that an error is thrown
SELECT master_disable_node('localhost.noexist', 2345);
ERROR: node at "localhost.noexist:2345" does not exist
-- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
NOTICE: Replicating reference table "test_reference_table" to the node localhost:xxxxx
 master_activate_node
---------------------------------------------------------------------
                    3
(1 row)

DROP TABLE test_reference_table;
SELECT master_disable_node('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
 master_disable_node
---------------------------------------------------------------------

(1 row)

CREATE USER non_super_user;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER node_metadata_user;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_add_secondary_node(text,int,text,int,name) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_disable_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_remove_node(text,int) TO node_metadata_user;
GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_metadata_user;
-- Removing public schema from pg_dist_object because it breaks the next tests
DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
-- try to manipulate node metadata via non-super user
SET ROLE non_super_user;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_add_inactive_node
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_activate_node
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_disable_node
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_remove_node
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
ERROR: permission denied for function master_add_node
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
ERROR: permission denied for function master_add_secondary_node
SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
ERROR: permission denied for function master_update_node
-- try to manipulate node metadata via privileged user
SET ROLE node_metadata_user;
SET citus.enable_object_propagation TO off; -- prevent master_activate_node from actually connecting for this test
BEGIN;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
 master_update_node
---------------------------------------------------------------------

(1 row)

SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
 nodename  | nodeport | noderole
---------------------------------------------------------------------
 localhost |    57637 | primary
 localhost |    57639 | primary
 localhost |    57640 | secondary
 localhost |    57641 | primary
(4 rows)

ABORT;
\c - postgres - :master_port
SET citus.next_shard_id TO 1220016;
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
SET citus.shard_replication_factor TO 1;
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57637)
(1 row)

-- restore the node for next tests
SELECT * FROM master_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 master_activate_node
---------------------------------------------------------------------
                    3
(1 row)

-- try to remove a node with active placements and see that node removal fails
SELECT master_remove_node('localhost', :worker_2_port);
ERROR: cannot remove the primary node of a node group which has shard placements
HINT: To proceed, either drop the distributed tables or use undistribute_table() function to convert them to local tables
-- mark all placements in the candidate node as inactive
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group;
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename  | nodeport
---------------------------------------------------------------------
 1220001 |          3 | localhost |    57638
 1220003 |          3 | localhost |    57638
 1220005 |          3 | localhost |    57638
 1220007 |          3 | localhost |    57638
 1220009 |          3 | localhost |    57638
 1220011 |          3 | localhost |    57638
 1220013 |          3 | localhost |    57638
 1220015 |          3 | localhost |    57638
(8 rows)

-- try to remove a node with only inactive placements and see that removal still fails
SELECT master_remove_node('localhost', :worker_2_port);
ERROR: cannot remove the primary node of a node group which has shard placements
HINT: To proceed, either drop the distributed tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57638)
 (localhost,57637)
(2 rows)

-- mark all placements in the candidate node as to be deleted
UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=:worker_2_group;
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename  | nodeport
---------------------------------------------------------------------
 1220001 |          4 | localhost |    57638
 1220003 |          4 | localhost |    57638
 1220005 |          4 | localhost |    57638
 1220007 |          4 | localhost |    57638
 1220009 |          4 | localhost |    57638
 1220011 |          4 | localhost |    57638
 1220013 |          4 | localhost |    57638
 1220015 |          4 | localhost |    57638
(8 rows)

CREATE TABLE cluster_management_test_colocated (col_1 text, col_2 int);
-- Check that we warn the user about colocated shards that will not get created for shards that do not have active placements
SELECT create_distributed_table('cluster_management_test_colocated', 'col_1', 'hash', colocate_with => 'cluster_management_test');
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220019
WARNING: could not find any shard placements for shardId 1220021
WARNING: could not find any shard placements for shardId 1220023
WARNING: could not find any shard placements for shardId 1220025
WARNING: could not find any shard placements for shardId 1220027
WARNING: could not find any shard placements for shardId 1220029
WARNING: could not find any shard placements for shardId 1220031
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Check that colocated shards don't get created for shards that are to be deleted
SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard ORDER BY shardstate, shardid;
            logicalrelid           | shardid | shardstate | nodename  | nodeport
---------------------------------------------------------------------
 cluster_management_test           | 1220000 |          1 | localhost |    57637
 cluster_management_test           | 1220002 |          1 | localhost |    57637
 cluster_management_test           | 1220004 |          1 | localhost |    57637
 cluster_management_test           | 1220006 |          1 | localhost |    57637
 cluster_management_test           | 1220008 |          1 | localhost |    57637
 cluster_management_test           | 1220010 |          1 | localhost |    57637
 cluster_management_test           | 1220012 |          1 | localhost |    57637
 cluster_management_test           | 1220014 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220016 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220018 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220020 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220022 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220024 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220026 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220028 |          1 | localhost |    57637
 cluster_management_test_colocated | 1220030 |          1 | localhost |    57637
 cluster_management_test           | 1220001 |          4 | localhost |    57638
 cluster_management_test           | 1220003 |          4 | localhost |    57638
 cluster_management_test           | 1220005 |          4 | localhost |    57638
 cluster_management_test           | 1220007 |          4 | localhost |    57638
 cluster_management_test           | 1220009 |          4 | localhost |    57638
 cluster_management_test           | 1220011 |          4 | localhost |    57638
 cluster_management_test           | 1220013 |          4 | localhost |    57638
 cluster_management_test           | 1220015 |          4 | localhost |    57638
(24 rows)

SELECT * INTO removed_placements FROM pg_dist_placement WHERE shardstate = 4;
-- try to remove a node with only to-be-deleted placements and see that removal succeeds
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------------------------------------------
 (localhost,57637)
(1 row)

SELECT master_add_node('localhost', :worker_2_port, groupId := :worker_2_group);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 master_add_node
---------------------------------------------------------------------
               7
(1 row)

-- put removed placements back for testing purposes (in practice we wouldn't have only old placements for a shard)
INSERT INTO pg_dist_placement SELECT * FROM removed_placements;
-- clean-up
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group;
SET client_min_messages TO ERROR;
DROP TABLE cluster_management_test_colocated;
RESET client_min_messages;
-- when there is no primary we should get a pretty error
UPDATE pg_dist_node SET noderole = 'secondary' WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;
ERROR: node group 3 does not have a primary node
-- when there is no node at all in the group we should get a different error
DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;
ERROR: there is a shard placement in node group 3 but there are no nodes in that group
-- clean-up
SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
-- test that you are allowed to remove secondary nodes even if there are placements
SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
ERROR: cannot remove the primary node of a node group which has shard placements
HINT: To proceed, either drop the distributed tables or use undistribute_table() function to convert them to local tables
SELECT master_remove_node('localhost', 9990);
 master_remove_node
---------------------------------------------------------------------

(1 row)

-- clean-up
DROP TABLE cluster_management_test;
-- check that adding/removing nodes is propagated to nodes with metadata
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename  | nodeport
---------------------------------------------------------------------
 localhost |    57638
(1 row)

\c - - - :master_port
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)

\c - - - :master_port
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
-- check that added nodes are not propagated to nodes without metadata
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)

\c - - - :master_port
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
-- check that removing two nodes in the same transaction works
SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port);
 master_remove_node | master_remove_node
---------------------------------------------------------------------
                    |
(1 row)

SELECT count(1) FROM pg_dist_node;
 count
---------------------------------------------------------------------
     0
(1 row)

-- check that adding two nodes in the same transaction works
SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 master_add_node | master_add_node
---------------------------------------------------------------------
              12 |              13
(1 row)

SELECT * FROM pg_dist_node ORDER BY nodeid;
 nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
     12 |       9 | localhost |    57637 | default  | f           | t        | primary  | default     | f              | t
     13 |      10 | localhost |    57638 | default  | f           | t        | primary  | default     | f              | t
(2 rows)

-- check that mixed add/remove node commands work fine inside a transaction
BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

COMMIT;
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
---------------------------------------------------------------------
(0 rows)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

COMMIT;
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename  | nodeport
---------------------------------------------------------------------
 localhost |    57638
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename  | nodeport
---------------------------------------------------------------------
 localhost |    57638
(1 row)

\c - - - :master_port
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 master_remove_node
---------------------------------------------------------------------


(2 rows)

SELECT 1 FROM master_add_node('localhost', :worker_1_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- check that a distributed table can be created after adding a node in a transaction
SET citus.shard_count TO 4;
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
 ?column?
---------------------------------------------------------------------
        1
(1 row)

CREATE TABLE temp(col1 text, col2 int);
SELECT create_distributed_table('temp', 'col1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO temp VALUES ('row1', 1);
INSERT INTO temp VALUES ('row2', 2);
COMMIT;
SELECT col1, col2 FROM temp ORDER BY col1;
 col1 | col2
---------------------------------------------------------------------
 row1 |    1
 row2 |    2
(2 rows)

SELECT count(*) FROM pg_dist_shard_placement, pg_dist_shard WHERE pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port;
 count
---------------------------------------------------------------------
     4
(1 row)

DROP TABLE temp;
\c - - - :worker_1_port
DELETE FROM pg_dist_partition;
DELETE FROM pg_dist_shard;
DELETE FROM pg_dist_placement;
DELETE FROM pg_dist_node;
\c - - - :master_port
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
NOTICE: dropping metadata on the node (localhost,57638)
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

-- check that you can't add a primary to a non-default cluster
SELECT master_add_node('localhost', 9999, nodecluster => 'olap');
ERROR: primaries must be added to the default cluster
-- check that you can't add more than one primary to a group
SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset
SELECT master_add_node('localhost', 9999, groupid => :worker_1_group, noderole => 'primary');
ERROR: group 14 already has a primary node
-- check that you can add secondaries and unavailable nodes to a group
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary');
 ?column?
---------------------------------------------------------------------
        1
(1 row)

SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable');
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- add_inactive_node also works with secondaries
SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary');
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- check that you can add a secondary to a non-default cluster, activate it, and remove it
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
 master_add_inactive_node
---------------------------------------------------------------------
                        23
(1 row)

SELECT master_activate_node('localhost', 9999);
 master_activate_node
---------------------------------------------------------------------
                   23
(1 row)

SELECT master_disable_node('localhost', 9999);
 master_disable_node
---------------------------------------------------------------------

(1 row)

SELECT master_remove_node('localhost', 9999);
 master_remove_node
---------------------------------------------------------------------

(1 row)

-- check that you can't manually add two primaries to a group
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole) VALUES ('localhost', 5000, :worker_1_group, 'primary');
ERROR: there cannot be two primary nodes in a group
CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE
UPDATE pg_dist_node SET noderole = 'primary' WHERE groupid = :worker_1_group AND nodeport = 9998;
ERROR: there cannot be two primary nodes in a group
CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE
-- check that you can't manually add a primary to a non-default cluster
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) VALUES ('localhost', 5000, 1000, 'primary', 'olap');
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
DETAIL: Failing row contains (25, 1000, localhost, 5000, default, f, t, primary, olap, f, t).
UPDATE pg_dist_node SET nodecluster = 'olap' WHERE nodeport = :worker_1_port;
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
DETAIL: Failing row contains (17, 14, localhost, 57637, default, f, t, primary, olap, f, t).
-- check that you /can/ add a secondary node to a non-default cluster
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
 master_add_node
---------------------------------------------------------------------
              26
(1 row)

-- check that super-long cluster names are truncated
SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole => 'secondary', nodecluster=>
	'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
	'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
	'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
	'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.'
	'overflow'
);
 master_add_node
---------------------------------------------------------------------
              27
(1 row)

SELECT * FROM pg_dist_node WHERE nodeport=8887;
 nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole  | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
     27 |      14 | localhost |     8887 | default  | f           | t        | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
(1 row)

-- don't remove the secondary and unavailable nodes, check that no commands are sent to
-- them in any of the remaining tests
-- master_add_secondary_node lets you skip looking up the groupid
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
 master_add_secondary_node
---------------------------------------------------------------------
                        28
(1 row)

SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
 master_add_secondary_node
---------------------------------------------------------------------
                        29
(1 row)

SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
ERROR: node at "localhost:xxxxx" does not exist
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
 master_add_secondary_node
---------------------------------------------------------------------
                        30
(1 row)

SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
-- master_update_node checks node exists
SELECT master_update_node(100, 'localhost', 8000);
ERROR: node 100 not found
-- master_update_node disallows aliasing existing node
SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port);
ERROR: there is already another node with the specified hostname and port
-- master_update_node moves a node
SELECT master_update_node(:worker_1_node, 'somehost', 9000);
 master_update_node
---------------------------------------------------------------------

(1 row)

SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
     17 |      14 | somehost |     9000 | default  | f           | t        | primary  | default     | f              | t
(1 row)

-- cleanup
SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
 master_update_node
---------------------------------------------------------------------

(1 row)

SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
 nodeid | groupid | nodename  | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
     17 |      14 | localhost |    57637 | default  | f           | t        | primary  | default     | f              | t
(1 row)

SET citus.shard_replication_factor TO 1;
CREATE TABLE test_dist (x int, y int);
SELECT create_distributed_table('test_dist', 'x');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- testing behaviour when setting shouldhaveshards to false on a partially empty node
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
 master_set_node_property
---------------------------------------------------------------------

(1 row)

CREATE TABLE test_dist_colocated (x int, y int);
CREATE TABLE test_dist_non_colocated (x int, y int);
CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int);
CREATE TABLE test_ref (a int, b int);
SELECT create_distributed_table('test_dist_colocated', 'x');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('test_ref');
 create_reference_table
---------------------------------------------------------------------

(1 row)

-- colocated tables should still be placed on shouldhaveshards false nodes for safety
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     2
    57638 |     2
(2 rows)

-- non-colocated tables should no longer be placed on shouldhaveshards false nodes
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     4
(1 row)

-- this table should be colocated with the test_dist_non_colocated table,
-- i.e. placed only on nodes with shouldhaveshards true
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     4
(1 row)

-- reference tables should be placed on nodes with shouldhaveshards false
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     1
    57638 |     1
(2 rows)

-- cleanup for next test
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated;
-- testing behaviour when setting shouldhaveshards to false on a fully empty node
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
 master_set_node_property
---------------------------------------------------------------------

(1 row)

CREATE TABLE test_dist (x int, y int);
CREATE TABLE test_dist_colocated (x int, y int);
CREATE TABLE test_dist_non_colocated (x int, y int);
CREATE TABLE test_ref (a int, b int);
SELECT create_distributed_table('test_dist', 'x');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('test_ref');
 create_reference_table
---------------------------------------------------------------------

(1 row)

-- distributed tables should not be placed on nodes with shouldhaveshards false
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     4
(1 row)

-- reference tables should be placed on nodes with shouldhaveshards false
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     1
    57638 |     1
(2 rows)

SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
 master_set_node_property
---------------------------------------------------------------------

(1 row)

-- distributed tables should still not be placed on nodes that were switched to
-- shouldhaveshards true
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     4
(1 row)

-- reference tables should still be placed on all nodes
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     1
    57638 |     1
(2 rows)

SELECT create_distributed_table('test_dist_colocated', 'x');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- colocated tables should not be placed on nodes that were switched to
-- shouldhaveshards true
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     4
(1 row)

-- non-colocated tables should be placed on nodes that were switched to
-- shouldhaveshards true
SELECT nodeport, count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
---------------------------------------------------------------------
    57637 |     2
    57638 |     2
(2 rows)

SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproperty', false);
ERROR: only the 'shouldhaveshards' property can be set using this function
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated;