mirror of https://github.com/citusdata/citus.git
Update activate node
parent c60f77bb99
commit 299043dfaa
@@ -389,6 +389,14 @@ ShouldSyncTableMetadata(Oid relationId)
 	bool citusTableWithNoDistKey =
 		IsCitusTableTypeCacheEntry(tableEntry, CITUS_TABLE_WITH_NO_DIST_KEY);

+	/*
+	 * We shouldn't sync metadata for distributed tables created with master_create_distributed_table
+	 */
+	if (hashDistributed && tableEntry->colocationId == INVALID_COLOCATION_ID)
+	{
+		return false;
+	}
+
 	return ShouldSyncTableMetadataInternal(hashDistributed, citusTableWithNoDistKey);
 }

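The guard's observable effect: a hash-distributed table without a co-location group is no longer metadata-synced. Below is a hedged sketch (not part of the commit; the table name is hypothetical, and it assumes INVALID_COLOCATION_ID corresponds to colocationid 0 in pg_dist_partition, as in the Citus source):

-- created through the deprecated API named in the new comment
CREATE TABLE legacy_table (a int, b text);
SELECT master_create_distributed_table('legacy_table', 'a', 'hash');
-- no co-location group is assigned, so ShouldSyncTableMetadata() now returns false
SELECT logicalrelid, colocationid
FROM pg_dist_partition
WHERE logicalrelid = 'legacy_table'::regclass;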
@@ -510,10 +510,6 @@ citus_disable_node(PG_FUNCTION_ARGS)
 							   workerNode->workerName,
 							   nodePort)));
 		}
-
-		bool forceRemoteDelete = false;
-		DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
-														forceRemoteDelete);
 	}

 	TransactionModifiedNodeMetadata = true;
@@ -1190,7 +1186,7 @@ ActivateNode(char *nodeName, int nodePort)
 	 */
 	if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode) && !workerNode->isActive)
 	{
-		; bool forceRemoteDelete = true;
+		bool forceRemoteDelete = true;
 		DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
 														forceRemoteDelete);
 	}
@@ -1212,6 +1208,7 @@ ActivateNode(char *nodeName, int nodePort)
 	}

 	SetUpDistributedTableWithDependencies(workerNode);
+	SetUpMultipleDistributedTableIntegrations(workerNode);

 	if (syncMetadata)
 	{
@@ -1220,7 +1217,6 @@ ActivateNode(char *nodeName, int nodePort)
 		if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode))
 		{
 			ClearDistributedObjectsFromNode(workerNode);
-			SetUpMultipleDistributedTableIntegrations(workerNode);
 			SetUpObjectMetadata(workerNode);
 		}
 	}
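Taken together, the three ActivateNode hunks remove a stray empty statement and hoist SetUpMultipleDistributedTableIntegrations out of the metadata-sync branch, so it now runs on every activation. A minimal usage sketch (the port is hypothetical):

-- activating a worker now performs the multi-table integration setup
-- whether or not metadata sync is enabled for that node
SELECT citus_activate_node('localhost', 9700);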
@@ -270,8 +270,7 @@ worker_drop_distributed_table_metadata_only(PG_FUNCTION_ARGS)
 	List *shardPlacementList = ShardPlacementListIncludingOrphanedPlacements(shardId);
 	if (shardPlacementList == NULL)
 	{
-		ereport(NOTICE, (errmsg("placement for relation with oid %d does not exist, skipping", relationId)));
-		PG_RETURN_VOID();
+		ereport(WARNING, (errmsg("placement for relation with oid %d does not exist, skipping", relationId)));
 	}

 	ShardPlacement *placement = NULL;
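The behavior change: a missing placement now emits a WARNING and falls through instead of returning early, so the remaining metadata cleanup still runs. A sketch of a triggering call (the argument type is an assumption; this UDF's exact signature is not shown in the diff):

-- hypothetical relation with no recorded placements
SELECT worker_drop_distributed_table_metadata_only('dangling_table'::regclass);
-- WARNING:  placement for relation with oid ... does not exist, skipping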
@@ -71,6 +71,8 @@ extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
 extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);

 #define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE"
+#define DELETE_ALL_PLACEMENTS "TRUNCATE pg_dist_placement CASCADE"
+#define DELETE_ALL_SHARDS "TRUNCATE pg_dist_shard CASCADE"
 #define DELETE_ALL_DISTRIBUTED_OBJECTS "TRUNCATE citus.pg_dist_object"
 #define REMOVE_ALL_CLUSTERED_TABLES_ONLY_COMMAND \
 	"SELECT worker_drop_distributed_table_only(logicalrelid::regclass::text) FROM pg_dist_partition"
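Spelled out, the SQL carried by the two new macros is simply:

TRUNCATE pg_dist_placement CASCADE;
TRUNCATE pg_dist_shard CASCADE;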
@@ -500,14 +500,7 @@ SELECT shardid, nodename, nodeport
 WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

 -- add the node back
-table pg_dist_node;
-\c - - - :worker_1_port
-table pg_dist_node;
-table pg_dist_placement;
-table pg_dist_shard;
-table pg_dist_shard_placement;
-\c - - - :master_port
-set citus.log_remote_commands to true;
+DROP FOREIGN TABLE foreign_table_to_distribute;
 SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
 reset citus.log_remote_commands;
 RESET client_min_messages;
@@ -36,7 +36,6 @@ test: alter_database_owner
 test: multi_test_catalog_views
 test: multi_table_ddl
 test: multi_sequence_default
-test: turn_mx_off
 test: multi_name_lengths
 test: multi_name_resolution
 test: multi_metadata_access
@@ -76,10 +76,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
     AS 'citus', $$master_create_worker_shards$$
     LANGUAGE C STRICT;
 -- re-add the nodes to the cluster
-set citus.log_remote_commands to true;
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-reset citus.log_remote_commands;

 -- verify that a table can be created after the extension has been dropped and recreated
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
@@ -6,6 +6,12 @@
 -- metadata changes to MX tables.

 -- Turn metadata sync off at first
+\c - - - :worker_1_port
+table pg_dist_partition;
+table pg_dist_node;
+
+\c - - - :master_port
+
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);

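The bare table pg_dist_partition; lines added here use PostgreSQL's TABLE shorthand; each is equivalent to a plain SELECT:

TABLE pg_dist_partition;
-- same as
SELECT * FROM pg_dist_partition;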
@@ -47,18 +53,14 @@ COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regcla
                                                                distribution_method citus.distribution_type)
     IS 'define the table distribution functions';

--- this function is dropped in Citus10, added here for tests
-CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
-                                                                  replication_factor integer DEFAULT 2)
-    RETURNS void
-    AS 'citus', $$master_create_worker_shards$$
-    LANGUAGE C STRICT;
-
 -- Create a test table with constraints and SERIAL and default from user defined sequence
 CREATE SEQUENCE user_defined_seq;
 CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
-SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash');
-SELECT master_create_worker_shards('mx_test_table', 8, 1);
+set citus.shard_count to 8;
+set citus.shard_replication_factor to 1;
+SELECT create_distributed_table('mx_test_table', 'col_1');
+reset citus.shard_count;
+reset citus.shard_replication_factor;

 -- Set the replication model of the test table to streaming replication so that it is
 -- considered as an MX table
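This swap feeds the new ShouldSyncTableMetadata guard above: create_distributed_table assigns a co-location group, whereas the removed master_create_* pair leaves colocationid at INVALID_COLOCATION_ID and would now keep mx_test_table out of metadata sync. A hedged restatement of the equivalence (same 8 shards at replication factor 1, expressed through GUCs):

set citus.shard_count to 8;
set citus.shard_replication_factor to 1;
SELECT create_distributed_table('mx_test_table', 'col_1');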
@@ -87,7 +89,7 @@ UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_tabl
 SELECT unnest(master_metadata_snapshot()) order by 1;


--- Test start_metadata_sync_to_node UDF
+-- Test start_metadata_sync_to_node and citus_activate_node UDFs

 -- Ensure that hasmetadata=false for all nodes
 SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
@@ -103,12 +105,27 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
 -- Add a node to another cluster to make sure it's also synced
 SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');

--- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+\c - - - :worker_1_port
+table pg_dist_partition;
+table pg_dist_node;
+table pg_dist_shard;
+table pg_dist_shard_placement;
+
+\c - - - :master_port
+-- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker
+table pg_dist_partition;
+\d
+set citus.log_remote_commands to true;
+set citus.worker_min_messages to debug5;
+SELECT citus_activate_node('localhost', :worker_1_port);
+reset citus.log_remote_commands;
+reset citus.worker_min_messages;
+
 SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;

 -- Check that the metadata has been copied to the worker
 \c - - - :worker_1_port
+table pg_dist_partition;
 SELECT * FROM pg_dist_local_group;
 SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
@@ -126,7 +143,7 @@ SELECT * FROM pg_dist_colocation ORDER BY colocationid;
 -- Make sure that truncate trigger has been set for the MX table on worker
 SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

--- Make sure that start_metadata_sync_to_node considers foreign key constraints
+-- Make sure that citus_activate_node considers foreign key constraints
 \c - - - :master_port

 -- Since we're superuser, we can set the replication model to 'streaming' to
@@ -142,7 +159,7 @@ CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
 SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
 SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');

-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT citus_activate_node('localhost', :worker_1_port);

 -- Check that foreign key metadata exists on the worker
 \c - - - :worker_1_port
@@ -154,10 +171,10 @@ DROP TABLE mx_testing_schema.fk_test_1;

 RESET citus.shard_replication_factor;

--- Check that repeated calls to start_metadata_sync_to_node has no side effects
+-- Check that repeated calls to citus_activate_node has no side effects
 \c - - - :master_port
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT citus_activate_node('localhost', :worker_1_port);
+SELECT citus_activate_node('localhost', :worker_1_port);
 \c - - - :worker_1_port
 SELECT * FROM pg_dist_local_group;
 SELECT * FROM pg_dist_node ORDER BY nodeid;
@@ -171,10 +188,11 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
     relid = 'mx_testing_schema.mx_index'::regclass;
 SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

--- Make sure that start_metadata_sync_to_node can be called inside a transaction and rollbacked
+-- Make sure that citus_activate_node can be called inside a transaction and rollbacked
 \c - - - :master_port
 BEGIN;
 SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT citus_activate_node('localhost', :worker_2_port);
 ROLLBACK;

 SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
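Both UDF calls join the surrounding transaction, so the ROLLBACK is expected to undo them and the final probe should still report hasmetadata = f for worker 2. The invariant being tested, condensed:

BEGIN;
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT citus_activate_node('localhost', :worker_2_port);
ROLLBACK;
-- expected to remain false after the rollback
SELECT hasmetadata FROM pg_dist_node WHERE nodeport = :worker_2_port;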
@@ -182,7 +200,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 -- Check that the distributed table can be queried from the worker
 \c - - - :master_port
 SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT citus_activate_node('localhost', :worker_1_port);

 CREATE TABLE mx_query_test (a int, b text, c int);
 SELECT create_distributed_table('mx_query_test', 'a');
@@ -441,7 +459,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 -- sync table with serial column after create_distributed_table
 CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
 SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT citus_activate_node('localhost', :worker_1_port);
 DROP TABLE mx_table_with_small_sequence;

 -- Show that create_distributed_table works with a serial column
@@ -486,7 +504,7 @@ SELECT nextval('mx_table_with_sequence_c_seq');

 -- Check that adding a new metadata node sets the sequence space correctly
 \c - - - :master_port
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT citus_activate_node('localhost', :worker_2_port);

 \c - - - :worker_2_port
 SELECT groupid FROM pg_dist_local_group;
@@ -531,7 +549,7 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
 \c - - - :master_port

 -- Remove a node so that shards and sequences won't be created on table creation. Therefore,
--- we can test that start_metadata_sync_to_node can actually create the sequence with proper
+-- we can test that citus_activate_node can actually create the sequence with proper
 -- owner
 CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
 CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
@@ -557,7 +575,7 @@ SELECT create_distributed_table('mx_table', 'a');

 \c - postgres - :master_port
 SELECT master_add_node('localhost', :worker_2_port);
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT citus_activate_node('localhost', :worker_2_port);

 \c - mx_user - :worker_1_port
 SELECT nextval('mx_table_b_seq');
@@ -771,7 +789,7 @@ SELECT pg_reload_conf();
 UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port;

 SELECT master_add_node('localhost', :worker_2_port);
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT citus_activate_node('localhost', :worker_2_port);

 CREATE SEQUENCE mx_test_sequence_0;
 CREATE SEQUENCE mx_test_sequence_1;
|
@ -865,6 +883,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
|
||||||
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
|
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
|
||||||
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
|
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
|
||||||
|
|
||||||
-- Turn metadata sync back on at the end
|
-- Turn metadata sync back on and ativate them at the end
|
||||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||||
|
SELECT citus_activate_node('localhost', :worker_1_port);
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_port);
|
||||||
|
|
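A quick end-state check after this final hunk (a sketch, not part of the diff; the columns are standard pg_dist_node attributes):

SELECT nodeport, isactive, hasmetadata, metadatasynced
FROM pg_dist_node
ORDER BY nodeport;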