mirror of https://github.com/citusdata/citus.git

Reorder metadata sync commands

parent 9c470ed6d4
commit 86bcb7bfdd
@@ -107,8 +107,8 @@ static void InsertPlaceholderCoordinatorRecord(void);
 static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, NodeMetadata
 						  *nodeMetadata);
 static void DeleteNodeRow(char *nodename, int32 nodeport);
-static List * PgDistMetadataSyncCommandList();
-static void SyncTableMetadataToNode(WorkerNode *workerNode);
+static void SyncObjectDependenciesToNode(WorkerNode *workerNode);
+static void SyncPgDistTableMetadataToNode(WorkerNode *workerNode);
 static List * InterTableRelationshipCommandList();
 static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
 static List * PropagateNodeWideObjectsCommandList();
@@ -628,11 +628,11 @@ InterTableRelationshipCommandList()


 /*
- * PgDistMetadataSyncCommandList returns the command list to sync the pg_dist_*
- * metadata.
+ * PgDistTableMetadataSyncCommandList returns the command list to sync the pg_dist_*
+ * (except pg_dist_node) metadata. We call them as table metadata.
  */
-static List *
-PgDistMetadataSyncCommandList()
+List *
+PgDistTableMetadataSyncCommandList(void)
 {
 	List *distributedTableList = CitusTableList();
 	List *propagatedTableList = NIL;
@@ -648,7 +648,16 @@ PgDistMetadataSyncCommandList()
 		}
 	}

-	/* after all tables are created, create the metadata */
+	/* remove all dist table related metadata first */
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_PARTITIONS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, DELETE_ALL_SHARDS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_PLACEMENTS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_DISTRIBUTED_OBJECTS);
+
+	/* create pg_dist_partition, pg_dist_shard and pg_dist_placement entries */
 	foreach_ptr(cacheEntry, propagatedTableList)
 	{
 		Oid clusteredTableId = cacheEntry->relationId;
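The hunk above is the idempotency half of the reordering: PgDistTableMetadataSyncCommandList now wipes all existing pg_dist_* table metadata before recreating it, so replaying the sync against a node that already holds metadata cannot leave stale or duplicate rows. Below is a standalone sketch of the resulting two-phase command order; the strings are illustrative stand-ins for what the DELETE_ALL_* macros and the per-table generators emit, not the literal commands (the real citus_internal_add_* calls appear verbatim in the regression output further down).

/*
 * Sketch only, not Citus source: the two-phase command order built by
 * PgDistTableMetadataSyncCommandList after this change.
 */
#include <stdio.h>

int
main(void)
{
	const char *commandOrder[] = {
		/* phase 1: remove all dist table related metadata first */
		"DELETE_ALL_PARTITIONS",
		"DELETE_ALL_SHARDS",
		"DELETE_ALL_PLACEMENTS",
		"DELETE_ALL_DISTRIBUTED_OBJECTS",

		/* phase 2: recreate the pg_dist_* entries per propagated table */
		"SELECT citus_internal_add_partition_metadata(...)",
		"SELECT citus_internal_add_shard_metadata(...)",
		"SELECT citus_internal_add_placement_metadata(...)",
	};

	int commandCount = (int) (sizeof(commandOrder) / sizeof(commandOrder[0]));
	for (int i = 0; i < commandCount; i++)
	{
		printf("%d: %s\n", i + 1, commandOrder[i]);
	}

	return 0;
}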
@@ -726,24 +735,27 @@ PropagateNodeWideObjectsCommandList()


 /*
- * SyncTableMetadataCommandList returns commands to sync table metadata to the
- * given worker node. To be idempotent, it first drops the ones required to be
+ * SyncObjectDependenciesCommandList returns commands to sync object dependencies
+ * to the given worker node. To be idempotent, it first drops the ones required to be
  * dropped.
  *
- * Table metadata includes:
+ * Object dependencies include:
  *
  * - All dependencies (e.g., types, schemas, sequences)
  * - All shell distributed tables
- * - pg_dist_partition, pg_dist_shard, pg_dist_placement, pg_dist_object
  * - Inter relation between those shell tables
+ * - Node wide objects
  *
  * We also update the local group id here, as handling sequence dependencies
  * requires it.
  */
 List *
-SyncTableMetadataCommandList(WorkerNode *workerNode)
+SyncObjectDependenciesCommandList(WorkerNode *workerNode)
 {
 	List *commandList = NIL;

 	/*
-	 * Remove shell tables first.
+	 * Detach partitions, remove shell tables and delete all objects first.
 	 */
 	commandList = list_concat(commandList, DetachPartitionCommandList());
 	commandList = lappend(commandList, REMOVE_ALL_CLUSTERED_TABLES_ONLY_COMMAND);
@@ -757,8 +769,10 @@ SyncTableMetadataCommandList(WorkerNode *workerNode)
 	 * Replicate all objects of the pg_dist_object to the remote node. We need to
 	 * update local group id first, as sequence replication logic depends on it.
 	 */
-	commandList = list_concat(commandList, list_make1(LocalGroupIdUpdateCommand(workerNode->groupId)));
-	commandList = list_concat(commandList, ReplicateAllObjectsToNodeCommandList(workerNode->workerName, workerNode->workerPort));
+	commandList = list_concat(commandList, list_make1(LocalGroupIdUpdateCommand(
+											   workerNode->groupId)));
+	commandList = list_concat(commandList, ReplicateAllObjectsToNodeCommandList(
+											   workerNode->workerName, workerNode->workerPort));

 	/*
 	 * After creating each table, handle the inter table relationship between
@@ -766,47 +780,36 @@ SyncTableMetadataCommandList(WorkerNode *workerNode)
 	 */
 	commandList = list_concat(commandList, InterTableRelationshipCommandList());

-	commandList = lappend(commandList, DELETE_ALL_DISTRIBUTED_OBJECTS);
-	commandList = lappend(commandList, DELETE_ALL_PLACEMENTS);
-	commandList = lappend(commandList, DELETE_ALL_SHARDS);
-	commandList = lappend(commandList, DELETE_ALL_PARTITIONS);
-
-	/*
-	 * Finally create pg_dist_* entries
-	 */
-	List *syncPgDistMetadataCommandList = PgDistMetadataSyncCommandList();
-	commandList = list_concat(commandList, syncPgDistMetadataCommandList);
-
 	return commandList;
 }


 /*
- * SyncTableMetadataToNode sync the table metadata to the node. Metadata includes
+ * SyncObjectDependenciesToNode sync the object dependencies to the node. It includes
  * - All dependencies (e.g., types, schemas, sequences)
  * - All shell distributed table
- * - pg_dist_partition, pg_dist_shard, pg_dist_placement, pg_dist_object
  * - Inter relation between those shell tables
  *
  * Note that we do not create the distributed dependencies on the coordinator
  * since all the dependencies should be present in the coordinator already.
  */
 static void
-SyncTableMetadataToNode(WorkerNode *newWorkerNode)
+SyncObjectDependenciesToNode(WorkerNode *workerNode)
 {
-	if (NodeIsPrimary(newWorkerNode))
+	if (NodeIsPrimary(workerNode))
 	{
 		EnsureNoModificationsHaveBeenDone();

 		Assert(ShouldPropagate());
-		if (!NodeIsCoordinator(newWorkerNode))
+		if (!NodeIsCoordinator(workerNode))
 		{
-			List *commandList = SyncTableMetadataCommandList(newWorkerNode);
+			List *commandList = SyncObjectDependenciesCommandList(workerNode);

 			/* send commands to new workers, the current user should be a superuser */
 			Assert(superuser());
 			SendMetadataCommandListToWorkerInCoordinatedTransaction(
-				newWorkerNode->workerName,
-				newWorkerNode->workerPort,
+				workerNode->workerName,
+				workerNode->workerPort,
 				CurrentUserName(),
 				commandList);
 		}
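Together with the earlier hunks, the pg_dist_* bookkeeping has now moved out of the dependency sync path entirely. A minimal stub sketch of the resulting division of labor between the two command-list builders follows; the step labels are paraphrases of the code above, not generated commands.

/*
 * Sketch of the split in responsibilities after this commit; the array
 * contents are labels, not the actual generated commands.
 */
#include <stdio.h>

/* built by SyncObjectDependenciesCommandList() */
static const char *objectDependencySteps[] = {
	"detach partitions, drop shell tables and objects (idempotency)",
	"update pg_dist_local_group (sequence replication depends on it)",
	"replicate dependencies: types, schemas, sequences, node wide objects",
	"create shell distributed tables",
	"restore inter table relationships between those shell tables",
};

/* built by PgDistTableMetadataSyncCommandList() */
static const char *pgDistTableMetadataSteps[] = {
	"DELETE_ALL_PARTITIONS / SHARDS / PLACEMENTS / DISTRIBUTED_OBJECTS",
	"recreate pg_dist_partition, pg_dist_shard, pg_dist_placement entries",
};

static void
PrintSteps(const char *title, const char **steps, int stepCount)
{
	printf("%s\n", title);
	for (int i = 0; i < stepCount; i++)
	{
		printf("  %d. %s\n", i + 1, steps[i]);
	}
}

int
main(void)
{
	PrintSteps("SyncObjectDependenciesCommandList:", objectDependencySteps, 5);
	PrintSteps("PgDistTableMetadataSyncCommandList:", pgDistTableMetadataSteps, 2);
	return 0;
}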
@@ -814,6 +817,24 @@ SyncTableMetadataToNode(WorkerNode *newWorkerNode)
 }


+static void
+SyncPgDistTableMetadataToNode(WorkerNode *workerNode)
+{
+	if (NodeIsPrimary(workerNode) && !NodeIsCoordinator(workerNode))
+	{
+		List *syncPgDistMetadataCommandList = PgDistTableMetadataSyncCommandList();
+
+		/* send commands to new workers, the current user should be a superuser */
+		Assert(superuser());
+		SendMetadataCommandListToWorkerInCoordinatedTransaction(
+			workerNode->workerName,
+			workerNode->workerPort,
+			CurrentUserName(),
+			syncPgDistMetadataCommandList);
+	}
+}
+
+
 /*
  * ModifiableWorkerNode gets the requested WorkerNode and also gets locks
  * required for modifying it. This fails if the node does not exist.
@@ -1074,10 +1095,11 @@ ActivateNode(char *nodeName, int nodePort)
 							  BoolGetDatum(true));

 	/*
-	 * Sync table metadata first. Please check the comment on SyncTableMetadataToNode
-	 * for the definition of table metadata.
+	 * Sync object dependencies first. We must sync object dependencies before
+	 * replicating reference tables to the remote node, as reference tables may
+	 * need such objects.
 	 */
-	SyncTableMetadataToNode(workerNode);
+	SyncObjectDependenciesToNode(workerNode);

 	/*
 	 * We need to replicate reference tables before syncing node metadata, otherwise
@@ -1091,9 +1113,17 @@ ActivateNode(char *nodeName, int nodePort)
 	}

 	/*
-	 * Sync node metadata (pg_dist_node) finally.
+	 * Sync node metadata. We must sync node metadata before syncing table
+	 * related pg_dist_xxx metadata.
 	 */
 	SyncNodeMetadataToNode(nodeName, nodePort);
+
+	/*
+	 * As the last step, sync the table related metadata to the remote node.
+	 * We must handle it as the last step because of limitations shared with
+	 * above comments.
+	 */
+	SyncPgDistTableMetadataToNode(workerNode);
 	}

 	/* finally, let all other active metadata nodes to learn about this change */
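The two ActivateNode hunks above carry the point of the whole commit: each sync step may only run after the steps it depends on. Below is a standalone stub sketch of the final activation order; each stub stands in for the same-named call in the source (ReplicateReferenceTables is a label for the reference table replication step, not a real Citus symbol), and the bodies only print the step.

#include <stdio.h>

/* stubs standing in for the corresponding steps in ActivateNode */
static void
SyncObjectDependenciesToNode(void)
{
	puts("1. object dependencies, shell tables, local group id");
}

static void
ReplicateReferenceTables(void)
{
	puts("2. reference tables (they may need the synced objects)");
}

static void
SyncNodeMetadataToNode(void)
{
	puts("3. pg_dist_node");
}

static void
SyncPgDistTableMetadataToNode(void)
{
	puts("4. pg_dist_partition / pg_dist_shard / pg_dist_placement / pg_dist_object");
}

int
main(void)
{
	/* order matters; see the comments in the hunks above */
	SyncObjectDependenciesToNode();
	ReplicateReferenceTables();
	SyncNodeMetadataToNode();
	SyncPgDistTableMetadataToNode();

	return 0;
}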
@@ -48,18 +48,21 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
 	 */
 	WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode();

-	List *syncTableMetadataCommandList = SyncTableMetadataCommandList(dummyWorkerNode);
+	List *syncObjectDepCommands = SyncObjectDependenciesCommandList(dummyWorkerNode);
 	List *dropSnapshotCommands = NodeMetadataDropCommands();
 	List *createSnapshotCommands = NodeMetadataCreateCommands();
+	List *pgDistTableMetadataSyncCommands = PgDistTableMetadataSyncCommandList();

 	List *activateNodeCommandList = NIL;
 	int activateNodeCommandIndex = 0;
 	Oid ddlCommandTypeId = TEXTOID;

-	activateNodeCommandList = list_concat(activateNodeCommandList,
-										  syncTableMetadataCommandList);
+	activateNodeCommandList = list_concat(activateNodeCommandList, syncObjectDepCommands);
 	activateNodeCommandList = list_concat(activateNodeCommandList, dropSnapshotCommands);
-	activateNodeCommandList = list_concat(activateNodeCommandList, createSnapshotCommands);
+	activateNodeCommandList = list_concat(activateNodeCommandList,
+										  createSnapshotCommands);
+	activateNodeCommandList = list_concat(activateNodeCommandList,
+										  pgDistTableMetadataSyncCommands);

 	int activateNodeCommandCount = list_length(activateNodeCommandList);
 	Datum *activateNodeCommandDatumArray = palloc0(activateNodeCommandCount *
@@ -103,9 +103,8 @@ extern WorkerNode * SetWorkerColumnLocalOnly(WorkerNode *workerNode, int columnI
 										   Datum value);
 extern uint32 CountPrimariesWithMetadata(void);
 extern WorkerNode * GetFirstPrimaryWorkerNode(void);
-extern List * RecreateDistributedTablesWithDependenciesCommandList(
-	WorkerNode *workerNode);
-extern List * SyncTableMetadataCommandList(WorkerNode *workerNode);
+extern List * SyncObjectDependenciesCommandList(WorkerNode *workerNode);
+extern List * PgDistTableMetadataSyncCommandList(void);

 /* Function declarations for worker node utilities */
 extern int CompareWorkerNodes(const void *leftElement, const void *rightElement);
@@ -670,6 +670,10 @@ NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
@@ -682,10 +686,6 @@ NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -78,10 +78,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -89,7 +85,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 TRUNCATE pg_dist_node CASCADE
 UPDATE pg_dist_local_group SET groupid = 1
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
-(30 rows)
+(26 rows)

 -- this function is dropped in Citus10, added here for tests
 CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
@@ -151,10 +147,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -164,7 +156,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(42 rows)
+(38 rows)

 -- Show that CREATE INDEX commands are included in the activate node snapshot
 CREATE INDEX mx_index ON mx_test_table(col_2);
@@ -201,10 +193,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -214,7 +202,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(43 rows)
+(39 rows)

 -- Show that schema changes are included in the activate node snapshot
 CREATE SCHEMA mx_testing_schema;
@@ -253,10 +241,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -266,7 +250,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(44 rows)
+(40 rows)

 -- Show that append distributed tables are not included in the activate node snapshot
 CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
@@ -311,10 +295,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -324,7 +304,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(44 rows)
+(40 rows)

 -- Show that range distributed tables are not included in the activate node snapshot
 UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
@@ -362,10 +342,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -375,7 +351,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(44 rows)
+(40 rows)

 -- Test start_metadata_sync_to_node and citus_activate_node UDFs
 -- Ensure that hasmetadata=false for all nodes
@@ -1903,10 +1879,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
 SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'off'
-SET citus.enable_ddl_propagation TO 'on'
-SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
@@ -1926,7 +1898,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(87 rows)
+(83 rows)

 -- shouldn't work since test_table is MX
 ALTER TABLE test_table ADD COLUMN id3 bigserial;
@@ -164,15 +164,6 @@ HINT: Use SELECT rebalance_table_shards(); to balance shards data between worke
 (1 row)

 COMMIT;
--- After adding and removing non-metadata synced worker node, shell table
--- can stay on the remote node. So we are deleting it manually.
--- TODO: Update the test once sync by default guc will be removed
-SELECT run_command_on_workers($$DROP TABLE single_node.test$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"DROP TABLE")
-(1 row)
-
 -- we don't need this node anymore
 SELECT 1 FROM master_remove_node('localhost', :worker_1_port);
  ?column?
@@ -35,11 +35,7 @@ FOR EACH ROW EXECUTE FUNCTION dummy_function();

 -- Show that we can activate node successfully. That means, we create
 -- the function that trigger needs in mx workers too.
-set citus.log_remote_commands to true;
-set citus.worker_min_messages to debug4;
 SELECT citus_activate_node('localhost', :worker_1_port);
-reset citus.log_remote_commands;
-reset citus.worker_min_messages;

 CREATE EXTENSION seg;
 ALTER TRIGGER dummy_function_trigger ON citus_local_table DEPENDS ON EXTENSION seg;
@@ -87,11 +87,6 @@
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 COMMIT;

--- After adding and removing non-metadata synced worker node, shell table
--- can stay on the remote node. So we are deleting it manually.
--- TODO: Update the test once sync by default guc will be removed
-SELECT run_command_on_workers($$DROP TABLE single_node.test$$);
-
 -- we don't need this node anymore
 SELECT 1 FROM master_remove_node('localhost', :worker_1_port);