Citus_disable_node

velioglu/wo_seq_test_1
Burak Velioglu 2021-12-21 17:20:13 +03:00
parent 2e61c3e6b8
commit 9fec89d70b
4 changed files with 44 additions and 24 deletions

View File

@@ -345,7 +345,6 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
     * via process utility.
     */
    ExecuteAndLogUtilityCommandList(shellTableDDLEvents);
-   MarkObjectDistributed(&tableAddress);
 
    /*
     * Set shellRelationId as the relation with relationId now points
@@ -368,6 +367,13 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
    InsertMetadataForCitusLocalTable(shellRelationId, shardId, autoConverted);
    FinalizeCitusLocalTableCreation(shellRelationId, dependentSequenceList);
+
+   /*
+    * Mark the shell relation as distributed on each node as the last step.
+    */
+   ObjectAddress shellRelationAddress = { 0 };
+   ObjectAddressSet(shellRelationAddress, RelationRelationId, shellRelationId);
+   MarkObjectDistributed(&shellRelationAddress);
 }
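
Note: the net effect of the two hunks above is that the pg_dist_object registration moves from the original relation (tableAddress, before the shell swap) to the shell relation, and now runs only after InsertMetadataForCitusLocalTable() and FinalizeCitusLocalTableCreation() have completed. A minimal sketch of the registration pattern, using the PostgreSQL/Citus helpers visible in the diff (the wrapper name is ours, for illustration only):

/*
 * Illustrative wrapper, not part of the commit: record a relation as a
 * distributed object. ObjectAddressSet() fills in the (classId, objectId)
 * pair and MarkObjectDistributed() persists it in pg_dist_object, which is
 * exactly what the new last step of CreateCitusLocalTable() does for
 * shellRelationId.
 */
static void
MarkRelationDistributed(Oid relationId)
{
    ObjectAddress relationAddress = { 0 };
    ObjectAddressSet(relationAddress, RelationRelationId, relationId);
    MarkObjectDistributed(&relationAddress);
}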
@@ -1240,6 +1246,8 @@ FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList)
        CreateTruncateTrigger(relationId);
    }
 
+   CreateShellTableOnWorkers(relationId);
+
    if (ShouldSyncTableMetadata(relationId))
    {
        CreateTableMetadataOnWorkers(relationId);

View File

@@ -509,16 +509,6 @@ citus_disable_node(PG_FUNCTION_ARGS)
                           workerNode->workerName,
                           nodePort)));
        }
-
-       /*
-        * Delete replicated table placements from the coordinator's metadata,
-        * but not remotely. That is because one or more of the remote
-        * nodes might be down. Instead, we let the background worker
-        * sync the metadata when possible.
-        */
-       bool forceRemoteDelete = false;
-       DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
-                                                       forceRemoteDelete);
    }
 
    TransactionModifiedNodeMetadata = true;
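
Note: this removal is paired with the addition in ActivateNode() below; the placement cleanup moves from disable time to activation time, and the forceRemoteDelete flag flips. A sketch of the contrast, using only names that appear in this commit:

/*
 * Sketch, not part of the commit: the two modes of
 * DeleteAllReplicatedTablePlacementsFromNodeGroup() as used here.
 */

/* disable path (removed above): peers may themselves be down, so delete
 * only from the coordinator's metadata and let the background
 * metadata-sync worker propagate the change once nodes are reachable */
bool forceRemoteDelete = false;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
                                                forceRemoteDelete);

/* activate path (added below): the nodes are expected to be reachable at
 * activation time, so the deletion is forced on the remote nodes as well */
forceRemoteDelete = true;
DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
                                                forceRemoteDelete);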
@@ -1182,30 +1172,47 @@ ActivateNode(char *nodeName, int nodePort)
                              BoolGetDatum(isActive));
    }
 
-   if (syncMetadata)
-   {
-       StartMetadataSyncToNode(nodeName, nodePort);
-   }
+   /*
+    * Delete replicated table placements from the coordinator's metadata,
+    * including remote ones.
+    */
+   bool forceRemoteDelete = true;
+   DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
+                                                   forceRemoteDelete);
+
+   /*
+    * Since the coordinator node already has both the objects and the
+    * related metadata, we don't need to recreate them.
+    */
    if (NodeIsPrimary(workerNode))
    {
        if (workerNode->groupId != COORDINATOR_GROUP_ID)
        {
+           /* TODO: consider calling the function below according to other node states (primary/secondary) */
+           /* TODO: should we also check that syncMetadata is always on? */
+           ClearDistributedObjectsWithMetadataFromNode(workerNode);
            SetUpDistributedTableWithDependencies(workerNode);
-           SetUpMultipleDistributedTableIntegrations(workerNode);
-           SetUpObjectMetadata(workerNode);
        }
        else if (ReplicateReferenceTablesOnActivate)
        {
+           /* we only need to replicate reference tables to the coordinator node */
            ReplicateAllReferenceTablesToNode(workerNode->workerName,
                                              workerNode->workerPort);
        }
    }
 
+   if (syncMetadata)
+   {
+       StartMetadataSyncToNode(nodeName, nodePort);
+
+       if (workerNode->groupId != COORDINATOR_GROUP_ID)
+       {
+           SetUpMultipleDistributedTableIntegrations(workerNode);
+           SetUpObjectMetadata(workerNode);
+       }
+   }
+
    /* finally, let all other active metadata nodes learn about this change */
    WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
    Assert(newWorkerNode->nodeId == workerNode->nodeId);
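
Note: read together, the rewritten ActivateNode() orders its side effects so that table and object metadata are shipped only after metadata sync to the node has started, and the coordinator is never re-populated with objects it already owns. A condensed sketch of the resulting flow (helper names as shown in the hunk; error handling omitted):

/*
 * Condensed sketch of the new activation flow, for illustration only.
 */
static void
ActivateNodeSketch(WorkerNode *workerNode, bool syncMetadata)
{
    /* 1. drop stale replicated placements, locally and remotely */
    bool forceRemoteDelete = true;
    DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
                                                    forceRemoteDelete);

    /* 2. (re)create objects on primary workers; the coordinator only
     *    needs reference table placements, it already has the objects */
    if (NodeIsPrimary(workerNode))
    {
        if (workerNode->groupId != COORDINATOR_GROUP_ID)
        {
            ClearDistributedObjectsWithMetadataFromNode(workerNode);
            SetUpDistributedTableWithDependencies(workerNode);
        }
        else if (ReplicateReferenceTablesOnActivate)
        {
            ReplicateAllReferenceTablesToNode(workerNode->workerName,
                                              workerNode->workerPort);
        }
    }

    /* 3. ship table and object metadata only once sync has started */
    if (syncMetadata)
    {
        StartMetadataSyncToNode(workerNode->workerName,
                                workerNode->workerPort);
        if (workerNode->groupId != COORDINATOR_GROUP_ID)
        {
            SetUpMultipleDistributedTableIntegrations(workerNode);
            SetUpObjectMetadata(workerNode);
        }
    }
}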

View File

@@ -371,7 +371,7 @@ ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName, int nodePort)
                              nodePort)));
 
    EnsureNoModificationsHaveBeenDone();
-   SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
+   SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, tableOwner,
                                              ddlCommandList);
 
    int32 groupId = GroupForNode(nodeName, nodePort);
@@ -596,7 +596,9 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort)
            char *tableOwner = TableOwner(shardInterval->relationId);
            List *commandList = CopyShardForeignConstraintCommandList(shardInterval);
 
-           SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
+           SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName,
+                                                                   nodePort,
+                                                                   tableOwner,
                                                                    commandList);
        }
    }
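
Note: both hunks in this file make the same substitution. As the function names suggest, the reference-table commands used to run on the worker outside any coordinated transaction; with the new sender they participate in the coordinator's transaction, so they commit or roll back together with the metadata changes made during activation. A sketch of the call-site difference (signatures as they appear in the diff; the command list is hypothetical):

/* hypothetical single-command list, for illustration */
List *commandList = list_make1("GRANT SELECT ON public.ref_table TO app_user");

/* before: runs immediately on the worker, outside the coordinated
 * transaction; a later abort on the coordinator cannot undo it */
SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
                                          commandList);

/* after: sent over the coordinated transaction's connection, so the
 * command's effects follow the coordinator's commit or abort */
SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort,
                                                        tableOwner,
                                                        commandList);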

View File

@@ -78,6 +78,9 @@ SELECT master_get_active_worker_nodes();
 
 -- try to disable a node which does not exist and see that an error is thrown
 SELECT citus_disable_node('localhost.noexist', 2345);
+
+table pg_dist_node;
+\d
 
 -- drop the table without leaving a shard placement behind (messes up other tests)
 SELECT master_activate_node('localhost', :worker_2_port);