mirror of https://github.com/citusdata/citus.git
Citus_disable_node
parent 2e61c3e6b8
commit 9fec89d70b
@@ -345,7 +345,6 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
     * via process utility.
     */
    ExecuteAndLogUtilityCommandList(shellTableDDLEvents);
    MarkObjectDistributed(&tableAddress);

    /*
     * Set shellRelationId as the relation with relationId now points

@@ -368,6 +367,13 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
    InsertMetadataForCitusLocalTable(shellRelationId, shardId, autoConverted);

    FinalizeCitusLocalTableCreation(shellRelationId, dependentSequenceList);

    /*
     * Mark the shell relation as distributed on each node as the last step.
     */
    ObjectAddress shellRelationAddress = { 0 };
    ObjectAddressSet(shellRelationAddress, RelationRelationId, shellRelationId);
    MarkObjectDistributed(&shellRelationAddress);
}

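The hunk above registers the shell relation as a distributed object only after its metadata and shard setup have finished, via an ObjectAddress keyed on pg_class. The sketch below pulls that pattern out into a standalone helper: MarkRelationDistributed is a hypothetical name, the extern prototype for MarkObjectDistributed() is an assumption inferred from the pointer-taking call in the hunk, and the snippet is meant to be compiled inside the Citus source tree.

#include "postgres.h"

#include "catalog/objectaddress.h"
#include "catalog/pg_class.h"

/* assumed prototype; in Citus this comes from the project's own headers */
extern void MarkObjectDistributed(const ObjectAddress *distAddress);

/*
 * Hypothetical helper mirroring the pattern in the hunk above: build the
 * pg_class object address of a relation and record it as distributed.
 */
static void
MarkRelationDistributed(Oid relationId)
{
    ObjectAddress relationAddress = { 0 };

    /* (classId, objectId) = (pg_class, relation OID) identifies the table */
    ObjectAddressSet(relationAddress, RelationRelationId, relationId);

    MarkObjectDistributed(&relationAddress);
}

Doing this as the last step, as the hunk's own comment says, means the object is only advertised as distributed once the shell table and its metadata actually exist.
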
@@ -1240,6 +1246,8 @@ FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList)
        CreateTruncateTrigger(relationId);
    }

    CreateShellTableOnWorkers(relationId);

    if (ShouldSyncTableMetadata(relationId))
    {
        CreateTableMetadataOnWorkers(relationId);

@@ -509,16 +509,6 @@ citus_disable_node(PG_FUNCTION_ARGS)
                               workerNode->workerName,
                               nodePort)));
        }

        /*
         * Delete replicated table placements from the coordinator's metadata,
         * but not remotely. That is because one more more of the remote
         * nodes might be down. Instead, we let the background worker
         * to sync the metadata when possible.
         */
        bool forceRemoteDelete = false;
        DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
                                                        forceRemoteDelete);
    }

    TransactionModifiedNodeMetadata = true;

@@ -1182,27 +1172,44 @@ ActivateNode(char *nodeName, int nodePort)
                                          BoolGetDatum(isActive));
    }

    if (syncMetadata)
    {
        StartMetadataSyncToNode(nodeName, nodePort);
    /*
     * Delete replicated table placements from the coordinator's metadata,
     * including remote ones.
     */
    bool forceRemoteDelete = true;
    DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
                                                    forceRemoteDelete);

    /*
     * Since coordinator node already has both objects and related metadata
     * we don't need to recreate them.
     */
    if (NodeIsPrimary(workerNode))
    {
        if (workerNode->groupId != COORDINATOR_GROUP_ID)
        {
            /* TODO: Consider calling function below according to other states like primary/secondary */
            /* Should we check syncMetadata always on as well? */
            ClearDistributedObjectsWithMetadataFromNode(workerNode);
            SetUpDistributedTableWithDependencies(workerNode);
            SetUpMultipleDistributedTableIntegrations(workerNode);
            SetUpObjectMetadata(workerNode);

        }
        else if (ReplicateReferenceTablesOnActivate)
        {
            // We only need to replicate reference table to the coordinator node
            ReplicateAllReferenceTablesToNode(workerNode->workerName,
                                              workerNode->workerPort);
        }
    }

    if (syncMetadata)
    {
        StartMetadataSyncToNode(nodeName, nodePort);

        if (workerNode->groupId != COORDINATOR_GROUP_ID)
        {
            SetUpMultipleDistributedTableIntegrations(workerNode);
            SetUpObjectMetadata(workerNode);
        }
    }

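Two call sites of DeleteAllReplicatedTablePlacementsFromNodeGroup() appear in the hunks above: the citus_disable_node() hunk apparently drops a call that passed forceRemoteDelete = false (its comment notes the remote nodes may be down), while the ActivateNode() hunk calls it with forceRemoteDelete = true ("including remote ones"). The sketch below only restates that contrast as a tiny hypothetical wrapper; the extern prototype is an assumption inferred from the two-argument calls in the diff, and DropReplicatedPlacementsForGroup is not a real Citus function.

#include "postgres.h"

/* assumed prototype, inferred from the calls visible in the hunks above */
extern void DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId,
                                                            bool forceRemoteDelete);

/*
 * Hypothetical wrapper around the call seen in both hunks. The flag decides
 * whether the placement deletions are also executed on the remote nodes
 * (true, as in ActivateNode) or only applied to the coordinator's metadata
 * and left for a later metadata sync (false, as in the code the
 * citus_disable_node hunk appears to remove).
 */
static void
DropReplicatedPlacementsForGroup(int32 groupId, bool nodeIsBeingActivated)
{
    bool forceRemoteDelete = nodeIsBeingActivated;

    DeleteAllReplicatedTablePlacementsFromNodeGroup(groupId, forceRemoteDelete);
}
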
@@ -371,7 +371,7 @@ ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName, int nodePort)
                                nodePort)));

        EnsureNoModificationsHaveBeenDone();
        SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
        SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, tableOwner,
                                                  ddlCommandList);
        int32 groupId = GroupForNode(nodeName, nodePort);

@@ -596,8 +596,10 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort)
            char *tableOwner = TableOwner(shardInterval->relationId);
            List *commandList = CopyShardForeignConstraintCommandList(shardInterval);

            SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
                                                      commandList);
            SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName,
                                                                    nodePort,
                                                                    tableOwner,
                                                                    commandList);
        }
    }
}

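Both hunks above appear to switch reference-table DDL propagation from SendCommandListToWorkerOutsideTransaction() to SendMetadataCommandListToWorkerInCoordinatedTransaction(), i.e. from a separate connection outside the current transaction to the coordinated transaction, presumably so the commands commit or roll back together with the rest of the metadata changes. Below is a minimal sketch of such a call site; the two extern prototypes are assumptions inferred from the argument lists in the diff (node name, port, user to run as, list of commands), and SendReferenceTableDdlSketch is a hypothetical caller.

#include "postgres.h"

#include "nodes/pg_list.h"

/* assumed prototypes, inferred from the call sites in the hunks above */
extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort,
                                                      const char *nodeUser, List *commandList);
extern void SendMetadataCommandListToWorkerInCoordinatedTransaction(const char *nodeName,
                                                                    int32 nodePort,
                                                                    const char *nodeUser,
                                                                    List *commandList);

static void
SendReferenceTableDdlSketch(const char *nodeName, int32 nodePort, const char *tableOwner,
                            List *ddlCommandList)
{
    /*
     * The old call site used SendCommandListToWorkerOutsideTransaction(),
     * which runs the commands over its own connection, outside the current
     * transaction. Routing them through the coordinated transaction instead
     * apparently ties them to the same commit/abort decision as the
     * accompanying metadata changes.
     */
    SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, tableOwner,
                                                            ddlCommandList);
}
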
@@ -78,6 +78,9 @@ SELECT master_get_active_worker_nodes();
-- try to disable a node which does not exist and see that an error is thrown
SELECT citus_disable_node('localhost.noexist', 2345);

table pg_dist_node;
\d

-- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port);