mirror of https://github.com/citusdata/citus.git
Dependency update
parent 48c5ce8960
commit 6598a23963
@@ -405,9 +405,7 @@ ReplicateAllDependenciesToNode(const char *nodeName, int nodePort)
	ddlCommands = lcons(DISABLE_DDL_PROPAGATION, ddlCommands);
	ddlCommands = lappend(ddlCommands, ENABLE_DDL_PROPAGATION);

	SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort,
															 CitusExtensionOwnerName(),
															 ddlCommands);
	SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, CitusExtensionOwnerName(), ddlCommands);
}
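The hunk above shows both the coordinated-transaction helper and the one-shot SendCommandListToWorkerOutsideTransaction call side by side. A minimal sketch of how the tail of ReplicateAllDependenciesToNode reads if the coordinated-transaction call is the surviving side (an assumption based on the other hunks in this commit, not the authoritative post-patch source):

	/* sketch only: bracket the generated DDL so the worker does not re-propagate it */
	ddlCommands = lcons(DISABLE_DDL_PROPAGATION, ddlCommands);
	ddlCommands = lappend(ddlCommands, ENABLE_DDL_PROPAGATION);

	/* assumed surviving call: send the command list within the coordinated transaction */
	SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort,
															 CitusExtensionOwnerName(),
															 ddlCommands);
}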
@@ -863,21 +863,14 @@ SetUpDistributedTableWithDependencies(WorkerNode *newWorkerNode)
{
	EnsureNoModificationsHaveBeenDone();

	if (ShouldPropagate() && !NodeIsCoordinator(newWorkerNode))
	Assert(ShouldPropagate());
	if (!NodeIsCoordinator(newWorkerNode))
	{
		ClearDistributedObjectsWithMetadataFromNode(newWorkerNode);
		PropagateNodeWideObjects(newWorkerNode);
		ReplicateAllDependenciesToNode(newWorkerNode->workerName,
									   newWorkerNode->workerPort);
	}
	else if (!NodeIsCoordinator(newWorkerNode))
	{
		ereport(WARNING, (errmsg("citus.enable_object_propagation is off, not "
								 "creating distributed objects on worker"),
						  errdetail("distributed objects are only kept in sync when "
									"citus.enable_object_propagation is set to on. "
									"Newly activated nodes will not get these "
									"objects created")));
	}

	if (ReplicateReferenceTablesOnActivate)
	{
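This hunk mixes the old guard, the new Assert, and the warning branch in one block. A sketch of the post-patch control flow, assuming the ShouldPropagate() guard and the citus.enable_object_propagation warning branch are the removed side and the Assert is the added side:

	/* sketch: object propagation is now asserted rather than silently skipped */
	Assert(ShouldPropagate());

	if (!NodeIsCoordinator(newWorkerNode))
	{
		/* drop any stale distributed objects/metadata, then re-create dependencies */
		ClearDistributedObjectsWithMetadataFromNode(newWorkerNode);
		PropagateNodeWideObjects(newWorkerNode);
		ReplicateAllDependenciesToNode(newWorkerNode->workerName,
									   newWorkerNode->workerPort);
	}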
@@ -890,12 +883,14 @@ SetUpDistributedTableWithDependencies(WorkerNode *newWorkerNode)
		 * We prefer this because otherwise node activation might fail within
		 * transaction blocks.
		 */
		if (ClusterHasDistributedFunctionWithDistArgument())
		// TODO: Doesn't make sense to have that here as we won't handle placement metadata
		// with maintenance daemon anymore
		/* if (ClusterHasDistributedFunctionWithDistArgument())
		{
			SetWorkerColumnLocalOnly(newWorkerNode, Anum_pg_dist_node_hasmetadata,
									 BoolGetDatum(true));
			TriggerMetadataSyncOnCommit();
		}
		}*/
	}
}
@@ -1180,33 +1175,13 @@ ActivateNode(char *nodeName, int nodePort)
	DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId,
													forceRemoteDelete);

	/*
	 * Since coordinator node already has both objects and related metadata
	 * we don't need to recreate them.
	 */
	if (NodeIsPrimary(workerNode))
	{
		if (workerNode->groupId != COORDINATOR_GROUP_ID)
		{
			/* TODO: Consider calling function below according to other states like primary/secondary */
			/* Should we check syncMetadata always on as well? */
			ClearDistributedObjectsWithMetadataFromNode(workerNode);
			SetUpDistributedTableWithDependencies(workerNode);

		}
		else if (ReplicateReferenceTablesOnActivate)
		{
			// We only need to replicate reference table to the coordinator node
			ReplicateAllReferenceTablesToNode(workerNode->workerName,
											  workerNode->workerPort);
		}
	}

	if (syncMetadata)
	{
		StartMetadataSyncToNode(nodeName, nodePort);

		if (workerNode->groupId != COORDINATOR_GROUP_ID)
		if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode))
		{
			SetUpMultipleDistributedTableIntegrations(workerNode);
			SetUpObjectMetadata(workerNode);
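The last part of this hunk shows two guards for the metadata-sync branch: the old groupId comparison and a new primary/coordinator check. A sketch of the syncMetadata branch as it reads if the NodeIsCoordinator/NodeIsPrimary condition is the surviving side (an assumption; both lines appear in the hunk):

	if (syncMetadata)
	{
		StartMetadataSyncToNode(nodeName, nodePort);

		/* assumed post-patch guard: only primary, non-coordinator nodes get this setup */
		if (!NodeIsCoordinator(workerNode) && NodeIsPrimary(workerNode))
		{
			SetUpMultipleDistributedTableIntegrations(workerNode);
			SetUpObjectMetadata(workerNode);
		}
	}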
@@ -329,8 +329,7 @@ upgrade_to_reference_table(PG_FUNCTION_ARGS)
/*
 * ReplicateShardToNode function replicates given shard to the given worker node
 * in a separate transaction. If the worker already has
 * a replica of the shard this is a no-op. This function also modifies metadata
 * by inserting/updating related rows in pg_dist_placement.
 * a replica of the shard this is a no-op.
 *
 * IMPORTANT: This should only be used to replicate shards of a reference
 * table.
@@ -371,17 +370,13 @@ ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName, int nodePort)
									   nodePort)));

	EnsureNoModificationsHaveBeenDone();
	SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, tableOwner,
	SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, tableOwner,
											  ddlCommandList);
	int32 groupId = GroupForNode(nodeName, nodePort);

	uint64 placementId = GetNextPlacementId();
	InsertShardPlacementRow(shardId, placementId, SHARD_STATE_ACTIVE, 0,
							groupId);

	// Since having a duplicate on pg_dist_placement can cause issue, we don't add
	// it to all nodes here. Caller of this function must propagate pg_dist_placement to
	// other nodes if it is required.
}
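A sketch of the tail of ReplicateShardToNode as it reads if the coordinated-transaction call is the kept side; shardId, tableOwner, and ddlCommandList are assumed to come from earlier in the function, which this hunk does not show:

	EnsureNoModificationsHaveBeenDone();

	/* assumed surviving call: ship the shard's DDL inside the coordinated transaction */
	SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, tableOwner,
															 ddlCommandList);

	/* record the new placement only in the local pg_dist_placement; per the comment
	 * in the hunk, callers must propagate the row to other nodes if required */
	int32 groupId = GroupForNode(nodeName, nodePort);
	uint64 placementId = GetNextPlacementId();
	InsertShardPlacementRow(shardId, placementId, SHARD_STATE_ACTIVE, 0, groupId);
}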
@@ -544,6 +539,8 @@ ReferenceTableReplicationFactor(void)
 * table to update the replication factor column when necessary. This function
 * skips reference tables if that node already has a healthy placement of that
 * reference table to prevent unnecessary data transfer.
 *
 * TODO: Make this static and update the comment.
 */
void
ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort)
@@ -584,17 +581,5 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort)

			ReplicateShardToNode(shardInterval, nodeName, nodePort);
		}

		/* create foreign constraints between reference tables */
		foreach_ptr(shardInterval, referenceShardIntervalList)
		{
			char *tableOwner = TableOwner(shardInterval->relationId);
			List *commandList = CopyShardForeignConstraintCommandList(shardInterval);

			SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName,
																	 nodePort,
																	 tableOwner,
																	 commandList);
		}
	}
}
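A sketch of the loop structure in ReplicateAllReferenceTablesToNode as it reads after this hunk; the opening of the first loop and the declaration of referenceShardIntervalList sit above the hunk and are assumed here:

	/* first pass: copy each reference table shard to the target node */
	ShardInterval *shardInterval = NULL;
	foreach_ptr(shardInterval, referenceShardIntervalList)
	{
		ReplicateShardToNode(shardInterval, nodeName, nodePort);
	}

	/* second pass: re-create foreign constraints between reference tables,
	 * assuming the coordinated-transaction helper is the call used here */
	foreach_ptr(shardInterval, referenceShardIntervalList)
	{
		char *tableOwner = TableOwner(shardInterval->relationId);
		List *commandList = CopyShardForeignConstraintCommandList(shardInterval);

		SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort,
																 tableOwner,
																 commandList);
	}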