mirror of https://github.com/citusdata/citus.git
Lock parent shard resource while creating new partitions
parent 364f902ff0
commit d3faf974ad
@@ -704,6 +704,7 @@ static void SetAttributeInputMetadata(DistributedExecution *execution,
 static void LookupTaskPlacementHostAndPort(ShardPlacement *taskPlacement, char **nodeName,
                                            int *nodePort);
 static bool IsDummyPlacement(ShardPlacement *taskPlacement);
+static void LockParentShardResouceIfPartitionTaskList(List *taskList);
 
 /*
  * AdaptiveExecutorPreExecutorRun gets called right before postgres starts its executor
@@ -1566,7 +1567,7 @@ LockPartitionsForDistributedPlan(DistributedPlan *distributedPlan)
 
     /*
     * Postgres only takes the lock on parent when the session accesses the
-    * partition for the first time. So it should be okay to get this lock from
+    * partition for the first time. So it should be okay to get this lock from
     * PG perspective. Even though we diverge from PG behavior for concurrent
     * modifications on partitions vs CREATE/DROP partitions, we consider this as
     * a reasonable trade-off to avoid distributed deadlocks.
@@ -1609,6 +1610,11 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution)
     /* acquire the locks for both the remote and local tasks */
     List *taskList = execution->remoteAndLocalTaskList;
 
+    if (modLevel == ROW_MODIFY_NONE)
+    {
+        LockParentShardResouceIfPartitionTaskList(taskList);
+    }
+
     if (modLevel <= ROW_MODIFY_READONLY &&
         !SelectForUpdateOnReferenceTable(taskList))
     {
@@ -1638,6 +1644,34 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution)
 }
 
 
+/*
+ * LockParentShardResouceIfPartitionTaskList locks the parent shard
+ * resource if the given taskList is on a partition table.
+ */
+static void
+LockParentShardResouceIfPartitionTaskList(List *taskList)
+{
+    if (list_length(taskList) < 1)
+    {
+        return;
+    }
+
+    Task *task = (Task *) linitial(taskList);
+    uint64 shardId = task->anchorShardId;
+    if (shardId == INVALID_SHARD_ID)
+    {
+        return;
+    }
+
+    ShardInterval *shardInterval = LoadShardInterval(shardId);
+    Oid relationId = shardInterval->relationId;
+    if (PartitionTable(relationId))
+    {
+        LockParentShardResourceIfPartition(shardId, AccessExclusiveLock);
+    }
+}
+
+
 /*
  * FinishDistributedExecution cleans up resources associated with a
  * distributed execution.
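The new function only checks whether the anchor shard belongs to a partition and then delegates the actual locking to LockParentShardResourceIfPartition, which lives elsewhere in Citus and is not part of this diff. Below is a minimal sketch of what that helper is assumed to do, inferred from this call site: the helpers it uses (ShardIndex, PartitionParentOid, ColocatedShardIdInRelation, LockShardResource) are existing Citus functions, but their combination here is an assumption, and the headers adaptive_executor.c already includes are taken as given.

/*
 * Sketch only, not from this commit: assumed shape of
 * LockParentShardResourceIfPartition. Partitions are colocated with
 * their parent table in Citus, so the parent shard with the same
 * shard index is the one whose resource lock must be taken.
 */
static void
LockParentShardResourceIfPartitionSketch(uint64 shardId, LOCKMODE lockMode)
{
    ShardInterval *shardInterval = LoadShardInterval(shardId);
    Oid relationId = shardInterval->relationId;

    if (!PartitionTable(relationId))
    {
        return;
    }

    /* resolve the parent relation and its colocated shard */
    int shardIndex = ShardIndex(shardInterval);
    Oid parentRelationId = PartitionParentOid(relationId);
    uint64 parentShardId = ColocatedShardIdInRelation(parentRelationId,
                                                      shardIndex);

    /* shard resource locks are advisory locks keyed on the shard id */
    LockShardResource(parentShardId, lockMode);
}

Since the call site above passes AccessExclusiveLock, a task list that touches a partition blocks on, and is blocked by, any concurrent holder of the parent shard's resource lock, which matches the distributed-deadlock trade-off described in the comment earlier in this diff.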
@@ -81,4 +81,4 @@ permutation "s2-begin" "s2-truncate" "s1-update" "s2-commit"
 permutation "s1-begin" "s1-update" "s2-detach" "s1-commit"
 permutation "s2-begin" "s2-detach" "s1-update" "s2-commit"
 permutation "s1-begin" "s1-update" "s2-alter-table" "s1-commit"
-permutation "s2-begin" "s2-alter-table" "s1-update" "s2-commit"
+permutation "s2-begin" "s2-alter-table" "s1-update" "s2-commit"
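These lines follow the PostgreSQL isolation tester format: each quoted name refers to a step declared earlier in the spec file, and a permutation replays the named steps in exactly the listed order, interleaving the two sessions. The step bodies are not visible in this hunk; the declarations below are hypothetical placeholders (table and column names invented) purely to illustrate how such steps are defined.

# Hypothetical step declarations, only to show the spec format; the real
# bodies are defined earlier in this file and are not part of this hunk.
session "s1"
step "s1-begin"  { BEGIN; }
step "s1-update" { UPDATE partitioned_table SET value = value + 1; }
step "s1-commit" { COMMIT; }

session "s2"
step "s2-begin"       { BEGIN; }
step "s2-alter-table" { ALTER TABLE partitioned_table ADD COLUMN extra int; }
step "s2-commit"      { COMMIT; }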