Fix new method of locking shard distribution metadata (#3407)

In #3374 a new way of locking shard distribution metadata was
implemented. However, this was only done in the function
`LockShardDistributionMetadata` and not in
`TryLockShardDistributionMetadata`. This is a problem, because it means
these locks do not block each other in some cases.
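To make the failure concrete, here is a minimal sketch of the mismatch. It is not part of this commit, and the helper TagsWouldEverConflict is hypothetical; it only uses the SET_LOCKTAG macros that appear in the diff below. For a hash-distributed shard in a colocation group, the two functions built different LOCKTAGs, and the lock manager keys locks by the full tag, so the locks never conflicted:

#include "postgres.h"
#include "miscadmin.h"
#include "storage/lock.h"
#include "distributed/resource_lock.h"

/*
 * Hypothetical helper, not from the commit: builds the tag each function
 * used before this fix and checks whether they could ever conflict.
 */
static bool
TagsWouldEverConflict(int64 shardId, uint32 colocationId, int shardIndex)
{
	LOCKTAG blockingTag;
	LOCKTAG tryTag;

	/* LockShardDistributionMetadata after #3374: colocated-shards tag */
	SET_LOCKTAG_COLOCATED_SHARDS_METADATA_RESOURCE(blockingTag, MyDatabaseId,
												   colocationId, shardIndex);

	/* TryLockShardDistributionMetadata before this fix: per-shard tag */
	SET_LOCKTAG_SHARD_METADATA_RESOURCE(tryTag, MyDatabaseId, shardId);

	/* the lock manager compares whole LOCKTAGs; different bytes, no conflict */
	return memcmp(&blockingTag, &tryTag, sizeof(LOCKTAG)) == 0;
}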

This commit fixes this issue by sharing the code that sets the locktag
between the two functions.
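For illustration, a hedged sketch of the behavior the shared helper restores: a non-blocking attempt in one backend now observes a lock taken by the blocking variant in another. The caller MoveShardIfIdle is hypothetical and not from the Citus source:

#include "postgres.h"
#include "storage/lockdefs.h"
#include "distributed/resource_lock.h"

/* Hypothetical caller, showing the semantics the fix restores. */
static void
MoveShardIfIdle(int64 shardId)
{
	/*
	 * Assume another backend already ran:
	 *     LockShardDistributionMetadata(shardId, ExclusiveLock);
	 */
	if (!TryLockShardDistributionMetadata(shardId, ExclusiveLock))
	{
		/* with the fix this branch is taken; before it, the lock "succeeded" */
		ereport(NOTICE, (errmsg("shard " INT64_FORMAT " is locked, skipping",
								shardId)));
		return;
	}

	/* safe to modify the shard's distribution metadata until transaction end */
}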
Jelte Fennema 2020-01-22 16:44:17 +01:00 committed by GitHub
parent cd5259a25a
commit c62b756f34
1 changed file with 31 additions and 25 deletions


@@ -78,6 +78,7 @@ static bool IsFirstWorkerNode();
 static void CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId,
 											  Oid oldRelationId, void *arg);
 static AclResult CitusLockTableAclCheck(Oid relationId, LOCKMODE lockmode, Oid userId);
+static void SetLocktagForShardDistributionMetadata(int64 shardId, LOCKTAG *tag);
 
 /* exports for SQL callable functions */
@@ -332,6 +333,34 @@ LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode)
 	const bool sessionLock = false;
 	const bool dontWait = false;
 
+	SetLocktagForShardDistributionMetadata(shardId, &tag);
+	(void) LockAcquire(&tag, lockMode, sessionLock, dontWait);
+}
+
+
+/*
+ * TryLockShardDistributionMetadata tries to grab a lock for distribution
+ * metadata related to the specified shard, returning false if the lock
+ * is currently taken. Any locks acquired using this method are released
+ * at transaction end.
+ */
+bool
+TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode)
+{
+	LOCKTAG tag;
+	const bool sessionLock = false;
+	const bool dontWait = true;
+
+	SetLocktagForShardDistributionMetadata(shardId, &tag);
+	bool lockAcquired = LockAcquire(&tag, lockMode, sessionLock, dontWait);
+
+	return lockAcquired;
+}
+
+
+static void
+SetLocktagForShardDistributionMetadata(int64 shardId, LOCKTAG *tag)
+{
 	ShardInterval *shardInterval = LoadShardInterval(shardId);
 	Oid distributedTableId = shardInterval->relationId;
 	DistTableCacheEntry *distributedTable = DistributedTableCacheEntry(
@@ -341,15 +370,13 @@ LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode)
 	if (colocationId == INVALID_COLOCATION_ID ||
 		distributedTable->partitionMethod != DISTRIBUTE_BY_HASH)
 	{
-		SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, MyDatabaseId, shardId);
+		SET_LOCKTAG_SHARD_METADATA_RESOURCE(*tag, MyDatabaseId, shardId);
 	}
 	else
 	{
-		SET_LOCKTAG_COLOCATED_SHARDS_METADATA_RESOURCE(tag, MyDatabaseId, colocationId,
-													   shardInterval->shardIndex);
+		SET_LOCKTAG_COLOCATED_SHARDS_METADATA_RESOURCE(*tag, MyDatabaseId, colocationId,
+													   shardInterval->shardIndex);
 	}
-
-	(void) LockAcquire(&tag, lockMode, sessionLock, dontWait);
 }
@@ -461,27 +488,6 @@ GetSortedReferenceShardIntervals(List *relationList)
 }
 
 
-/*
- * TryLockShardDistributionMetadata tries to grab a lock for distribution
- * metadata related to the specified shard, returning false if the lock
- * is currently taken. Any locks acquired using this method are released
- * at transaction end.
- */
-bool
-TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode)
-{
-	LOCKTAG tag;
-	const bool sessionLock = false;
-	const bool dontWait = true;
-
-	SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, MyDatabaseId, shardId);
-	bool lockAcquired = LockAcquire(&tag, lockMode, sessionLock, dontWait);
-
-	return lockAcquired;
-}
-
-
 /*
  * LockShardResource acquires a lock needed to modify data on a remote shard.
  * This task may be assigned to multiple backends at the same time, so the lock