mirror of https://github.com/citusdata/citus.git
Improve locking semantics for backend management
We use the backend shared memory lock to prevent new backends from becoming part of a new distributed transaction, and existing backends from leaving a distributed transaction, while we're reading all backends' data. The primary goal is to provide a consistent view of the current distributed transactions while doing deadlock detection.
pull/1516/head
parent 2e0916e15a
commit b5ea3ab6a3
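In LWLock terms the protocol is simple: anything that reads the whole backend array takes the lock in shared mode, and any backend that joins or leaves a distributed transaction takes it exclusively. A minimal sketch of that protocol, assuming PostgreSQL's LWLock API; the struct layout is illustrative and only the lock field mirrors the diff below:

#include "postgres.h"
#include "storage/lwlock.h"

typedef struct BackendManagementShmemData
{
    LWLock lock;    /* guards backends joining/leaving distributed transactions */
    /* ... per-backend slots live here in the real structure ... */
} BackendManagementShmemData;

static BackendManagementShmemData *backendManagementShmemData = NULL;

/*
 * The deadlock detector reads every backend's slot, so it takes the lock in
 * shared mode: concurrent readers are fine, but no backend can join or leave
 * a distributed transaction mid-scan.
 */
static void
ScanAllBackendsSketch(void)     /* hypothetical name */
{
    LWLockAcquire(&backendManagementShmemData->lock, LW_SHARED);
    /* ... read each backend's distributed transaction id ... */
    LWLockRelease(&backendManagementShmemData->lock);
}

/*
 * A backend changing its own membership takes the lock exclusively, so no
 * slot appears or disappears underneath a concurrent scan.
 */
static void
ChangeMembershipSketch(void)    /* hypothetical name */
{
    LWLockAcquire(&backendManagementShmemData->lock, LW_EXCLUSIVE);
    /* ... initialize or clear this backend's slot ... */
    LWLockRelease(&backendManagementShmemData->lock);
}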
@@ -89,6 +89,12 @@ assign_distributed_transaction_id(PG_FUNCTION_ARGS)
 		ereport(ERROR, (errmsg("backend is not ready for distributed transactions")));
 	}
+
+	/*
+	 * Note that we don't need to lock shared memory (i.e., LockBackendSharedMemory()) here
+	 * since this function is executed after AssignDistributedTransactionId() issued on the
+	 * initiator node, which already takes the required lock to enforce the consistency.
+	 */
 
 	SpinLockAcquire(&MyBackendData->mutex);
 
 	/* if an id is already assigned, release the lock and error */
@@ -236,8 +242,8 @@ get_all_active_transactions(PG_FUNCTION_ARGS)
 	memset(values, 0, sizeof(values));
 	memset(isNulls, false, sizeof(isNulls));
 
-	/* we're reading all the backend data, take a lock to prevent concurrent additions */
-	LWLockAcquire(AddinShmemInitLock, LW_SHARED);
+	/* we're reading all distributed transactions, prevent new backends */
+	LockBackendSharedMemory(LW_SHARED);
 
 	for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
 	{
@@ -272,7 +278,7 @@ get_all_active_transactions(PG_FUNCTION_ARGS)
 		memset(isNulls, false, sizeof(isNulls));
 	}
 
-	LWLockRelease(AddinShmemInitLock);
+	UnlockBackendSharedMemory();
 
 	/* clean up and return the tuplestore */
 	tuplestore_donestoring(tupleStore);
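The old code borrowed AddinShmemInitLock, whose job is serializing extensions' shared memory initialization, not guarding backend scans; the dedicated lock states the actual intent and avoids contending with unrelated users of that lock. A sketch of the resulting two-level scan; BackendSlotSketch, backendSlots, and CollectTransactionNumbersSketch are illustrative names, not from the commit:

#include "postgres.h"
#include "miscadmin.h"          /* MaxBackends */
#include "storage/lwlock.h"
#include "storage/spin.h"

/* hypothetical per-backend slot; the real BackendData carries more fields */
typedef struct BackendSlotSketch
{
    slock_t mutex;
    uint64 transactionNumber;
} BackendSlotSketch;

extern BackendSlotSketch *backendSlots;     /* assumed shared memory array */
extern void LockBackendSharedMemory(LWLockMode lockMode);
extern void UnlockBackendSharedMemory(void);

static void
CollectTransactionNumbersSketch(uint64 *output)
{
    int backendIndex = 0;

    /* outer lock: the set of active backends cannot change during the scan */
    LockBackendSharedMemory(LW_SHARED);

    for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
    {
        BackendSlotSketch *slot = &backendSlots[backendIndex];

        /* inner lock: each slot's fields are read atomically via its spinlock */
        SpinLockAcquire(&slot->mutex);
        output[backendIndex] = slot->transactionNumber;
        SpinLockRelease(&slot->mutex);
    }

    UnlockBackendSharedMemory();
}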
@@ -400,6 +406,8 @@ InitializeBackendData(void)
 
 	Assert(MyBackendData);
 
+	LockBackendSharedMemory(LW_EXCLUSIVE);
+
 	SpinLockAcquire(&MyBackendData->mutex);
 
 	MyBackendData->databaseId = MyDatabaseId;
@@ -408,6 +416,8 @@ InitializeBackendData(void)
 	MyBackendData->transactionId.timestamp = 0;
 
 	SpinLockRelease(&MyBackendData->mutex);
+
+	UnlockBackendSharedMemory();
 }
 
 
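Note the nesting: the LWLock is taken before the per-backend spinlock and released after it. LWLockAcquire() may sleep, while a PostgreSQL spinlock must only guard a few instructions with no sleeping in between, so this order cannot be reversed. A sketch of the rule, with an illustrative slot type (the real BackendData and its field types differ):

#include "postgres.h"
#include "miscadmin.h"              /* MyDatabaseId */
#include "datatype/timestamp.h"     /* TimestampTz */
#include "storage/lwlock.h"
#include "storage/spin.h"

typedef struct BackendDataSketch    /* illustrative stand-in for BackendData */
{
    slock_t mutex;
    Oid databaseId;
    struct { TimestampTz timestamp; } transactionId;
} BackendDataSketch;

extern BackendDataSketch *MyBackendData;    /* assumed per-backend slot pointer */
extern void LockBackendSharedMemory(LWLockMode lockMode);
extern void UnlockBackendSharedMemory(void);

static void
ResetMyBackendSlotSketch(void)      /* hypothetical helper */
{
    LockBackendSharedMemory(LW_EXCLUSIVE);  /* may sleep; no spinlock held yet */

    SpinLockAcquire(&MyBackendData->mutex); /* short critical section: plain stores only */
    MyBackendData->databaseId = MyDatabaseId;
    MyBackendData->transactionId.timestamp = 0;
    SpinLockRelease(&MyBackendData->mutex);

    UnlockBackendSharedMemory();            /* release in reverse acquisition order */
}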
@@ -433,6 +443,35 @@ UnSetDistributedTransactionId(void)
 }
 
 
+/*
+ * LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the
+ * shared memory lock.
+ *
+ * We use the backend shared memory lock for preventing new backends to be part
+ * of a new distributed transaction or an existing backend to leave a distributed
+ * transaction while we're reading the all backends' data.
+ *
+ * The primary goal is to provide consistent view of the current distributed
+ * transactions while doing the deadlock detection.
+ */
+void
+LockBackendSharedMemory(LWLockMode lockMode)
+{
+	LWLockAcquire(&backendManagementShmemData->lock, lockMode);
+}
+
+
+/*
+ * UnlockBackendSharedMemory is a simple wrapper around LWLockRelease on the
+ * shared memory lock.
+ */
+void
+UnlockBackendSharedMemory(void)
+{
+	LWLockRelease(&backendManagementShmemData->lock);
+}
+
+
 /*
  * GetCurrentDistributedTransactionId reads the backend's distributed transaction id and
  * returns a copy of it.
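The wrappers assume backendManagementShmemData->lock already lives in shared memory. For context, a sketch of how such a lock is typically created in an extension's shared memory startup hook; the segment name and tranche handling are assumptions, not taken from the commit. Fittingly, AddinShmemInitLock appears here in its intended role, the one the old get_all_active_transactions() code had borrowed it from:

#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

typedef struct BackendManagementShmemData
{
    LWLock lock;
    /* ... backend slots ... */
} BackendManagementShmemData;

static BackendManagementShmemData *backendManagementShmemData = NULL;

static void
BackendManagementShmemInitSketch(void)      /* hypothetical hook body */
{
    bool alreadyInitialized = false;

    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

    backendManagementShmemData =
        ShmemInitStruct("Backend Management Shmem",     /* assumed segment name */
                        sizeof(BackendManagementShmemData),
                        &alreadyInitialized);

    if (!alreadyInitialized)
    {
        /* a fresh tranche id for the new lock; tranche registration elided */
        LWLockInitialize(&backendManagementShmemData->lock,
                         LWLockNewTrancheId());
    }

    LWLockRelease(AddinShmemInitLock);
}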
@@ -503,12 +503,17 @@ BuildWaitGraphForSourceNode(int sourceNodeId)
 /*
  * LockLockData takes locks the shared lock data structure, which prevents
  * concurrent lock acquisitions/releases.
+ *
+ * The function also acquires lock on the backend shared memory to prevent
+ * new backends to start.
  */
 static void
 LockLockData(void)
 {
 	int partitionNum = 0;
 
+	LockBackendSharedMemory(LW_SHARED);
+
 	for (partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++)
 	{
 		LWLockAcquire(LockHashPartitionLockByIndex(partitionNum), LW_SHARED);
@@ -520,6 +525,9 @@ LockLockData(void)
  * UnlockLockData unlocks the locks on the shared lock data structure in reverse
  * order since LWLockRelease searches the given lock from the end of the
  * held_lwlocks array.
+ *
+ * The function also releases the shared memory lock to allow new backends to
+ * start.
  */
 static void
 UnlockLockData(void)
@@ -530,6 +538,8 @@ UnlockLockData(void)
 	{
 		LWLockRelease(LockHashPartitionLockByIndex(partitionNum));
 	}
+
+	UnlockBackendSharedMemory();
 }
 
 
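The held_lwlocks remark is worth spelling out: LWLockRelease() locates the lock to release by scanning the calling backend's held_lwlocks array from its end, so releasing in the reverse of acquisition order makes every release hit on the first probe. A self-contained sketch of that acquire/release pattern, leaving out the backend shared memory lock the real functions also take:

#include "postgres.h"
#include "storage/lock.h"       /* LockHashPartitionLockByIndex */
#include "storage/lwlock.h"     /* NUM_LOCK_PARTITIONS */

static void
ScanLockTablesSketch(void)      /* hypothetical name */
{
    int partitionNum = 0;

    /* acquire every lock manager partition in ascending index order */
    for (partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++)
    {
        LWLockAcquire(LockHashPartitionLockByIndex(partitionNum), LW_SHARED);
    }

    /* ... walk the shared lock tables, e.g. to build a wait graph ... */

    /* release in descending order: cheapest for LWLockRelease's search */
    for (partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--)
    {
        LWLockRelease(LockHashPartitionLockByIndex(partitionNum));
    }
}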
@@ -35,6 +35,8 @@ typedef struct BackendData
 
 extern void InitializeBackendManagement(void);
 extern void InitializeBackendData(void);
+extern void LockBackendSharedMemory(LWLockMode lockMode);
+extern void UnlockBackendSharedMemory(void);
 extern void UnSetDistributedTransactionId(void);
 extern void AssignDistributedTransactionId(void);
 extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);