mirror of https://github.com/citusdata/citus.git
Merge pull request #2384 from citusdata/fix_stuck_spinlock_max_backend
Prevent overflow of memory accesses during deadlock detection
pull/2376/head
commit 8762af4473
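The commit replaces MaxBackends with TotalProcs (defined below as MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts) wherever the deadlock detector sizes or scans per-process arrays, because PostgreSQL's shared PGPROC array also holds auxiliary processes and prepared transactions. The following standalone sketch is not part of the commit; it only models the arithmetic, and the concrete numbers are illustrative assumptions rather than PostgreSQL defaults.

/*
 * Standalone sketch (not from the commit): models why an array sized by
 * MaxBackends alone can be indexed out of bounds once auxiliary processes
 * and prepared transactions occupy PGPROC slots. All numbers are
 * illustrative assumptions, not PostgreSQL defaults.
 */
#include <stdio.h>

#define MAX_BACKENDS 100           /* stand-in for MaxBackends */
#define NUM_AUXILIARY_PROCS 4      /* stand-in for PostgreSQL's constant */
#define MAX_PREPARED_XACTS 20      /* stand-in for max_prepared_xacts */

/* mirrors the TotalProcs macro the commit adds to backend_data.h */
#define TOTAL_PROCS (MAX_BACKENDS + NUM_AUXILIARY_PROCS + MAX_PREPARED_XACTS)

int
main(void)
{
	/*
	 * In PostgreSQL's shared PGPROC array, prepared-transaction slots come
	 * after the regular backends and the auxiliary processes, so their
	 * pgprocno is at least MAX_BACKENDS.
	 */
	int preparedXactProcno = MAX_BACKENDS + NUM_AUXILIARY_PROCS;

	printf("entries when sized by MaxBackends: %d\n", MAX_BACKENDS);
	printf("entries when sized by TotalProcs:  %d\n", TOTAL_PROCS);
	printf("pgprocno of the first prepared transaction: %d (past the end of the former)\n",
		   preparedXactProcno);

	return 0;
}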
@@ -541,9 +541,11 @@ BackendManagementShmemInit(void)
 
 	/*
	 * We need to init per backend's spinlock before any backend
-	 * starts its execution.
+	 * starts its execution. Note that we initialize TotalProcs (e.g., not
+	 * MaxBackends) since some of the blocking processes could be prepared
+	 * transactions, which aren't covered by MaxBackends.
	 */
-	for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
+	for (backendIndex = 0; backendIndex < TotalProcs; ++backendIndex)
	{
		SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex);
	}
@@ -568,7 +570,7 @@ BackendManagementShmemSize(void)
	Size size = 0;
 
	size = add_size(size, sizeof(BackendManagementShmemData));
-	size = add_size(size, mul_size(sizeof(BackendData), MaxBackends));
+	size = add_size(size, mul_size(sizeof(BackendData), TotalProcs));
 
	return size;
 }
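BackendManagementShmemSize and BackendManagementShmemInit have to agree on the element count: the size hook reserves the shared-memory segment and the init hook then touches a spinlock in every slot, so sizing with MaxBackends while initializing TotalProcs slots (or vice versa) would read or write past the reservation. A simplified, self-contained model of that pairing follows; the names and fixed slot count are assumptions, not Citus code.

/*
 * Simplified model (not Citus code): the size and init paths derive the slot
 * count from the same macro, otherwise init would touch memory beyond the
 * reserved segment.
 */
#include <assert.h>
#include <stdlib.h>

#define SLOT_COUNT 124             /* stands in for TotalProcs */

typedef struct SlotData { int mutex; } SlotData;
typedef struct Shmem { size_t size; SlotData slots[]; } Shmem;

static size_t
ModelShmemSize(void)
{
	/* mirrors the add_size/mul_size pattern: header plus one SlotData per slot */
	return sizeof(Shmem) + sizeof(SlotData) * SLOT_COUNT;
}

static void
ModelShmemInit(Shmem *shmem)
{
	/* must loop over the same SLOT_COUNT used when sizing the segment */
	for (int slotIndex = 0; slotIndex < SLOT_COUNT; ++slotIndex)
	{
		shmem->slots[slotIndex].mutex = 0;
	}
}

int
main(void)
{
	Shmem *shmem = calloc(1, ModelShmemSize());

	assert(shmem != NULL);
	shmem->size = ModelShmemSize();
	ModelShmemInit(shmem);

	free(shmem);
	return 0;
}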
@@ -398,12 +398,12 @@ BuildLocalWaitGraph(void)
	 */
	waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
	waitGraph->localNodeId = GetLocalGroupId();
-	waitGraph->allocatedSize = MaxBackends * 3;
+	waitGraph->allocatedSize = TotalProcs * 3;
	waitGraph->edgeCount = 0;
	waitGraph->edges = (WaitEdge *) palloc(waitGraph->allocatedSize * sizeof(WaitEdge));
 
-	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * MaxBackends);
-	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * MaxBackends);
+	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * TotalProcs);
+	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * TotalProcs);
	remaining.procCount = 0;
 
	LockLockData();
@@ -416,7 +416,7 @@ BuildLocalWaitGraph(void)
	 */
 
	/* build list of starting procs */
-	for (curBackend = 0; curBackend < MaxBackends; curBackend++)
+	for (curBackend = 0; curBackend < TotalProcs; curBackend++)
	{
		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
		BackendData currentBackendData;
@@ -762,7 +762,7 @@ AddProcToVisit(PROCStack *remaining, PGPROC *proc)
		return;
	}
 
-	Assert(remaining->procCount < MaxBackends);
+	Assert(remaining->procCount < TotalProcs);
 
	remaining->procs[remaining->procCount++] = proc;
	remaining->procAdded[proc->pgprocno] = true;
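AddProcToVisit pushes each PGPROC at most once: procAdded is indexed by pgprocno and guards against duplicates, so procCount stays bounded by the number of PGPROC slots, and the Assert above holds only when both arrays are sized by TotalProcs. A rough standalone sketch of that push-once pattern follows; the types and numbers are illustrative, not the Citus structures.

/*
 * Rough sketch (not Citus code) of the push-at-most-once stack used during
 * the wait-graph walk: procAdded[] deduplicates by slot number, so procCount
 * can never exceed the total number of slots.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TOTAL_PROCS 8              /* stand-in for TotalProcs */

typedef struct ProcStack
{
	int procs[TOTAL_PROCS];        /* slot numbers still to visit */
	bool procAdded[TOTAL_PROCS];   /* indexed by slot number, like pgprocno */
	int procCount;
} ProcStack;

static void
AddProcToVisit(ProcStack *remaining, int procno)
{
	if (remaining->procAdded[procno])
	{
		return;                    /* already queued once */
	}

	assert(remaining->procCount < TOTAL_PROCS);

	remaining->procs[remaining->procCount++] = procno;
	remaining->procAdded[procno] = true;
}

int
main(void)
{
	ProcStack remaining;

	memset(&remaining, 0, sizeof(remaining));

	/* pushing the same slot twice leaves a single entry on the stack */
	AddProcToVisit(&remaining, 5);
	AddProcToVisit(&remaining, 5);

	printf("queued entries: %d\n", remaining.procCount);
	return 0;
}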
@@ -13,6 +13,7 @@
 #define BACKEND_DATA_H
 
 
+#include "access/twophase.h"
 #include "datatype/timestamp.h"
 #include "distributed/transaction_identifier.h"
 #include "nodes/pg_list.h"
@@ -21,6 +22,9 @@
 #include "storage/s_lock.h"
 
 
+#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)
+
+
 /*
  * Each backend's active distributed transaction information is tracked via
  * BackendData in shared memory.