Merge pull request #2384 from citusdata/fix_stuck_spinlock_max_backend

Prevent overflow of memory accesses during deadlock detection
pull/2376/head
Önder Kalacı 2018-09-17 18:08:31 +03:00 committed by GitHub
commit 8762af4473
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 14 additions and 8 deletions

View File

@@ -541,9 +541,11 @@ BackendManagementShmemInit(void)
 	/*
 	 * We need to init per backend's spinlock before any backend
-	 * starts its execution.
+	 * starts its execution. Note that we initialize TotalProcs (e.g., not
+	 * MaxBackends) since some of the blocking processes could be prepared
+	 * transactions, which aren't covered by MaxBackends.
 	 */
-	for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
+	for (backendIndex = 0; backendIndex < TotalProcs; ++backendIndex)
 	{
 		SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex);
 	}
@@ -568,7 +570,7 @@ BackendManagementShmemSize(void)
 	Size size = 0;

 	size = add_size(size, sizeof(BackendManagementShmemData));
-	size = add_size(size, mul_size(sizeof(BackendData), MaxBackends));
+	size = add_size(size, mul_size(sizeof(BackendData), TotalProcs));

 	return size;
 }

View File

@@ -398,12 +398,12 @@ BuildLocalWaitGraph(void)
 	 */
 	waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
 	waitGraph->localNodeId = GetLocalGroupId();
-	waitGraph->allocatedSize = MaxBackends * 3;
+	waitGraph->allocatedSize = TotalProcs * 3;
 	waitGraph->edgeCount = 0;
 	waitGraph->edges = (WaitEdge *) palloc(waitGraph->allocatedSize * sizeof(WaitEdge));

-	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * MaxBackends);
-	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * MaxBackends);
+	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * TotalProcs);
+	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * TotalProcs);
 	remaining.procCount = 0;

 	LockLockData();
@@ -416,7 +416,7 @@ BuildLocalWaitGraph(void)
 	 */

 	/* build list of starting procs */
-	for (curBackend = 0; curBackend < MaxBackends; curBackend++)
+	for (curBackend = 0; curBackend < TotalProcs; curBackend++)
 	{
 		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
 		BackendData currentBackendData;
@@ -762,7 +762,7 @@ AddProcToVisit(PROCStack *remaining, PGPROC *proc)
 		return;
 	}

-	Assert(remaining->procCount < MaxBackends);
+	Assert(remaining->procCount < TotalProcs);

 	remaining->procs[remaining->procCount++] = proc;
 	remaining->procAdded[proc->pgprocno] = true;

View File

@@ -13,6 +13,7 @@
 #define BACKEND_DATA_H

+#include "access/twophase.h"
 #include "datatype/timestamp.h"
 #include "distributed/transaction_identifier.h"
 #include "nodes/pg_list.h"
@@ -21,6 +22,9 @@
 #include "storage/s_lock.h"

+#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)
+
 /*
  * Each backend's active distributed transaction information is tracked via
  * BackendData in shared memory.