Prevent overflow of memory accesses during deadlock detection

In the distributed deadlock detection design, we concluded that prepared transactions
cannot be part of a distributed deadlock: (a) by the time a transaction is prepared, it
has already acquired all of its locks, so it cannot be blocked waiting for one, and
(b) even if other processes block on a prepared transaction, that transaction will
eventually be committed (or rolled back) and the system will continue operating.

With the above in mind, we made a mistake in the memory allocations. For each
initialized backend we keep a `BackendData` struct, and the bug we introduced is that
we assumed there would only ever be `MaxBackends` of them. However, `MaxBackends`
doesn't include prepared transactions and auxiliary processes. If you check Postgres'
`InitProcGlobal`, you'll see that
`TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;`

This commit aligns our allocations, and the number of procs we process, with that total.
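To make the overflow concrete, here is a minimal sketch of the access pattern (the
helper name `GetBackendDataSketch` is hypothetical, simplified from the real code):
the shared-memory array was sized for `MaxBackends` entries, but it is indexed by
`pgprocno`, which ranges up to `TotalProcs - 1`.

/* Sketch only; assumes PostgreSQL's miscadmin.h, storage/proc.h, access/twophase.h. */
#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)

static BackendData *backends;   /* before this commit: sized for MaxBackends entries */

static BackendData *
GetBackendDataSketch(PGPROC *proc)
{
    /*
     * The pgprocno of a prepared transaction's dummy PGPROC (or of an
     * auxiliary process) can be >= MaxBackends, so this indexes past
     * the MaxBackends-sized allocation.
     */
    return &backends[proc->pgprocno];
}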
Branch: pull/2384/head
Author: Onder Kalaci, 2018-09-14 14:51:35 +03:00
Parent: e0942d9df5
Commit: a94184fff8

3 changed files with 14 additions and 8 deletions

src/backend/distributed/transaction/backend_data.c

@@ -541,9 +541,11 @@ BackendManagementShmemInit(void)
     /*
      * We need to init per backend's spinlock before any backend
-     * starts its execution.
+     * starts its execution. Note that we initialize TotalProcs (e.g., not
+     * MaxBackends) since some of the blocking processes could be prepared
+     * transactions, which aren't covered by MaxBackends.
      */
-    for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
+    for (backendIndex = 0; backendIndex < TotalProcs; ++backendIndex)
     {
         SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex);
     }
 
@@ -568,7 +570,7 @@ BackendManagementShmemSize(void)
     Size size = 0;
 
     size = add_size(size, sizeof(BackendManagementShmemData));
-    size = add_size(size, mul_size(sizeof(BackendData), MaxBackends));
+    size = add_size(size, mul_size(sizeof(BackendData), TotalProcs));
 
     return size;
 }

src/backend/distributed/transaction/lock_graph.c

@@ -398,12 +398,12 @@ BuildLocalWaitGraph(void)
      */
     waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
     waitGraph->localNodeId = GetLocalGroupId();
-    waitGraph->allocatedSize = MaxBackends * 3;
+    waitGraph->allocatedSize = TotalProcs * 3;
     waitGraph->edgeCount = 0;
     waitGraph->edges = (WaitEdge *) palloc(waitGraph->allocatedSize * sizeof(WaitEdge));
 
-    remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * MaxBackends);
-    remaining.procAdded = (bool *) palloc0(sizeof(bool *) * MaxBackends);
+    remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * TotalProcs);
+    remaining.procAdded = (bool *) palloc0(sizeof(bool *) * TotalProcs);
     remaining.procCount = 0;
 
     LockLockData();
@@ -416,7 +416,7 @@ BuildLocalWaitGraph(void)
      */
 
     /* build list of starting procs */
-    for (curBackend = 0; curBackend < MaxBackends; curBackend++)
+    for (curBackend = 0; curBackend < TotalProcs; curBackend++)
     {
         PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
         BackendData currentBackendData;
@@ -762,7 +762,7 @@ AddProcToVisit(PROCStack *remaining, PGPROC *proc)
         return;
     }
 
-    Assert(remaining->procCount < MaxBackends);
+    Assert(remaining->procCount < TotalProcs);
 
     remaining->procs[remaining->procCount++] = proc;
     remaining->procAdded[proc->pgprocno] = true;

src/include/distributed/backend_data.h

@@ -13,6 +13,7 @@
 #define BACKEND_DATA_H
 
+#include "access/twophase.h"
 #include "datatype/timestamp.h"
 #include "distributed/transaction_identifier.h"
 #include "nodes/pg_list.h"
@@ -21,6 +22,9 @@
 #include "storage/s_lock.h"
 
+#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)
+
+
 /*
  * Each backend's active distributed transaction information is tracked via
  * BackendData in shared memory.
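Each identifier in the `TotalProcs` macro lives in a different PostgreSQL header, which
is why the hunk above adds `access/twophase.h`. As a sketch, a standalone consumer of
the macro would need roughly these includes (assuming stock PostgreSQL header locations):

#include "miscadmin.h"          /* declares MaxBackends */
#include "storage/proc.h"       /* defines NUM_AUXILIARY_PROCS */
#include "access/twophase.h"    /* declares max_prepared_xacts */

#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)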