Ensure an initialized MaxBackends is used

PostgreSQL loads shared libraries before calculating MaxBackends.
However, Citus relies on MaxBackends being set. Thus, with this
commit we repeat the same steps to calculate MaxBackends while
Citus is being loaded (i.e., when _PG_init is called).

Note that this is safe since all the values used to calculate
MaxBackends are PGC_POSTMASTER GUCs and a constant.
pull/2514/head
Onder Kalaci 2018-12-01 21:06:14 +03:00
parent b6ebd791a6
commit 621ccf3946
3 changed files with 60 additions and 10 deletions
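
For context, PostgreSQL only computes MaxBackends in InitializeMaxBackends(), which runs after shared_preload_libraries have been processed, so the value is not yet set when an extension's _PG_init runs. The TotalProcCount() added by this commit repeats the same arithmetic from the underlying PGC_POSTMASTER GUCs. Below is a minimal sketch of that calculation; the extern declarations stand in for the PostgreSQL headers that normally provide these symbols, and SKETCH_NUM_AUXILIARY_PROCS is an illustrative stand-in for NUM_AUXILIARY_PROCS from storage/proc.h.

/* PGC_POSTMASTER GUCs that feed PostgreSQL's InitializeMaxBackends() */
extern int MaxConnections;
extern int autovacuum_max_workers;
extern int max_worker_processes;
extern int max_prepared_xacts;

/* illustrative stand-in for NUM_AUXILIARY_PROCS (storage/proc.h) */
#define SKETCH_NUM_AUXILIARY_PROCS 4

static int
SketchTotalProcCount(void)
{
    /* MaxBackends = connections + autovacuum workers + their launcher + bgworkers */
    int maxBackends = MaxConnections + autovacuum_max_workers + 1 +
                      max_worker_processes;

    /* also reserve slots for auxiliary procs and prepared (2PC) transactions */
    return maxBackends + SKETCH_NUM_AUXILIARY_PROCS + max_prepared_xacts;
}

Because every term is fixed at postmaster start, running this arithmetic from _PG_init yields the same value PostgreSQL later assigns to MaxBackends.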


@@ -25,6 +25,7 @@
 #include "distributed/remote_commands.h"
 #include "distributed/transaction_identifier.h"
 #include "nodes/execnodes.h"
+#include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */
 #include "storage/ipc.h"
 #include "storage/lmgr.h"
 #include "storage/lwlock.h"
@@ -512,6 +513,7 @@ BackendManagementShmemInit(void)
 	if (!alreadyInitialized)
 	{
 		int backendIndex = 0;
+		int totalProcs = 0;
 		char *trancheName = "Backend Management Tranche";

 #if (PG_VERSION_NUM >= 100000)
@@ -557,7 +559,8 @@ BackendManagementShmemInit(void)
 		 * We also initiate initiatorNodeIdentifier to -1, which can never be
 		 * used as a node id.
 		 */
-		for (backendIndex = 0; backendIndex < TotalProcs; ++backendIndex)
+		totalProcs = TotalProcCount();
+		for (backendIndex = 0; backendIndex < totalProcs; ++backendIndex)
 		{
 			backendManagementShmemData->backends[backendIndex].citusBackend.
 				initiatorNodeIdentifier = -1;
@@ -582,14 +585,62 @@ static size_t
 BackendManagementShmemSize(void)
 {
 	Size size = 0;
+	int totalProcs = TotalProcCount();

 	size = add_size(size, sizeof(BackendManagementShmemData));
-	size = add_size(size, mul_size(sizeof(BackendData), TotalProcs));
+	size = add_size(size, mul_size(sizeof(BackendData), totalProcs));

 	return size;
 }


+/*
+ * TotalProcCount returns the total number of processes that could run via
+ * the current postgres server. See the comments within the function for
+ * the details.
+ *
+ * One warning for readers: Citus enforces being loaded as the first
+ * extension in shared_preload_libraries. However, if any other extension
+ * overrides MaxConnections, autovacuum_max_workers or max_worker_processes,
+ * the reasoning in this function may not work as expected. Given that this
+ * is not a usual pattern for extensions, we consider Citus' behaviour good
+ * enough for now.
+ */
+int
+TotalProcCount(void)
+{
+	int maxBackends = 0;
+	int totalProcs = 0;
+
+#ifdef WIN32
+	/* autovacuum_max_workers is not PGDLLIMPORT, so use a high estimate for windows */
+	int estimatedMaxAutovacuumWorkers = 30;
+
+	maxBackends =
+		MaxConnections + estimatedMaxAutovacuumWorkers + 1 + max_worker_processes;
+#else
+	/*
+	 * We're simply imitating PostgreSQL's InitializeMaxBackends(). Given that
+	 * all the items used here are PGC_POSTMASTER GUCs, it is safe to access
+	 * them at any point during execution, even before InitializeMaxBackends()
+	 * is called.
+	 */
+	maxBackends = MaxConnections + autovacuum_max_workers + 1 + max_worker_processes;
+#endif
+
+	/*
+	 * We prefer to maintain space for auxiliary procs or prepared transactions
+	 * in the backend space because they could be blocking processes and our
+	 * current implementation of distributed deadlock detection could process
+	 * them as regular backends. In the future, we could consider changing the
+	 * deadlock detection algorithm to ignore auxiliary procs or prepared
+	 * transactions and save some space.
+	 */
+	totalProcs = maxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
+
+	return totalProcs;
+}
+
+
 /*
  * InitializeBackendData initialises MyBackendData to the shared memory segment
  * belonging to the current backend.
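
The reason BackendManagementShmemSize() can no longer rely on MaxBackends is that the shared memory reservation is requested from _PG_init, before PostgreSQL has run InitializeMaxBackends(). The sketch below shows the standard PostgreSQL registration pattern under that assumption; SketchRegisterBackendShmem and the stub bodies are illustrative stand-ins (only RequestAddinShmemSpace and shmem_startup_hook are the stock PostgreSQL APIs, and the real definitions of the two Backend* functions are the ones in the diff above).

#include "postgres.h"
#include "storage/ipc.h"   /* RequestAddinShmemSpace, shmem_startup_hook */

static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

/* stand-ins for the functions shown in the diff above */
static size_t
BackendManagementShmemSize(void)
{
    return 1024;           /* illustrative; the real version sizes via TotalProcCount() */
}

static void
BackendManagementShmemInit(void)
{
    /* the real version initializes the per-backend array in shared memory */
}

/* illustrative registration helper, called from _PG_init */
static void
SketchRegisterBackendShmem(void)
{
    /*
     * _PG_init runs before InitializeMaxBackends(), so the requested size
     * must be derived from GUCs (TotalProcCount()) rather than MaxBackends.
     */
    RequestAddinShmemSpace(BackendManagementShmemSize());

    /* chain the previous hook, then let our hook set up the backend array */
    prev_shmem_startup_hook = shmem_startup_hook;
    shmem_startup_hook = BackendManagementShmemInit;
}

This is also why the old TotalProcs macro (removed from the header below) was unsafe: it expanded to an expression over MaxBackends, which has not been initialized yet when the shared memory request is made.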


@@ -389,6 +389,7 @@ BuildLocalWaitGraph(void)
 	WaitGraph *waitGraph = NULL;
 	int curBackend = 0;
 	PROCStack remaining;
+	int totalProcs = TotalProcCount();

 	/*
 	 * Try hard to avoid allocations while holding lock. Thus we pre-allocate
@@ -398,12 +399,12 @@ BuildLocalWaitGraph(void)
 	 */
 	waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
 	waitGraph->localNodeId = GetLocalGroupId();
-	waitGraph->allocatedSize = TotalProcs * 3;
+	waitGraph->allocatedSize = totalProcs * 3;
 	waitGraph->edgeCount = 0;
 	waitGraph->edges = (WaitEdge *) palloc(waitGraph->allocatedSize * sizeof(WaitEdge));

-	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * TotalProcs);
-	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * TotalProcs);
+	remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * totalProcs);
+	remaining.procAdded = (bool *) palloc0(sizeof(bool *) * totalProcs);
 	remaining.procCount = 0;

 	LockLockData();
@@ -416,7 +417,7 @@ BuildLocalWaitGraph(void)
 	 */

 	/* build list of starting procs */
-	for (curBackend = 0; curBackend < TotalProcs; curBackend++)
+	for (curBackend = 0; curBackend < totalProcs; curBackend++)
 	{
 		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
 		BackendData currentBackendData;
@@ -762,7 +763,7 @@ AddProcToVisit(PROCStack *remaining, PGPROC *proc)
 		return;
 	}

-	Assert(remaining->procCount < TotalProcs);
+	Assert(remaining->procCount < TotalProcCount());

 	remaining->procs[remaining->procCount++] = proc;
 	remaining->procAdded[proc->pgprocno] = true;


@@ -22,9 +22,6 @@
 #include "storage/s_lock.h"

-#define TotalProcs (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)
-
 /*
  * CitusInitiatedBackend keeps some information about the backends that are
  * initiated by Citus.
@@ -58,6 +55,7 @@ typedef struct BackendData

 extern void InitializeBackendManagement(void);
+extern int TotalProcCount(void);
 extern void InitializeBackendData(void);
 extern void LockBackendSharedMemory(LWLockMode lockMode);
 extern void UnlockBackendSharedMemory(void);