mirror of https://github.com/citusdata/citus.git
Add infrastructure for distributed deadlock detection
This commit adds all the necessary pieces to do distributed deadlock detection. Each distributed transaction is already assigned a distributed transaction id, introduced with 3369f3486f. The dependencies among the distributed transactions are gathered with 80ea233ec1. With this commit, we implement a DFS (depth-first search) on the dependency graph and search for cycles. Finding a cycle reveals a distributed deadlock. Once we find the deadlock, we examine the path on which the cycle exists and cancel the youngest distributed transaction. Note that we're not yet enabling deadlock detection by default with this commit.
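
As background for the approach described above, here is a minimal, self-contained C sketch of cycle detection by depth-first search over a toy waits-for graph. The node layout, names, and fixed-size arrays are illustrative assumptions only; the implementation added by this commit (CheckDeadlockForTransactionNode below) walks a hash table of TransactionNode entries iteratively and also records the cycle path so the youngest participant can be cancelled.

/*
 * Toy sketch of DFS-based cycle detection; not the Citus implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

typedef struct ToyNode
{
	int neighborCount;
	int neighbors[MAX_NODES];   /* indices of nodes this node waits for */
	bool visited;
} ToyNode;

/* returns true if startIndex can be reached again starting from nodeIndex */
static bool
HasCycleFrom(ToyNode *nodes, int nodeIndex, int startIndex)
{
	for (int i = 0; i < nodes[nodeIndex].neighborCount; i++)
	{
		int next = nodes[nodeIndex].neighbors[i];

		if (next == startIndex)
		{
			return true;        /* reached the starting node again: a cycle */
		}

		if (!nodes[next].visited)
		{
			nodes[next].visited = true;

			if (HasCycleFrom(nodes, next, startIndex))
			{
				return true;
			}
		}
	}

	return false;
}

int
main(void)
{
	/* two transactions waiting on each other: 0 -> 1 -> 0 */
	ToyNode nodes[MAX_NODES] = { 0 };
	nodes[0].neighbors[nodes[0].neighborCount++] = 1;
	nodes[1].neighbors[nodes[1].neighborCount++] = 0;

	nodes[0].visited = true;
	printf("deadlock: %s\n", HasCycleFrom(nodes, 0, 0) ? "yes" : "no");

	return 0;
}
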
parent d19818de21
commit a333c9f16c
@@ -11,7 +11,7 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
	6.0-1 6.0-2 6.0-3 6.0-4 6.0-5 6.0-6 6.0-7 6.0-8 6.0-9 6.0-10 6.0-11 6.0-12 6.0-13 6.0-14 6.0-15 6.0-16 6.0-17 6.0-18 \
	6.1-1 6.1-2 6.1-3 6.1-4 6.1-5 6.1-6 6.1-7 6.1-8 6.1-9 6.1-10 6.1-11 6.1-12 6.1-13 6.1-14 6.1-15 6.1-16 6.1-17 \
	6.2-1 6.2-2 6.2-3 6.2-4 \
	7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8 7.0-9 7.0-10 7.0-11 7.0-12 7.0-13
	7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8 7.0-9 7.0-10 7.0-11 7.0-12 7.0-13 7.0-14

# All citus--*.sql files in the source directory
DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))

@@ -165,6 +165,8 @@ $(EXTENSION)--7.0-12.sql: $(EXTENSION)--7.0-11.sql $(EXTENSION)--7.0-11--7.0-12.
	cat $^ > $@
$(EXTENSION)--7.0-13.sql: $(EXTENSION)--7.0-12.sql $(EXTENSION)--7.0-12--7.0-13.sql
	cat $^ > $@
$(EXTENSION)--7.0-14.sql: $(EXTENSION)--7.0-13.sql $(EXTENSION)--7.0-13--7.0-14.sql
	cat $^ > $@

NO_PGXS = 1

@@ -44,3 +44,4 @@ COMMENT ON FUNCTION citus_drop_trigger()
    IS 'perform checks and actions at the end of DROP actions';

RESET search_path;

@@ -0,0 +1,13 @@
/* citus--7.0-13--7.0-14.sql */

SET search_path = 'pg_catalog';

CREATE OR REPLACE FUNCTION check_distributed_deadlocks()
RETURNS BOOL
LANGUAGE 'c' STRICT
AS $$MODULE_PATHNAME$$, $$check_distributed_deadlocks$$;
COMMENT ON FUNCTION check_distributed_deadlocks()
IS 'does a distributed deadlock check; if a deadlock is found, cancels one of the participating backends and returns true';

RESET search_path;

@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '7.0-13'
default_version = '7.0-14'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog

@@ -19,6 +19,7 @@
#include "datatype/timestamp.h"
#include "distributed/backend_data.h"
#include "distributed/listutils.h"
#include "distributed/lock_graph.h"
#include "distributed/metadata_cache.h"
#include "distributed/transaction_identifier.h"
#include "nodes/execnodes.h"

@@ -566,3 +567,70 @@ GetBackendDataForProc(PGPROC *proc, BackendData *result)

	SpinLockRelease(&backendData->mutex);
}


/*
 * CancelTransactionDueToDeadlock cancels the input proc and also marks the backend
 * data with this information.
 */
void
CancelTransactionDueToDeadlock(PGPROC *proc)
{
	BackendData *backendData = &backendManagementShmemData->backends[proc->pgprocno];

	/* backend might not have used citus yet and thus not initialized backend data */
	if (!backendData)
	{
		return;
	}

	SpinLockAcquire(&backendData->mutex);

	/* send a SIGINT only if the process is still in a distributed transaction */
	if (backendData->transactionId.transactionNumber != 0)
	{
		backendData->cancelledDueToDeadlock = true;
		SpinLockRelease(&backendData->mutex);

		if (kill(proc->pid, SIGINT) != 0)
		{
			ereport(WARNING,
					(errmsg("attempted to cancel this backend (pid: %d) to resolve a "
							"distributed deadlock but the backend could not "
							"be cancelled", proc->pid)));
		}
	}
	else
	{
		SpinLockRelease(&backendData->mutex);
	}
}


/*
 * MyBackendGotCancelledDueToDeadlock returns whether the current distributed
 * transaction was cancelled due to a deadlock. If the backend is not in a
 * distributed transaction, the function returns false.
 */
bool
MyBackendGotCancelledDueToDeadlock(void)
{
	bool cancelledDueToDeadlock = false;

	/* backend might not have used citus yet and thus not initialized backend data */
	if (!MyBackendData)
	{
		return false;
	}

	SpinLockAcquire(&MyBackendData->mutex);

	if (IsInDistributedTransaction(MyBackendData))
	{
		cancelledDueToDeadlock = MyBackendData->cancelledDueToDeadlock;
	}

	SpinLockRelease(&MyBackendData->mutex);

	return cancelledDueToDeadlock;
}
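
The diff does not show a consumer of MyBackendGotCancelledDueToDeadlock(). As a hedged sketch only, a caller could translate the flag into a dedicated deadlock error instead of a plain query cancellation; the function name and error wording below are hypothetical and not part of this commit.

/*
 * Hypothetical caller sketch (not part of this commit): report a distributed
 * deadlock when the backend was cancelled by the deadlock detector.
 */
#include "postgres.h"

#include "distributed/backend_data.h"

static void
ErrorIfCancelledDueToDeadlock(void)
{
	if (MyBackendGotCancelledDueToDeadlock())
	{
		ereport(ERROR,
				(errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
				 errmsg("canceling the transaction since it was "
						"involved in a distributed deadlock")));
	}
}
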

@@ -25,6 +25,31 @@
#include "utils/timestamp.h"


/* used only for finding the deadlock cycle path */
typedef struct QueuedTransactionNode
{
	TransactionNode *transactionNode;

	int currentStackDepth;
} QueuedTransactionNode;


/* GUC, determining whether debug messages for deadlock detection are sent to LOG */
bool LogDistributedDeadlockDetection = false;


static bool CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode,
											TransactionNode **transactionNodeStack,
											List **deadlockPath);
static void PrependOutgoingNodesToQueue(TransactionNode *queuedTransactionNode,
										int currentStackDepth,
										List **toBeVisitedNodes);
static void BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode,
								  TransactionNode **transactionNodeStack,
								  List **deadlockPath);
static void ResetVisitedFields(HTAB *adjacencyList);
static void AssociateDistributedTransactionWithBackendProc(TransactionNode *
														   transactionNode);
static TransactionNode * GetOrCreateTransactionNode(HTAB *adjacencyList,
													DistributedTransactionId *
													transactionId);

@@ -32,6 +57,311 @@ static uint32 DistributedTransactionIdHash(const void *key, Size keysize);
static int DistributedTransactionIdCompare(const void *a, const void *b, Size keysize);


PG_FUNCTION_INFO_V1(check_distributed_deadlocks);


/*
 * check_distributed_deadlocks is the external API for manually
 * checking for distributed deadlocks. For the details, see
 * CheckForDistributedDeadlocks().
 */
Datum
check_distributed_deadlocks(PG_FUNCTION_ARGS)
{
	bool deadlockFound = CheckForDistributedDeadlocks();

	return BoolGetDatum(deadlockFound);
}


/*
 * CheckForDistributedDeadlocks is the entry point for detecting
 * distributed deadlocks.
 *
 * In plain words, the function first builds a wait graph by
 * adding the wait edges from the local node and then adding the
 * remote wait edges to form a global wait graph. Later, the wait
 * graph is converted into another graph representation (adjacency
 * lists) for more efficient searches. Finally, a DFS is done on
 * the adjacency lists. Finding a cycle in the graph unveils a
 * distributed deadlock. Upon finding a deadlock, the youngest
 * participant backend is cancelled.
 *
 * The complexity of the algorithm is O(N) for each distributed
 * transaction that's checked for deadlocks. Note that there can be
 * anywhere from 0 to MaxBackends such transactions.
 *
 * The function returns true if a deadlock is found. Otherwise, returns
 * false.
 */
bool
CheckForDistributedDeadlocks(void)
{
	WaitGraph *waitGraph = BuildGlobalWaitGraph();
	HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph);
	HASH_SEQ_STATUS status;
	TransactionNode *transactionNode = NULL;
	int edgeCount = waitGraph->edgeCount;
	int localGroupId = GetLocalGroupId();

	/*
	 * We iterate on transaction nodes and search for deadlocks where the
	 * starting node is the given transaction node.
	 */
	hash_seq_init(&status, adjacencyLists);
	while ((transactionNode = (TransactionNode *) hash_seq_search(&status)) != 0)
	{
		bool deadlockFound = false;
		List *deadlockPath = NIL;
		TransactionNode *transactionNodeStack[edgeCount];

		/* we're only interested in finding deadlocks originating from this node */
		if (transactionNode->transactionId.initiatorNodeIdentifier != localGroupId)
		{
			continue;
		}

		ResetVisitedFields(adjacencyLists);

		deadlockFound = CheckDeadlockForTransactionNode(transactionNode,
														transactionNodeStack,
														&deadlockPath);
		if (deadlockFound)
		{
			TransactionNode *youngestTransaction = transactionNode;
			ListCell *participantTransactionCell = NULL;

			/* there should be at least two transactions to get into a deadlock */
			Assert(list_length(deadlockPath) > 1);

			/*
			 * We search for the youngest participant for two reasons:
			 * (i) predictable results, (ii) cancel the youngest transaction
			 * (i.e., if a DDL continues for 1 hour and deadlocks with a
			 * SELECT continues for 10 msec, we prefer to cancel the SELECT).
			 *
			 * We're also searching for the youngest transactions initiated by
			 * this node.
			 */
			foreach(participantTransactionCell, deadlockPath)
			{
				TransactionNode *currentNode =
					(TransactionNode *) lfirst(participantTransactionCell);

				TimestampTz youngestTimestamp =
					youngestTransaction->transactionId.timestamp;
				TimestampTz currentTimestamp = currentNode->transactionId.timestamp;

				AssociateDistributedTransactionWithBackendProc(currentNode);

				if (currentNode->transactionId.initiatorNodeIdentifier ==
					GetLocalGroupId() &&
					timestamptz_cmp_internal(currentTimestamp, youngestTimestamp) == 1)
				{
					youngestTransaction = currentNode;
				}
			}

			/* we should find the backend */
			Assert(youngestTransaction->initiatorProc != NULL);

			CancelTransactionDueToDeadlock(youngestTransaction->initiatorProc);

			hash_seq_term(&status);

			return true;
		}
	}

	return false;
}


/*
 * CheckDeadlockForTransactionNode does a DFS starting with the given
 * transaction node and checks for a cycle (i.e., the node can be reached again
 * while traversing the graph).
 *
 * Finding a cycle indicates a distributed deadlock and the function returns
 * true in that case. Also, the deadlockPath is filled with the transaction
 * nodes that form the cycle.
 */
static bool
CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode,
								TransactionNode **transactionNodeStack,
								List **deadlockPath)
{
	List *toBeVisitedNodes = NIL;
	int currentStackDepth = 0;

	/*
	 * We keep transactionNodeStack to keep track of the deadlock paths. At this point,
	 * adjust the depth of the starting node and set the stack's first element with
	 * the starting node.
	 */
	transactionNodeStack[currentStackDepth] = startingTransactionNode;

	PrependOutgoingNodesToQueue(startingTransactionNode, currentStackDepth,
								&toBeVisitedNodes);

	/* traverse the graph and search for the deadlocks */
	while (toBeVisitedNodes != NIL)
	{
		QueuedTransactionNode *queuedTransactionNode =
			(QueuedTransactionNode *) linitial(toBeVisitedNodes);
		TransactionNode *currentTransactionNode = queuedTransactionNode->transactionNode;

		toBeVisitedNodes = list_delete_first(toBeVisitedNodes);

		/* cycle found, let the caller know about the cycle */
		if (currentTransactionNode == startingTransactionNode)
		{
			BuildDeadlockPathList(queuedTransactionNode, transactionNodeStack,
								  deadlockPath);

			return true;
		}

		/* don't need to revisit the node again */
		if (currentTransactionNode->transactionVisited)
		{
			continue;
		}

		currentTransactionNode->transactionVisited = true;

		/* set the stack's corresponding element with the current node */
		currentStackDepth = queuedTransactionNode->currentStackDepth;
		transactionNodeStack[currentStackDepth] = currentTransactionNode;

		PrependOutgoingNodesToQueue(currentTransactionNode, currentStackDepth,
									&toBeVisitedNodes);
	}

	return false;
}


/*
 * PrependOutgoingNodesToQueue prepends the waiters of the input transaction node to
 * toBeVisitedNodes.
 */
static void
PrependOutgoingNodesToQueue(TransactionNode *transactionNode, int currentStackDepth,
							List **toBeVisitedNodes)
{
	ListCell *currentWaitForCell = NULL;

	/* as we traverse outgoing edges, increment the depth */
	currentStackDepth++;

	/* prepend to the list to continue depth-first search */
	foreach(currentWaitForCell, transactionNode->waitsFor)
	{
		TransactionNode *waitForTransaction =
			(TransactionNode *) lfirst(currentWaitForCell);
		QueuedTransactionNode *queuedNode = palloc0(sizeof(QueuedTransactionNode));

		queuedNode->transactionNode = waitForTransaction;
		queuedNode->currentStackDepth = currentStackDepth;

		*toBeVisitedNodes = lappend(*toBeVisitedNodes, queuedNode);
	}
}


/*
 * BuildDeadlockPathList fills deadlockPath with a list of transactions involved
 * in a distributed deadlock (i.e. a cycle in the graph).
 */
static void
BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode,
					  TransactionNode **transactionNodeStack,
					  List **deadlockPath)
{
	int deadlockStackDepth = cycledTransactionNode->currentStackDepth;
	int stackIndex = 0;

	*deadlockPath = NIL;

	for (stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++)
	{
		*deadlockPath = lappend(*deadlockPath, transactionNodeStack[stackIndex]);
	}
}


/*
 * ResetVisitedFields goes over all the elements of the input adjacency list
 * and sets transactionVisited to false.
 */
static void
ResetVisitedFields(HTAB *adjacencyList)
{
	HASH_SEQ_STATUS status;
	TransactionNode *resetNode = NULL;

	/* reset all visited fields */
	hash_seq_init(&status, adjacencyList);

	while ((resetNode = (TransactionNode *) hash_seq_search(&status)) != 0)
	{
		resetNode->transactionVisited = false;
	}
}


/*
 * AssociateDistributedTransactionWithBackendProc gets a transaction node
 * and searches for the corresponding backend. Once found, the transactionNode's
 * initiatorProc is set to it.
 *
 * The function goes over all the backends and checks for the backend with
 * the same transaction number as the given transaction node.
 */
static void
AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
{
	int backendIndex = 0;

	for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
	{
		PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
		BackendData currentBackendData;
		DistributedTransactionId *currentTransactionId = NULL;

		/* we're not interested in processes that are not active or waiting on a lock */
		if (currentProc->pid <= 0)
		{
			continue;
		}

		GetBackendDataForProc(currentProc, &currentBackendData);

		/* we're only interested in distributed transactions */
		if (!IsInDistributedTransaction(&currentBackendData))
		{
			continue;
		}

		currentTransactionId = &currentBackendData.transactionId;

		if (currentTransactionId->transactionNumber !=
			transactionNode->transactionId.transactionNumber)
		{
			continue;
		}

		/* at this point we should only have transactions initiated by this node */
		Assert(currentTransactionId->initiatorNodeIdentifier == GetLocalGroupId());

		transactionNode->initiatorProc = currentProc;

		break;
	}
}


/*
 * BuildAdjacencyListsForWaitGraph converts the input wait graph to
 * an adjacency list for further processing.

@@ -121,6 +451,7 @@ GetOrCreateTransactionNode(HTAB *adjacencyList, DistributedTransactionId *transa
	if (!found)
	{
		transactionNode->waitsFor = NIL;
		transactionNode->initiatorProc = NULL;
	}

	return transactionNode;

@@ -56,10 +56,8 @@ static void AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc,
static void AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc,
						PROCStack *remaining);
static WaitEdge * AllocWaitEdge(WaitGraph *waitGraph);
static bool IsProcessWaitingForLock(PGPROC *proc);
static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc);
static bool IsConflictingLockMask(int holdMask, int conflictMask);
static bool IsInDistributedTransaction(BackendData *backendData);


PG_FUNCTION_INFO_V1(dump_local_wait_edges);

@@ -710,7 +708,7 @@ AllocWaitEdge(WaitGraph *waitGraph)
/*
 * IsProcessWaitingForLock returns whether a given process is waiting for a lock.
 */
static bool
bool
IsProcessWaitingForLock(PGPROC *proc)
{
	return proc->waitStatus == STATUS_WAITING;

@@ -750,7 +748,7 @@ IsConflictingLockMask(int holdMask, int conflictMask)
 * IsInDistributedTransaction returns whether the given backend is in a
 * distributed transaction.
 */
static bool
bool
IsInDistributedTransaction(BackendData *backendData)
{
	return backendData->transactionId.transactionNumber != 0;

@@ -29,6 +29,7 @@ typedef struct BackendData
{
	Oid databaseId;
	slock_t mutex;
	bool cancelledDueToDeadlock;
	DistributedTransactionId transactionId;
} BackendData;


@@ -40,5 +41,7 @@ extern void UnlockBackendSharedMemory(void);
extern void UnSetDistributedTransactionId(void);
extern void AssignDistributedTransactionId(void);
extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);
extern void CancelTransactionDueToDeadlock(PGPROC *proc);
extern bool MyBackendGotCancelledDueToDeadlock(void);

#endif /* BACKEND_DATA_H */

@@ -26,10 +26,21 @@ typedef struct TransactionNode

	/* list of TransactionNode that this distributed transaction is waiting for */
	List *waitsFor;

	/* backend that is on the initiator node */
	PGPROC *initiatorProc;

	bool transactionVisited;
} TransactionNode;


HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph);
/* GUC, determining whether debug messages for deadlock detection are sent to LOG */
extern bool LogDistributedDeadlockDetection;


extern bool CheckForDistributedDeadlocks(void);
extern HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph);
extern char * WaitsForToString(List *waitsFor);


#endif /* DISTRIBUTED_DEADLOCK_DETECTION_H */

@@ -55,6 +55,8 @@ typedef struct WaitGraph


extern WaitGraph * BuildGlobalWaitGraph(void);
extern bool IsProcessWaitingForLock(PGPROC *proc);
extern bool IsInDistributedTransaction(BackendData *backendData);


#endif /* LOCK_GRAPH_H */

@@ -123,6 +123,7 @@ ALTER EXTENSION citus UPDATE TO '7.0-10';
ALTER EXTENSION citus UPDATE TO '7.0-11';
ALTER EXTENSION citus UPDATE TO '7.0-12';
ALTER EXTENSION citus UPDATE TO '7.0-13';
ALTER EXTENSION citus UPDATE TO '7.0-14';
-- show running version
SHOW citus.version;
 citus.version

@@ -352,6 +352,8 @@ Custom Scan (Citus Router)
-> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
-- make the outputs more consistent
VACUUM ANALYZE lineitem_mx;
-- Test single-shard SELECT
EXPLAIN (COSTS FALSE)
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;

@@ -360,10 +362,8 @@ Custom Scan (Citus Router)
Tasks Shown: All
-> Task
Node: host=localhost port=57638 dbname=regression
-> Bitmap Heap Scan on lineitem_mx_1220055 lineitem_mx
Recheck Cond: (l_orderkey = 5)
-> Bitmap Index Scan on lineitem_mx_pkey_1220055
Index Cond: (l_orderkey = 5)
-> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
Index Cond: (l_orderkey = 5)
SELECT true AS valid FROM explain_xml($$
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
t

@@ -391,68 +391,68 @@ Aggregate
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220053 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220053 on lineitem_mx_1220053 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220054 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220055 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220056 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220057 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220058 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220059 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220060 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220061 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220062 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220063 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220064 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate

@@ -461,13 +461,13 @@ Aggregate
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220066 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220067 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx
Index Cond: (l_orderkey > 9030)
SELECT true AS valid FROM explain_xml($$
SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
t

@@ -486,8 +486,8 @@ Aggregate
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
Filter: (l_orderkey > 9030)
-> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey > 9030)
-- Test re-partition join
SET citus.large_table_shard_count TO 1;
EXPLAIN (COSTS FALSE)

@@ -123,6 +123,7 @@ ALTER EXTENSION citus UPDATE TO '7.0-10';
ALTER EXTENSION citus UPDATE TO '7.0-11';
ALTER EXTENSION citus UPDATE TO '7.0-12';
ALTER EXTENSION citus UPDATE TO '7.0-13';
ALTER EXTENSION citus UPDATE TO '7.0-14';

-- show running version
SHOW citus.version;

@@ -127,6 +127,9 @@ EXPLAIN (COSTS FALSE)
DELETE FROM lineitem_mx
WHERE l_orderkey = 1 AND l_partkey = 0;

-- make the outputs more consistent
VACUUM ANALYZE lineitem_mx;

-- Test single-shard SELECT
EXPLAIN (COSTS FALSE)
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;