global_pid_guc
Onder Kalaci 2022-01-31 14:50:17 +01:00
parent 6292662f9d
commit 37f01f44a6
7 changed files with 47 additions and 2 deletions

View File

@@ -234,8 +234,16 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
char nodePortString[12] = "";
StringInfo applicationName = makeStringInfo();
appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX,
GetGlobalPID());
elog(WARNING, "InternalConnectionName::%s::", InternalConnectionName);
if (strcmp(InternalConnectionName, "") == 0)
{
appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX,
GetGlobalPID());
}
else
{
appendStringInfo(applicationName, "%s", InternalConnectionName);
}
/*
* This function has three sections:
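
The hunk above replaces the unconditional gpid-based application_name with a fallback: when citus.internal_connection_name is left at its empty default, internal connections keep the gpid-tagged name; otherwise the GUC value is used verbatim. A minimal, self-contained sketch of that selection logic (plain C, not the actual Citus code; the prefix value and the BuildApplicationName helper are assumptions for illustration):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* stand-in for CITUS_APPLICATION_NAME_PREFIX; the exact value is an assumption */
#define APPLICATION_NAME_PREFIX "citus_internal gpid="

static void
BuildApplicationName(char *dest, size_t destSize,
					 const char *internalConnectionName, uint64_t globalPid)
{
	if (strcmp(internalConnectionName, "") == 0)
	{
		/* default: tag the connection with the global PID of the initiating backend */
		snprintf(dest, destSize, APPLICATION_NAME_PREFIX "%" PRIu64, globalPid);
	}
	else
	{
		/* a component (e.g. the rebalancer) set citus.internal_connection_name */
		snprintf(dest, destSize, "%s", internalConnectionName);
	}
}

int
main(void)
{
	char name[64];

	BuildApplicationName(name, sizeof(name), "", UINT64_C(10000000001));
	printf("%s\n", name);    /* citus_internal gpid=10000000001 */

	BuildApplicationName(name, sizeof(name), "rebalancer", UINT64_C(10000000001));
	printf("%s\n", name);    /* rebalancer */

	return 0;
}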

View File

@@ -66,6 +66,10 @@ int GroupSize = 1;
/* config variable managed via guc.c */
char *CurrentCluster = "default";
/* TODO: add comments, move to the correct file */
char *InternalConnectionName = "";
/*
* Config variable to control whether we should replicate reference tables on
* node activation or we should defer it to shard creation.

View File

@@ -58,6 +58,8 @@
#include "utils/memutils.h"
#include "utils/syscache.h"
#include "distributed/distributed_deadlock_detection.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
@@ -824,6 +826,9 @@ rebalance_table_shards(PG_FUNCTION_ARGS)
.rebalanceStrategy = strategy,
.improvementThreshold = strategy->improvementThreshold,
};
SetLocalInternalConnectionName("rebalancer");
Oid shardTransferModeOid = PG_GETARG_OID(4);
RebalanceTableShards(&options, shardTransferModeOid);
PG_RETURN_VOID();
@@ -1696,6 +1701,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
* In case of failure, we throw an error such that rebalance_table_shards
* fails early.
*/
ExecuteCriticalCommandInSeparateTransaction(placementUpdateCommand->data);
UpdateColocatedShardPlacementProgress(shardId,

View File

@@ -1142,6 +1142,17 @@ RegisterCitusConfigVariables(void)
HideShardsFromAppNamePrefixesAssignHook,
NULL);
DefineCustomStringVariable(
"citus.internal_connection_name",
gettext_noop("TODO: add comment"),
NULL,
&InternalConnectionName,
"",
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomIntVariable(
"citus.isolation_test_session_process_id",
NULL,
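
For readability, here is the same registration annotated parameter by parameter. This is a sketch of the call added above, not additional code in the commit; the short description is only suggested wording, since the commit still carries a TODO for it:

/* annotated sketch of the DefineCustomStringVariable() call above */
DefineCustomStringVariable(
	"citus.internal_connection_name",          /* GUC name */
	gettext_noop("Sets the application_name used by Citus-internal "
				 "connections opened from this session."),  /* short description (assumed wording) */
	NULL,                                      /* long description */
	&InternalConnectionName,                   /* C variable the value is bound to */
	"",                                        /* default: fall back to the gpid-based name */
	PGC_USERSET,                               /* settable by any user */
	GUC_NO_SHOW_ALL,                           /* hidden from SHOW ALL output */
	NULL, NULL, NULL);                         /* check, assign, and show hooks */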

View File

@@ -63,6 +63,8 @@ static void LogCancellingBackend(TransactionNode *transactionNode);
static void LogTransactionNode(TransactionNode *transactionNode);
static void LogDistributedDeadlockDebugMessage(const char *errorMessage);
PG_FUNCTION_INFO_V1(check_distributed_deadlocks);
@@ -119,6 +121,8 @@ CheckForDistributedDeadlocks(void)
return false;
}
SetLocalInternalConnectionName("distributed deadlock detection");
WaitGraph *waitGraph = BuildGlobalWaitGraph();
HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph);
@@ -219,6 +223,15 @@ CheckForDistributedDeadlocks(void)
}
/*
 * SetLocalInternalConnectionName sets citus.internal_connection_name locally
 * (for the current transaction) so that internal connections opened from this
 * backend report the given name as their application_name.
 */
void
SetLocalInternalConnectionName(char *internalConnectionName)
{
set_config_option("citus.internal_connection_name", internalConnectionName,
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
}
/*
* CheckDeadlockForTransactionNode does a DFS starting with the given
* transaction node and checks for a cycle (i.e., the node can be reached again
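
Because set_config_option() is called with GUC_ACTION_LOCAL, the override has SET LOCAL semantics: it is reverted automatically when the surrounding transaction ends, whether it commits or aborts, so call sites do not need to restore the previous value. A sketch of the pattern at the two call sites in this commit (lines taken from the rebalancer hunk above, comments added for illustration):

/*
 * Pattern used at both call sites: set the name locally, then run the
 * operation; GUC_ACTION_LOCAL undoes the override at transaction end.
 */
SetLocalInternalConnectionName("rebalancer");
RebalanceTableShards(&options, shardTransferModeOid);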

View File

@@ -37,6 +37,7 @@ typedef struct TransactionNode
/* GUC, determining whether debug messages for deadlock detection sent to LOG */
extern bool LogDistributedDeadlockDetection;
extern void SetLocalInternalConnectionName(char *internalConnectionName);
extern bool CheckForDistributedDeadlocks(void);
extern HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph);

View File

@@ -63,6 +63,8 @@ extern char *WorkerListFileName;
extern char *CurrentCluster;
extern bool ReplicateReferenceTablesOnActivate;
extern char *InternalConnectionName;
/* Function declarations for finding worker nodes to place shards on */
extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList);