mirror of https://github.com/citusdata/citus.git
Automatically convert useless declarations using regex replace (#3181)
* Add declaration removal to CI
* Convert declarations
parent 9961297d7b
commit 1d8dde232f
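The commit mechanizes a simple refactoring across the C sources: when a local variable is declared at the top of a block and its first use is a plain assignment, the declaration is folded into that assignment. A minimal sketch of the before/after shape the script produces (the function and helper names below are hypothetical, not taken from this diff):

typedef unsigned int Oid;                      /* stand-in for the PostgreSQL typedef */
extern int LookupShardCount(Oid relationId);   /* hypothetical helper */

/* Before: declaration separated from its first assignment. */
int
ShardCountBefore(Oid relationId)
{
	int shardCount = 0;

	shardCount = LookupShardCount(relationId);
	return shardCount;
}

/* After: the script merges the declaration into the first assignment. */
int
ShardCountAfter(Oid relationId)
{
	int shardCount = LookupShardCount(relationId);
	return shardCount;
}

The CI changes below wire the conversion into every build: one step runs the script, and a follow-up step fails the build if the script staged any changes, so a convertible declaration cannot survive review.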
.circleci/config.yml:

@@ -22,6 +22,12 @@ jobs:
       - run:
           name: 'Check Style'
           command: citus_indent --check
+      - run:
+          name: 'Remove useless declarations'
+          command: ci/remove_useless_declarations.sh
+      - run:
+          name: 'Check if changed'
+          command: git diff --cached --exit-code
   check-sql-snapshots:
     docker:
       - image: 'citus/extbuilder:latest'
ci/remove_useless_declarations.sh (new file):

@@ -0,0 +1,23 @@
+#!/bin/sh
+
+set -eu
+
+files=$(find src -iname '*.c' | git check-attr --stdin citus-style | grep -v ': unset$' | sed 's/: citus-style: set$//')
+while true; do
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t)?(?=\b(?P=variable)\b))(?<=\n\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t$+{type}$+{variable} =/sg' $files
+    # The following are simply the same regex, but repeated for different tab sizes
+    # (this is needed because variable sized backtracking is not supported in perl)
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t$+{type}$+{variable} =/sg' $files
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t\t\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t$+{type}$+{variable} =/sg' $files
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t\t\t\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t$+{type}$+{variable} =/sg' $files
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t\t\t\t\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t\t$+{type}$+{variable} =/sg' $files
+    # shellcheck disable=SC2086
+    perl -i -p0e 's/\n\t\t\t\t\t\t(?!return )(?P<type>(\w+ )+\**)(?>(?P<variable>\w+)( = *[\w>\s\n-]*?)?;\n(?P<code_between>(?>(?P<comment_or_string_or_not_preprocessor>\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t\t\t$+{type}$+{variable} =/sg' $files
+    git diff --quiet && break;
+    git add .;
+done
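The heavy lifting above is a single Perl substitution, repeated verbatim for one through six leading tabs: Perl lookbehinds must be fixed-width, so each indentation depth needs its own pass (as the inline comment notes, variable-sized backtracking is not supported). The files= line limits the rewrite to sources carrying the citus-style gitattribute. Each loop iteration applies all six passes; "git diff --quiet && break" stops once a pass changes nothing, and "git add ." stages the result so the "Check if changed" CI step ("git diff --cached --exit-code") fails whenever something was rewritten. The pattern is deliberately conservative: the "(?!return )" lookahead keeps "return foo;" from parsing like a declaration, and the atomic groups require that the first later mention of the variable be a whole-line assignment at the same indentation depth. A sketch of cases it therefore skips (hypothetical code, not from this commit; RegisterPointer() and SOME_FLAG are stand-ins):

extern void RegisterPointer(int *value);   /* hypothetical helper */

void
SkippedCases(int flag)
{
	/*
	 * Skipped: "value" is used (its address escapes) between the
	 * declaration and the first whole-line assignment, so merging the
	 * declaration into "value = 42;" would change behavior.
	 */
	int value = 0;
	RegisterPointer(&value);
	value = 42;

	/*
	 * Skipped: the first assignment sits one indentation level deeper
	 * than the declaration, and every per-tab-size pass requires both
	 * at the same depth. Conditionally assigned variables thus stay
	 * declared outside the branch.
	 */
	int branched;
	if (flag)
	{
		branched = 1;
	}
	else
	{
		branched = 2;
	}
	RegisterPointer(&branched);

	/*
	 * Skipped: a preprocessor line in between (the "[^#]" term in the
	 * regex) blocks the merge, since only one branch may assign.
	 */
	int guarded;
#ifdef SOME_FLAG
	guarded = 1;
#else
	guarded = 2;
#endif
	RegisterPointer(&guarded);
}

The remaining hunks below show the conversion applied across the tree; in each, a "type var = initializer;" plus later "var = expr;" pair collapses into "type var = expr;" at the first assignment.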
@@ -47,11 +47,11 @@ static bool CallFuncExprRemotely(CallStmt *callStmt,
 bool
 CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest)
 {
-	DistObjectCacheEntry *procedure = NULL;
 	FuncExpr *funcExpr = callStmt->funcexpr;
 	Oid functionId = funcExpr->funcid;

-	procedure = LookupDistObjectCacheEntry(ProcedureRelationId, functionId, 0);
+	DistObjectCacheEntry *procedure = LookupDistObjectCacheEntry(ProcedureRelationId,
+																 functionId, 0);
 	if (procedure == NULL || !procedure->isDistributed)
 	{
 		return false;
@@ -68,25 +68,13 @@ static bool
 CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 					 FuncExpr *funcExpr, DestReceiver *dest)
 {
-	Oid colocatedRelationId = InvalidOid;
-	Node *partitionValueNode = NULL;
-	Const *partitionValue = NULL;
-	Datum partitionValueDatum = 0;
-	ShardInterval *shardInterval = NULL;
-	List *placementList = NIL;
-	DistTableCacheEntry *distTable = NULL;
-	Var *partitionColumn = NULL;
-	ShardPlacement *placement = NULL;
-	WorkerNode *workerNode = NULL;
-	StringInfo callCommand = NULL;
-
 	if (IsMultiStatementTransaction())
 	{
 		ereport(DEBUG1, (errmsg("cannot push down CALL in multi-statement transaction")));
 		return false;
 	}

-	colocatedRelationId = ColocatedTableId(procedure->colocationId);
+	Oid colocatedRelationId = ColocatedTableId(procedure->colocationId);
 	if (colocatedRelationId == InvalidOid)
 	{
 		ereport(DEBUG1, (errmsg("stored procedure does not have co-located tables")));
@@ -107,8 +95,8 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 		return false;
 	}

-	distTable = DistributedTableCacheEntry(colocatedRelationId);
-	partitionColumn = distTable->partitionColumn;
+	DistTableCacheEntry *distTable = DistributedTableCacheEntry(colocatedRelationId);
+	Var *partitionColumn = distTable->partitionColumn;
 	if (partitionColumn == NULL)
 	{
 		/* This can happen if colocated with a reference table. Punt for now. */
@@ -117,17 +105,17 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 		return false;
 	}

-	partitionValueNode = (Node *) list_nth(funcExpr->args,
-										   procedure->distributionArgIndex);
+	Node *partitionValueNode = (Node *) list_nth(funcExpr->args,
+												 procedure->distributionArgIndex);
 	partitionValueNode = strip_implicit_coercions(partitionValueNode);
 	if (!IsA(partitionValueNode, Const))
 	{
 		ereport(DEBUG1, (errmsg("distribution argument value must be a constant")));
 		return false;
 	}
-	partitionValue = (Const *) partitionValueNode;
+	Const *partitionValue = (Const *) partitionValueNode;

-	partitionValueDatum = partitionValue->constvalue;
+	Datum partitionValueDatum = partitionValue->constvalue;
 	if (partitionValue->consttype != partitionColumn->vartype)
 	{
 		CopyCoercionData coercionData;
@@ -138,14 +126,14 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 		partitionValueDatum = CoerceColumnValue(partitionValueDatum, &coercionData);
 	}

-	shardInterval = FindShardInterval(partitionValueDatum, distTable);
+	ShardInterval *shardInterval = FindShardInterval(partitionValueDatum, distTable);
 	if (shardInterval == NULL)
 	{
 		ereport(DEBUG1, (errmsg("cannot push down call, failed to find shard interval")));
 		return false;
 	}

-	placementList = FinalizedShardPlacementList(shardInterval->shardId);
+	List *placementList = FinalizedShardPlacementList(shardInterval->shardId);
 	if (list_length(placementList) != 1)
 	{
 		/* punt on this for now */
@@ -154,8 +142,8 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 		return false;
 	}

-	placement = (ShardPlacement *) linitial(placementList);
-	workerNode = FindWorkerNode(placement->nodeName, placement->nodePort);
+	ShardPlacement *placement = (ShardPlacement *) linitial(placementList);
+	WorkerNode *workerNode = FindWorkerNode(placement->nodeName, placement->nodePort);
 	if (workerNode == NULL || !workerNode->hasMetadata || !workerNode->metadataSynced)
 	{
 		ereport(DEBUG1, (errmsg("there is no worker node with metadata")));
@@ -165,7 +153,7 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 	ereport(DEBUG1, (errmsg("pushing down the procedure")));

 	/* build remote command with fully qualified names */
-	callCommand = makeStringInfo();
+	StringInfo callCommand = makeStringInfo();
 	appendStringInfo(callCommand, "CALL %s", pg_get_rule_expr((Node *) funcExpr));

 	{

@@ -28,11 +28,10 @@ PlanClusterStmt(ClusterStmt *clusterStmt, const char *clusterCommand)
 	}
 	else
 	{
-		Oid relationId = InvalidOid;
 		bool missingOK = false;

-		relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock,
-									  missingOK);
+		Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock,
+										  missingOK);

 		if (OidIsValid(relationId))
 		{

@@ -126,14 +126,10 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
 	text *distributionColumnText = PG_GETARG_TEXT_P(1);
 	Oid distributionMethodOid = PG_GETARG_OID(2);

-	char *distributionColumnName = NULL;
-	Var *distributionColumn = NULL;
-	char distributionMethod = 0;
 	char *colocateWithTableName = NULL;
 	bool viaDeprecatedAPI = true;
 	ObjectAddress tableAddress = { 0 };

-	Relation relation = NULL;

 	CheckCitusVersion(ERROR);
 	EnsureCoordinator();
@@ -153,7 +149,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
 	 * sense of this table until we've committed, and we don't want multiple
 	 * backends manipulating this relation.
 	 */
-	relation = try_relation_open(relationId, ExclusiveLock);
+	Relation relation = try_relation_open(relationId, ExclusiveLock);

 	if (relation == NULL)
 	{
@@ -168,10 +164,10 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
 	 */
 	EnsureRelationKindSupported(relationId);

-	distributionColumnName = text_to_cstring(distributionColumnText);
-	distributionColumn = BuildDistributionKeyFromColumnName(relation,
-															distributionColumnName);
-	distributionMethod = LookupDistributionMethod(distributionMethodOid);
+	char *distributionColumnName = text_to_cstring(distributionColumnText);
+	Var *distributionColumn = BuildDistributionKeyFromColumnName(relation,
+																 distributionColumnName);
+	char distributionMethod = LookupDistributionMethod(distributionMethodOid);

 	CreateDistributedTable(relationId, distributionColumn, distributionMethod,
 						   colocateWithTableName, viaDeprecatedAPI);
@@ -190,28 +186,18 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
 Datum
 create_distributed_table(PG_FUNCTION_ARGS)
 {
-	Oid relationId = InvalidOid;
-	text *distributionColumnText = NULL;
-	Oid distributionMethodOid = InvalidOid;
-	text *colocateWithTableNameText = NULL;
 	ObjectAddress tableAddress = { 0 };

-	Relation relation = NULL;
-	char *distributionColumnName = NULL;
-	Var *distributionColumn = NULL;
-	char distributionMethod = 0;
-
-	char *colocateWithTableName = NULL;

 	bool viaDeprecatedAPI = false;

 	CheckCitusVersion(ERROR);
 	EnsureCoordinator();

-	relationId = PG_GETARG_OID(0);
-	distributionColumnText = PG_GETARG_TEXT_P(1);
-	distributionMethodOid = PG_GETARG_OID(2);
-	colocateWithTableNameText = PG_GETARG_TEXT_P(3);
+	Oid relationId = PG_GETARG_OID(0);
+	text *distributionColumnText = PG_GETARG_TEXT_P(1);
+	Oid distributionMethodOid = PG_GETARG_OID(2);
+	text *colocateWithTableNameText = PG_GETARG_TEXT_P(3);

 	EnsureTableOwner(relationId);

@@ -229,7 +215,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
 	 * sense of this table until we've committed, and we don't want multiple
 	 * backends manipulating this relation.
 	 */
-	relation = try_relation_open(relationId, ExclusiveLock);
+	Relation relation = try_relation_open(relationId, ExclusiveLock);

 	if (relation == NULL)
 	{
@@ -244,12 +230,12 @@ create_distributed_table(PG_FUNCTION_ARGS)
 	 */
 	EnsureRelationKindSupported(relationId);

-	distributionColumnName = text_to_cstring(distributionColumnText);
-	distributionColumn = BuildDistributionKeyFromColumnName(relation,
-															distributionColumnName);
-	distributionMethod = LookupDistributionMethod(distributionMethodOid);
+	char *distributionColumnName = text_to_cstring(distributionColumnText);
+	Var *distributionColumn = BuildDistributionKeyFromColumnName(relation,
+																 distributionColumnName);
+	char distributionMethod = LookupDistributionMethod(distributionMethodOid);

-	colocateWithTableName = text_to_cstring(colocateWithTableNameText);
+	char *colocateWithTableName = text_to_cstring(colocateWithTableNameText);

 	CreateDistributedTable(relationId, distributionColumn, distributionMethod,
 						   colocateWithTableName, viaDeprecatedAPI);
@@ -270,10 +256,7 @@ create_reference_table(PG_FUNCTION_ARGS)
 {
 	Oid relationId = PG_GETARG_OID(0);

-	Relation relation = NULL;
 	char *colocateWithTableName = NULL;
-	List *workerNodeList = NIL;
-	int workerCount = 0;
 	Var *distributionColumn = NULL;
 	ObjectAddress tableAddress = { 0 };

@@ -297,7 +280,7 @@ create_reference_table(PG_FUNCTION_ARGS)
 	 * sense of this table until we've committed, and we don't want multiple
 	 * backends manipulating this relation.
 	 */
-	relation = relation_open(relationId, ExclusiveLock);
+	Relation relation = relation_open(relationId, ExclusiveLock);

 	/*
 	 * We should do this check here since the codes in the following lines rely
@@ -306,8 +289,8 @@ create_reference_table(PG_FUNCTION_ARGS)
 	 */
 	EnsureRelationKindSupported(relationId);

-	workerNodeList = ActivePrimaryNodeList(ShareLock);
-	workerCount = list_length(workerNodeList);
+	List *workerNodeList = ActivePrimaryNodeList(ShareLock);
+	int workerCount = list_length(workerNodeList);

 	/* if there are no workers, error out */
 	if (workerCount == 0)
@@ -344,27 +327,24 @@ void
 CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod,
 					   char *colocateWithTableName, bool viaDeprecatedAPI)
 {
-	char replicationModel = REPLICATION_MODEL_INVALID;
-	uint32 colocationId = INVALID_COLOCATION_ID;
-	Oid colocatedTableId = InvalidOid;
-	bool localTableEmpty = false;
-
-	replicationModel = AppropriateReplicationModel(distributionMethod, viaDeprecatedAPI);
+	char replicationModel = AppropriateReplicationModel(distributionMethod,
+														viaDeprecatedAPI);

 	/*
 	 * ColocationIdForNewTable assumes caller acquires lock on relationId. In our case,
 	 * our caller already acquired lock on relationId.
 	 */
-	colocationId = ColocationIdForNewTable(relationId, distributionColumn,
-										   distributionMethod, replicationModel,
-										   colocateWithTableName, viaDeprecatedAPI);
+	uint32 colocationId = ColocationIdForNewTable(relationId, distributionColumn,
+												  distributionMethod, replicationModel,
+												  colocateWithTableName,
+												  viaDeprecatedAPI);

 	EnsureRelationCanBeDistributed(relationId, distributionColumn, distributionMethod,
 								   colocationId, replicationModel, viaDeprecatedAPI);

 	/* we need to calculate these variables before creating distributed metadata */
-	localTableEmpty = LocalTableEmpty(relationId);
-	colocatedTableId = ColocatedTableId(colocationId);
+	bool localTableEmpty = LocalTableEmpty(relationId);
+	Oid colocatedTableId = ColocatedTableId(colocationId);

 	/* create an entry for distributed table in pg_dist_partition */
 	InsertIntoPgDistPartition(relationId, distributionMethod, distributionColumn,
@@ -642,9 +622,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 							   char distributionMethod, uint32 colocationId,
 							   char replicationModel, bool viaDeprecatedAPI)
 {
-	Relation relation = NULL;
-	TupleDesc relationDesc = NULL;
-	char *relationName = NULL;
 	Oid parentRelationId = InvalidOid;

 	EnsureTableNotDistributed(relationId);
@@ -652,9 +629,9 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 	EnsureReplicationSettings(InvalidOid, replicationModel);

 	/* we assume callers took necessary locks */
-	relation = relation_open(relationId, NoLock);
-	relationDesc = RelationGetDescr(relation);
-	relationName = RelationGetRelationName(relation);
+	Relation relation = relation_open(relationId, NoLock);
+	TupleDesc relationDesc = RelationGetDescr(relation);
+	char *relationName = RelationGetRelationName(relation);

 	if (!RelationUsesHeapAccessMethodOrNone(relation))
 	{
@@ -805,7 +782,6 @@ EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel,
 	char sourceDistributionMethod = sourceTableEntry->partitionMethod;
 	char sourceReplicationModel = sourceTableEntry->replicationModel;
 	Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId);
-	Oid sourceDistributionColumnType = InvalidOid;

 	if (sourceDistributionMethod != DISTRIBUTE_BY_HASH)
 	{
@@ -826,7 +802,7 @@ EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel,
 								  sourceRelationName, relationName)));
 	}

-	sourceDistributionColumnType = sourceDistributionColumn->vartype;
+	Oid sourceDistributionColumnType = sourceDistributionColumn->vartype;
 	if (sourceDistributionColumnType != distributionColumnType)
 	{
 		char *relationName = get_rel_name(relationId);
@@ -898,9 +874,8 @@ static void
 EnsureTableNotDistributed(Oid relationId)
 {
 	char *relationName = get_rel_name(relationId);
-	bool isDistributedTable = false;

-	isDistributedTable = IsDistributedTable(relationId);
+	bool isDistributedTable = IsDistributedTable(relationId);

 	if (isDistributedTable)
 	{
@@ -949,20 +924,18 @@ EnsureReplicationSettings(Oid relationId, char replicationModel)
 static char
 LookupDistributionMethod(Oid distributionMethodOid)
 {
-	HeapTuple enumTuple = NULL;
-	Form_pg_enum enumForm = NULL;
 	char distributionMethod = 0;
-	const char *enumLabel = NULL;

-	enumTuple = SearchSysCache1(ENUMOID, ObjectIdGetDatum(distributionMethodOid));
+	HeapTuple enumTuple = SearchSysCache1(ENUMOID, ObjectIdGetDatum(
+											  distributionMethodOid));
 	if (!HeapTupleIsValid(enumTuple))
 	{
 		ereport(ERROR, (errmsg("invalid internal value for enum: %u",
 							   distributionMethodOid)));
 	}

-	enumForm = (Form_pg_enum) GETSTRUCT(enumTuple);
-	enumLabel = NameStr(enumForm->enumlabel);
+	Form_pg_enum enumForm = (Form_pg_enum) GETSTRUCT(enumTuple);
+	const char *enumLabel = NameStr(enumForm->enumlabel);

 	if (strncmp(enumLabel, "append", NAMEDATALEN) == 0)
 	{
@@ -997,9 +970,6 @@ static Oid
 SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
 						 int16 supportFunctionNumber)
 {
-	Oid operatorFamilyId = InvalidOid;
-	Oid supportFunctionOid = InvalidOid;
-	Oid operatorClassInputType = InvalidOid;
 	Oid columnOid = partitionColumn->vartype;
 	Oid operatorClassId = GetDefaultOpClass(columnOid, accessMethodId);

@@ -1014,11 +984,11 @@ SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
 						" class defined.")));
 	}

-	operatorFamilyId = get_opclass_family(operatorClassId);
-	operatorClassInputType = get_opclass_input_type(operatorClassId);
-	supportFunctionOid = get_opfamily_proc(operatorFamilyId, operatorClassInputType,
-										   operatorClassInputType,
-										   supportFunctionNumber);
+	Oid operatorFamilyId = get_opclass_family(operatorClassId);
+	Oid operatorClassInputType = get_opclass_input_type(operatorClassId);
+	Oid supportFunctionOid = get_opfamily_proc(operatorFamilyId, operatorClassInputType,
+											   operatorClassInputType,
+											   supportFunctionNumber);

 	return supportFunctionOid;
 }
@@ -1037,13 +1007,8 @@ LocalTableEmpty(Oid tableId)
 	char *tableName = get_rel_name(tableId);
 	char *tableQualifiedName = quote_qualified_identifier(schemaName, tableName);

-	int spiConnectionResult = 0;
-	int spiQueryResult = 0;
 	StringInfo selectExistQueryString = makeStringInfo();

-	HeapTuple tuple = NULL;
-	Datum hasDataDatum = 0;
-	bool localTableEmpty = false;
 	bool columnNull = false;
 	bool readOnly = true;

@@ -1052,7 +1017,7 @@ LocalTableEmpty(Oid tableId)

 	AssertArg(!IsDistributedTable(tableId));

-	spiConnectionResult = SPI_connect();
+	int spiConnectionResult = SPI_connect();
 	if (spiConnectionResult != SPI_OK_CONNECT)
 	{
 		ereport(ERROR, (errmsg("could not connect to SPI manager")));
@@ -1060,7 +1025,7 @@ LocalTableEmpty(Oid tableId)

 	appendStringInfo(selectExistQueryString, SELECT_EXIST_QUERY, tableQualifiedName);

-	spiQueryResult = SPI_execute(selectExistQueryString->data, readOnly, 0);
+	int spiQueryResult = SPI_execute(selectExistQueryString->data, readOnly, 0);
 	if (spiQueryResult != SPI_OK_SELECT)
 	{
 		ereport(ERROR, (errmsg("execution was not successful \"%s\"",
@@ -1070,9 +1035,10 @@ LocalTableEmpty(Oid tableId)
 	/* we expect that SELECT EXISTS query will return single value in a single row */
 	Assert(SPI_processed == 1);

-	tuple = SPI_tuptable->vals[rowId];
-	hasDataDatum = SPI_getbinval(tuple, SPI_tuptable->tupdesc, attributeId, &columnNull);
-	localTableEmpty = !DatumGetBool(hasDataDatum);
+	HeapTuple tuple = SPI_tuptable->vals[rowId];
+	Datum hasDataDatum = SPI_getbinval(tuple, SPI_tuptable->tupdesc, attributeId,
+									   &columnNull);
+	bool localTableEmpty = !DatumGetBool(hasDataDatum);

 	SPI_finish();

@@ -1145,13 +1111,12 @@ CanUseExclusiveConnections(Oid relationId, bool localTableEmpty)
 void
 CreateTruncateTrigger(Oid relationId)
 {
-	CreateTrigStmt *trigger = NULL;
 	StringInfo triggerName = makeStringInfo();
 	bool internal = true;

 	appendStringInfo(triggerName, "truncate_trigger");

-	trigger = makeNode(CreateTrigStmt);
+	CreateTrigStmt *trigger = makeNode(CreateTrigStmt);
 	trigger->trigname = triggerName->data;
 	trigger->relation = NULL;
 	trigger->funcname = SystemFuncName("citus_truncate_trigger");
@@ -1232,9 +1197,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
 	HeapScanDesc scan = NULL;
 #endif
 	HeapTuple tuple = NULL;
-	ExprContext *econtext = NULL;
 	MemoryContext oldContext = NULL;
-	TupleTableSlot *slot = NULL;
 	uint64 rowsCopied = 0;

 	/* take an ExclusiveLock to block all operations except SELECT */
@@ -1264,7 +1227,8 @@ CopyLocalDataIntoShards(Oid distributedRelationId)

 	/* get the table columns */
 	tupleDescriptor = RelationGetDescr(distributedRelation);
-	slot = MakeSingleTupleTableSlotCompat(tupleDescriptor, &TTSOpsHeapTuple);
+	TupleTableSlot *slot = MakeSingleTupleTableSlotCompat(tupleDescriptor,
+														  &TTSOpsHeapTuple);
 	columnNameList = TupleDescColumnNameList(tupleDescriptor);

 	/* determine the partition column in the tuple descriptor */
@@ -1276,7 +1240,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId)

 	/* initialise per-tuple memory context */
 	estate = CreateExecutorState();
-	econtext = GetPerTupleExprContext(estate);
+	ExprContext *econtext = GetPerTupleExprContext(estate);
 	econtext->ecxt_scantuple = slot;

 	copyDest =
@@ -1362,9 +1326,8 @@ static List *
 TupleDescColumnNameList(TupleDesc tupleDescriptor)
 {
 	List *columnNameList = NIL;
-	int columnIndex = 0;

-	for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
+	for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
 		char *columnName = NameStr(currentColumn->attname);
@@ -1392,9 +1355,7 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 static bool
 RelationUsesIdentityColumns(TupleDesc relationDesc)
 {
-	int attributeIndex = 0;
-
-	for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
+	for (int attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
 	{
 		Form_pg_attribute attributeForm = TupleDescAttr(relationDesc, attributeIndex);

@@ -50,7 +50,6 @@ void
 EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
 {
 	/* local variables to work with dependencies */
-	List *dependencies = NIL;
 	List *dependenciesWithCommands = NIL;
 	ListCell *dependencyCell = NULL;

@@ -58,13 +57,12 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
 	List *ddlCommands = NULL;

 	/* local variables to work with worker nodes */
-	List *workerNodeList = NULL;
 	ListCell *workerNodeCell = NULL;

 	/*
 	 * collect all dependencies in creation order and get their ddl commands
 	 */
-	dependencies = GetDependenciesForObject(target);
+	List *dependencies = GetDependenciesForObject(target);
 	foreach(dependencyCell, dependencies)
 	{
 		ObjectAddress *dependency = (ObjectAddress *) lfirst(dependencyCell);
@@ -94,7 +92,7 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
 	 * either get it now, or get it in master_add_node after this transaction finishes and
 	 * the pg_dist_object record becomes visible.
 	 */
-	workerNodeList = ActivePrimaryWorkerNodeList(RowShareLock);
+	List *workerNodeList = ActivePrimaryWorkerNodeList(RowShareLock);

 	/*
 	 * right after we acquired the lock we mark our objects as distributed, these changes
@@ -216,13 +214,12 @@ void
 ReplicateAllDependenciesToNode(const char *nodeName, int nodePort)
 {
 	ListCell *dependencyCell = NULL;
-	List *dependencies = NIL;
 	List *ddlCommands = NIL;

 	/*
 	 * collect all dependencies in creation order and get their ddl commands
 	 */
-	dependencies = GetDistributedObjectAddressList();
+	List *dependencies = GetDistributedObjectAddressList();

 	/*
 	 * Depending on changes in the environment, such as the enable_object_propagation guc

@@ -126,8 +126,6 @@ static void
 MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName,
 												char *tableName)
 {
-	char *deleteDistributionCommand = NULL;
-
 	/*
 	 * The SQL_DROP trigger calls this function even for tables that are
 	 * not distributed. In that case, silently ignore. This is not very
@@ -147,6 +145,6 @@ MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName
 	}

 	/* drop the distributed table metadata on the workers */
-	deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName);
+	char *deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName);
 	SendCommandToWorkers(WORKERS_WITH_METADATA, deleteDistributionCommand);
 }

@@ -82,8 +82,6 @@ ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parseTree)
 static char *
 ExtractNewExtensionVersion(Node *parseTree)
 {
-	Value *newVersionValue = NULL;
-
 	List *optionsList = NIL;

 	if (IsA(parseTree, CreateExtensionStmt))
@@ -100,7 +98,7 @@ ExtractNewExtensionVersion(Node *parseTree)
 		Assert(false);
 	}

-	newVersionValue = GetExtensionOption(optionsList, "new_version");
+	Value *newVersionValue = GetExtensionOption(optionsList, "new_version");

 	/* return target string safely */
 	if (newVersionValue)
@@ -126,9 +124,6 @@ ExtractNewExtensionVersion(Node *parseTree)
 List *
 PlanCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const char *queryString)
 {
-	List *commands = NIL;
-	const char *createExtensionStmtSql = NULL;
-
 	if (!ShouldPropagateExtensionCommand((Node *) createExtensionStmt))
 	{
 		return NIL;
@@ -168,15 +163,15 @@ PlanCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const char *qu
 	 */
 	AddSchemaFieldIfMissing(createExtensionStmt);

-	createExtensionStmtSql = DeparseTreeNode((Node *) createExtensionStmt);
+	const char *createExtensionStmtSql = DeparseTreeNode((Node *) createExtensionStmt);

 	/*
 	 * To prevent recursive propagation in mx architecture, we disable ddl
 	 * propagation before sending the command to workers.
 	 */
-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) createExtensionStmtSql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) createExtensionStmtSql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }
@@ -229,8 +224,6 @@ void
 ProcessCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const
 						   char *queryString)
 {
-	const ObjectAddress *extensionAddress = NULL;
-
 	if (!ShouldPropagateExtensionCommand((Node *) createExtensionStmt))
 	{
 		return;
@@ -246,7 +239,8 @@ ProcessCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const
 		return;
 	}

-	extensionAddress = GetObjectAddressFromParseTree((Node *) createExtensionStmt, false);
+	const ObjectAddress *extensionAddress = GetObjectAddressFromParseTree(
+		(Node *) createExtensionStmt, false);

 	EnsureDependenciesExistsOnAllNodes(extensionAddress);

@@ -267,11 +261,6 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString)
 {
 	List *allDroppedExtensions = dropStmt->objects;

-	List *distributedExtensions = NIL;
-	List *distributedExtensionAddresses = NIL;
-
-	List *commands = NIL;
-	const char *deparsedStmt = NULL;

 	ListCell *addressCell = NULL;

@@ -281,7 +270,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString)
 	}

 	/* get distributed extensions to be dropped in worker nodes as well */
-	distributedExtensions = FilterDistributedExtensions(allDroppedExtensions);
+	List *distributedExtensions = FilterDistributedExtensions(allDroppedExtensions);

 	if (list_length(distributedExtensions) <= 0)
 	{
@@ -308,7 +297,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString)
 	 */
 	EnsureSequentialModeForExtensionDDL();

-	distributedExtensionAddresses = ExtensionNameListToObjectAddressList(
+	List *distributedExtensionAddresses = ExtensionNameListToObjectAddressList(
 		distributedExtensions);

 	/* unmark each distributed extension */
@@ -326,7 +315,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString)
 	 * its execution.
 	 */
 	dropStmt->objects = distributedExtensions;
-	deparsedStmt = DeparseTreeNode((Node *) dropStmt);
+	const char *deparsedStmt = DeparseTreeNode((Node *) dropStmt);

 	dropStmt->objects = allDroppedExtensions;

@@ -334,9 +323,9 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString)
 	 * To prevent recursive propagation in mx architecture, we disable ddl
 	 * propagation before sending the command to workers.
 	 */
-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) deparsedStmt,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) deparsedStmt,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }
@@ -425,9 +414,6 @@ List *
 PlanAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const
 							 char *queryString)
 {
-	const char *alterExtensionStmtSql = NULL;
-	List *commands = NIL;
-
 	if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt))
 	{
 		return NIL;
@@ -451,15 +437,15 @@ PlanAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const
 	 */
 	EnsureSequentialModeForExtensionDDL();

-	alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt);
+	const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt);

 	/*
 	 * To prevent recursive propagation in mx architecture, we disable ddl
 	 * propagation before sending the command to workers.
 	 */
-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) alterExtensionStmtSql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) alterExtensionStmtSql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }
@@ -474,9 +460,8 @@ void
 ProcessAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const
 								char *queryString)
 {
-	const ObjectAddress *extensionAddress = NULL;
-
-	extensionAddress = GetObjectAddressFromParseTree((Node *) alterExtensionStmt, false);
+	const ObjectAddress *extensionAddress = GetObjectAddressFromParseTree(
+		(Node *) alterExtensionStmt, false);

 	if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt))
 	{
@@ -495,9 +480,6 @@ List *
 PlanAlterExtensionUpdateStmt(AlterExtensionStmt *alterExtensionStmt, const
 							 char *queryString)
 {
-	const char *alterExtensionStmtSql = NULL;
-	List *commands = NIL;
-
 	if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt))
 	{
 		return NIL;
@@ -522,15 +504,15 @@ PlanAlterExtensionUpdateStmt(AlterExtensionStmt *alterExtensionStmt, const
 	 */
 	EnsureSequentialModeForExtensionDDL();

-	alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt);
+	const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt);

 	/*
 	 * To prevent recursive propagation in mx architecture, we disable ddl
 	 * propagation before sending the command to workers.
 	 */
-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) alterExtensionStmtSql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) alterExtensionStmtSql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }
@@ -711,18 +693,13 @@ IsAlterExtensionSetSchemaCitus(Node *parseTree)
 List *
 CreateExtensionDDLCommand(const ObjectAddress *extensionAddress)
 {
-	List *ddlCommands = NIL;
-	const char *ddlCommand = NULL;
-
-	Node *stmt = NULL;
-
 	/* generate a statement for creation of the extension in "if not exists" construct */
-	stmt = RecreateExtensionStmt(extensionAddress->objectId);
+	Node *stmt = RecreateExtensionStmt(extensionAddress->objectId);

 	/* capture ddl command for the create statement */
-	ddlCommand = DeparseTreeNode(stmt);
+	const char *ddlCommand = DeparseTreeNode(stmt);

-	ddlCommands = list_make1((void *) ddlCommand);
+	List *ddlCommands = list_make1((void *) ddlCommand);

 	return ddlCommands;
 }
@@ -747,26 +724,22 @@ RecreateExtensionStmt(Oid extensionOid)
 	}

 	/* schema DefElement related variables */
-	Oid extensionSchemaOid = InvalidOid;
-	char *extensionSchemaName = NULL;
-	Node *schemaNameArg = NULL;

 	/* set location to -1 as it is unknown */
 	int location = -1;
-	DefElem *schemaDefElement = NULL;

 	/* set extension name and if_not_exists fields */
 	createExtensionStmt->extname = extensionName;
 	createExtensionStmt->if_not_exists = true;

 	/* get schema name that extension was created on */
-	extensionSchemaOid = get_extension_schema(extensionOid);
-	extensionSchemaName = get_namespace_name(extensionSchemaOid);
+	Oid extensionSchemaOid = get_extension_schema(extensionOid);
+	char *extensionSchemaName = get_namespace_name(extensionSchemaOid);

 	/* make DefEleme for extensionSchemaName */
-	schemaNameArg = (Node *) makeString(extensionSchemaName);
+	Node *schemaNameArg = (Node *) makeString(extensionSchemaName);

-	schemaDefElement = makeDefElem("schema", schemaNameArg, location);
+	DefElem *schemaDefElement = makeDefElem("schema", schemaNameArg, location);

 	/* append the schema name DefElem finally */
 	createExtensionStmt->options = lappend(createExtensionStmt->options,
@@ -784,15 +757,11 @@ ObjectAddress *
 AlterExtensionSchemaStmtObjectAddress(AlterObjectSchemaStmt *alterExtensionSchemaStmt,
 									  bool missing_ok)
 {
-	ObjectAddress *extensionAddress = NULL;
-	Oid extensionOid = InvalidOid;
-	const char *extensionName = NULL;
-
 	Assert(alterExtensionSchemaStmt->objectType == OBJECT_EXTENSION);

-	extensionName = strVal(alterExtensionSchemaStmt->object);
+	const char *extensionName = strVal(alterExtensionSchemaStmt->object);

-	extensionOid = get_extension_oid(extensionName, missing_ok);
+	Oid extensionOid = get_extension_oid(extensionName, missing_ok);

 	if (extensionOid == InvalidOid)
 	{
@@ -801,7 +770,7 @@ AlterExtensionSchemaStmtObjectAddress(AlterObjectSchemaStmt *alterExtensionSchem
 							   extensionName)));
 	}

-	extensionAddress = palloc0(sizeof(ObjectAddress));
+	ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress));
 	ObjectAddressSet(*extensionAddress, ExtensionRelationId, extensionOid);

 	return extensionAddress;
@@ -816,13 +785,9 @@ ObjectAddress *
 AlterExtensionUpdateStmtObjectAddress(AlterExtensionStmt *alterExtensionStmt,
 									  bool missing_ok)
 {
-	ObjectAddress *extensionAddress = NULL;
-	Oid extensionOid = InvalidOid;
-	const char *extensionName = NULL;
+	const char *extensionName = alterExtensionStmt->extname;

-	extensionName = alterExtensionStmt->extname;
-
-	extensionOid = get_extension_oid(extensionName, missing_ok);
+	Oid extensionOid = get_extension_oid(extensionName, missing_ok);

 	if (extensionOid == InvalidOid)
 	{
@@ -831,7 +796,7 @@ AlterExtensionUpdateStmtObjectAddress(AlterExtensionStmt *alterExtensionStmt,
 							   extensionName)));
 	}

-	extensionAddress = palloc0(sizeof(ObjectAddress));
+	ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress));
 	ObjectAddressSet(*extensionAddress, ExtensionRelationId, extensionOid);

 	return extensionAddress;

@@ -49,25 +49,21 @@ static void ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
 bool
 ConstraintIsAForeignKeyToReferenceTable(char *constraintName, Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;
 	bool foreignKeyToReferenceTable = false;


-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);

 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ,
 				CharGetDatum(CONSTRAINT_FOREIGN));
-	scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
+													NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
-		Oid referencedTableId = InvalidOid;
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
 		char *tupleConstraintName = (constraintForm->conname).data;

@@ -78,7 +74,7 @@ ConstraintIsAForeignKeyToReferenceTable(char *constraintName, Oid relationId)
 			continue;
 		}

-		referencedTableId = constraintForm->confrelid;
+		Oid referencedTableId = constraintForm->confrelid;

 		Assert(IsDistributedTable(referencedTableId));

@@ -122,11 +118,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 										  Var *referencingDistKey,
 										  uint32 referencingColocationId)
 {
-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;

 	Oid referencingTableId = relation->rd_id;
 	Oid referencedTableId = InvalidOid;
@@ -145,26 +138,22 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 		referencingNotReplicated = (ShardReplicationFactor == 1);
 	}

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
 				relation->rd_id);
-	scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
-										true, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
+													ConstraintRelidTypidNameIndexId,
+													true, NULL,
+													scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
-		bool referencedIsDistributed = false;
 		char referencedDistMethod = 0;
 		Var *referencedDistKey = NULL;
-		bool referencingIsReferenceTable = false;
-		bool referencedIsReferenceTable = false;
 		int referencingAttrIndex = -1;
 		int referencedAttrIndex = -1;
-		bool referencingColumnsIncludeDistKey = false;
-		bool foreignConstraintOnDistKey = false;

 		if (constraintForm->contype != CONSTRAINT_FOREIGN)
 		{
@@ -175,7 +164,7 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 		referencedTableId = constraintForm->confrelid;
 		selfReferencingTable = (referencingTableId == referencedTableId);

-		referencedIsDistributed = IsDistributedTable(referencedTableId);
+		bool referencedIsDistributed = IsDistributedTable(referencedTableId);
 		if (!referencedIsDistributed && !selfReferencingTable)
 		{
 			ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
@@ -199,8 +188,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 			referencedColocationId = referencingColocationId;
 		}

-		referencingIsReferenceTable = (referencingDistMethod == DISTRIBUTE_BY_NONE);
-		referencedIsReferenceTable = (referencedDistMethod == DISTRIBUTE_BY_NONE);
+		bool referencingIsReferenceTable = (referencingDistMethod == DISTRIBUTE_BY_NONE);
+		bool referencedIsReferenceTable = (referencedDistMethod == DISTRIBUTE_BY_NONE);


 		/*
@@ -250,8 +239,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
 									  referencedDistKey,
 									  &referencingAttrIndex,
 									  &referencedAttrIndex);
-		referencingColumnsIncludeDistKey = (referencingAttrIndex != -1);
-		foreignConstraintOnDistKey =
+		bool referencingColumnsIncludeDistKey = (referencingAttrIndex != -1);
+		bool foreignConstraintOnDistKey =
 			(referencingColumnsIncludeDistKey && referencingAttrIndex ==
 			 referencedAttrIndex);

@@ -353,14 +342,11 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
 							  int *referencingAttrIndex,
 							  int *referencedAttrIndex)
 {
-	Datum referencingColumnsDatum = 0;
 	Datum *referencingColumnArray = NULL;
 	int referencingColumnCount = 0;
-	Datum referencedColumnsDatum = 0;
 	Datum *referencedColumnArray = NULL;
 	int referencedColumnCount = 0;
 	bool isNull = false;
-	int attrIdx = 0;

 	*referencedAttrIndex = -1;
 	*referencedAttrIndex = -1;
@@ -371,10 +357,10 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
 	 * attributes together because partition column must be at the same place in both
 	 * referencing and referenced side of the foreign key constraint.
 	 */
-	referencingColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
-											  Anum_pg_constraint_conkey, &isNull);
-	referencedColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
-											 Anum_pg_constraint_confkey, &isNull);
+	Datum referencingColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
+													Anum_pg_constraint_conkey, &isNull);
+	Datum referencedColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
+												   Anum_pg_constraint_confkey, &isNull);

 	deconstruct_array(DatumGetArrayTypeP(referencingColumnsDatum), INT2OID, 2, true,
 					  's', &referencingColumnArray, NULL, &referencingColumnCount);
@@ -383,7 +369,7 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,

 	Assert(referencingColumnCount == referencedColumnCount);

-	for (attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx)
+	for (int attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx)
 	{
 		AttrNumber referencingAttrNo = DatumGetInt16(referencingColumnArray[attrIdx]);
 		AttrNumber referencedAttrNo = DatumGetInt16(referencedColumnArray[attrIdx]);
@@ -412,31 +398,26 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
 bool
 ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;
 	bool foreignKeyToReferenceTableIncludesGivenColumn = false;

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);

 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ,
 				CharGetDatum(CONSTRAINT_FOREIGN));

-	scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
+													NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
-		Oid referencedTableId = InvalidOid;
-		Oid referencingTableId = InvalidOid;
 		int pgConstraintKey = 0;
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);

-		referencedTableId = constraintForm->confrelid;
-		referencingTableId = constraintForm->conrelid;
+		Oid referencedTableId = constraintForm->confrelid;
+		Oid referencingTableId = constraintForm->conrelid;

 		if (referencedTableId == relationId)
 		{
@@ -493,11 +474,8 @@ GetTableForeignConstraintCommands(Oid relationId)
 {
 	List *tableForeignConstraints = NIL;

-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;

 	/*
 	 * Set search_path to NIL so that all objects outside of pg_catalog will be
@@ -510,14 +488,15 @@ GetTableForeignConstraintCommands(Oid relationId)
 	PushOverrideSearchPath(overridePath);

 	/* open system catalog and scan all constraints that belong to this table */
-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
 				relationId);
-	scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
-										true, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
+													ConstraintRelidTypidNameIndexId,
+													true, NULL,
+													scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
@@ -556,24 +535,21 @@ GetTableForeignConstraintCommands(Oid relationId)
 bool
 HasForeignKeyToReferenceTable(Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;
 	bool hasForeignKeyToReferenceTable = false;

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
 				relationId);
-	scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
-										true, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
													ConstraintRelidTypidNameIndexId,
+													true, NULL,
+													scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
-		Oid referencedTableId = InvalidOid;
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);

 		if (constraintForm->contype != CONSTRAINT_FOREIGN)
@@ -582,7 +558,7 @@ HasForeignKeyToReferenceTable(Oid relationId)
 			continue;
 		}

-		referencedTableId = constraintForm->confrelid;
+		Oid referencedTableId = constraintForm->confrelid;

 		if (!IsDistributedTable(referencedTableId))
 		{
@@ -615,22 +591,20 @@ HasForeignKeyToReferenceTable(Oid relationId)
 bool
 TableReferenced(Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	Oid scanIndexId = InvalidOid;
 	bool useIndex = false;

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);

 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_confrelid, BTEqualStrategyNumber, F_OIDEQ,
 				relationId);
-	scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex,
+													NULL,
+													scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
@@ -661,17 +635,15 @@ static bool
 HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId,
 										   int pgConstraintKey, char *columnName)
 {
-	Datum columnsDatum = 0;
 	Datum *columnArray = NULL;
 	int columnCount = 0;
-	int attrIdx = 0;
 	bool isNull = false;

-	columnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, pgConstraintKey, &isNull);
+	Datum columnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, pgConstraintKey, &isNull);
 	deconstruct_array(DatumGetArrayTypeP(columnsDatum), INT2OID, 2, true,
 					  's', &columnArray, NULL, &columnCount);

-	for (attrIdx = 0; attrIdx < columnCount; ++attrIdx)
+	for (int attrIdx = 0; attrIdx < columnCount; ++attrIdx)
 	{
 		AttrNumber attrNo = DatumGetInt16(columnArray[attrIdx]);

@@ -696,22 +668,20 @@ HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId,
 bool
 TableReferencing(Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	Oid scanIndexId = InvalidOid;
 	bool useIndex = false;

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);

 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
 				relationId);
-	scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex,
+													NULL,
+													scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
@@ -741,20 +711,17 @@ TableReferencing(Oid relationId)
 bool
 ConstraintIsAForeignKey(char *constraintNameInput, Oid relationId)
 {
-	Relation pgConstraint = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
-	HeapTuple heapTuple = NULL;

-	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
+	Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);

 	ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ,
 				CharGetDatum(CONSTRAINT_FOREIGN));
-	scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false,
+													NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);

@@ -99,8 +99,6 @@ create_distributed_function(PG_FUNCTION_ARGS)
 	text *colocateWithText = NULL; /* optional */

 	StringInfoData ddlCommand = { 0 };
-	const char *createFunctionSQL = NULL;
-	const char *alterFunctionOwnerSQL = NULL;
 	ObjectAddress functionAddress = { 0 };

 	int distributionArgumentIndex = -1;

@@ -159,8 +157,8 @@ create_distributed_function(PG_FUNCTION_ARGS)

 	EnsureDependenciesExistsOnAllNodes(&functionAddress);

-	createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
-	alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
+	const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
+	const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
 	initStringInfo(&ddlCommand);
 	appendStringInfo(&ddlCommand, "%s;%s", createFunctionSQL, alterFunctionOwnerSQL);
 	SendCommandToWorkersAsUser(ALL_WORKERS, CurrentUserName(), ddlCommand.data);

@@ -221,13 +219,10 @@ create_distributed_function(PG_FUNCTION_ARGS)
 List *
 CreateFunctionDDLCommandsIdempotent(const ObjectAddress *functionAddress)
 {
-	char *ddlCommand = NULL;
-	char *alterFunctionOwnerSQL = NULL;
-
 	Assert(functionAddress->classId == ProcedureRelationId);

-	ddlCommand = GetFunctionDDLCommand(functionAddress->objectId, true);
-	alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(functionAddress->objectId);
+	char *ddlCommand = GetFunctionDDLCommand(functionAddress->objectId, true);
+	char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(functionAddress->objectId);

 	return list_make2(ddlCommand, alterFunctionOwnerSQL);
 }

@@ -243,23 +238,20 @@ GetDistributionArgIndex(Oid functionOid, char *distributionArgumentName,
 {
 	int distributionArgumentIndex = -1;

-	int numberOfArgs = 0;
-	int argIndex = 0;
 	Oid *argTypes = NULL;
 	char **argNames = NULL;
 	char *argModes = NULL;

-	HeapTuple proctup = NULL;
-
 	*distributionArgumentOid = InvalidOid;

-	proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid));
+	HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid));
 	if (!HeapTupleIsValid(proctup))
 	{
 		elog(ERROR, "cache lookup failed for function %u", functionOid);
 	}

-	numberOfArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes);
+	int numberOfArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes);

 	if (argumentStartsWith(distributionArgumentName, "$"))
 	{

@@ -301,7 +293,7 @@ GetDistributionArgIndex(Oid functionOid, char *distributionArgumentName,
 	 * So, loop over the arguments and try to find the argument name that matches
 	 * the parameter that user provided.
 	 */
-	for (argIndex = 0; argIndex < numberOfArgs; ++argIndex)
+	for (int argIndex = 0; argIndex < numberOfArgs; ++argIndex)
 	{
 		char *argNameOnIndex = argNames != NULL ? argNames[argIndex] : NULL;

@@ -352,8 +344,6 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName,

 	if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) == 0)
 	{
-		Oid colocatedTableId = InvalidOid;
-
 		/* check for default colocation group */
 		colocationId = ColocationId(ShardCount, ShardReplicationFactor,
 									distributionArgumentOid);

@@ -369,7 +359,7 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName,
 							  "option to create_distributed_function()")));
 		}

-		colocatedTableId = ColocatedTableId(colocationId);
+		Oid colocatedTableId = ColocatedTableId(colocationId);
 		if (colocatedTableId != InvalidOid)
 		{
 			EnsureFunctionCanBeColocatedWithTable(functionOid, distributionArgumentOid,

@@ -415,7 +405,6 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp
 	char sourceDistributionMethod = sourceTableEntry->partitionMethod;
 	char sourceReplicationModel = sourceTableEntry->replicationModel;
 	Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId);
-	Oid sourceDistributionColumnType = InvalidOid;

 	if (sourceDistributionMethod != DISTRIBUTE_BY_HASH)
 	{

@@ -447,13 +436,12 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp
 	 * If the types are the same, we're good. If not, we still check if there
 	 * is any coercion path between the types.
 	 */
-	sourceDistributionColumnType = sourceDistributionColumn->vartype;
+	Oid sourceDistributionColumnType = sourceDistributionColumn->vartype;
 	if (sourceDistributionColumnType != distributionColumnType)
 	{
 		Oid coercionFuncId = InvalidOid;
-		CoercionPathType coercionType = COERCION_PATH_NONE;
-
-		coercionType =
+		CoercionPathType coercionType =
 			find_coercion_pathway(distributionColumnType, sourceDistributionColumnType,
 								  COERCION_EXPLICIT, &coercionFuncId);

@@ -483,17 +471,13 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
 {
 	const bool indexOK = true;

-	Relation pgDistObjectRel = NULL;
-	TupleDesc tupleDescriptor = NULL;
 	ScanKeyData scanKey[3];
-	SysScanDesc scanDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_object];
 	bool isnull[Natts_pg_dist_object];
 	bool replace[Natts_pg_dist_object];

-	pgDistObjectRel = heap_open(DistObjectRelationId(), RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(pgDistObjectRel);
+	Relation pgDistObjectRel = heap_open(DistObjectRelationId(), RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel);

 	/* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ,

@@ -503,11 +487,12 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
 	ScanKeyInit(&scanKey[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber,
 				F_INT4EQ, ObjectIdGetDatum(distAddress->objectSubId));

-	scanDescriptor = systable_beginscan(pgDistObjectRel, DistObjectPrimaryKeyIndexId(),
-										indexOK,
-										NULL, 3, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistObjectRel,
+													DistObjectPrimaryKeyIndexId(),
+													indexOK,
+													NULL, 3, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for node \"%d,%d,%d\" "

@@ -609,17 +594,10 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid)
 	char *kindString = "FUNCTION";
 	Oid procOwner = InvalidOid;

-	char *functionSignature = NULL;
-	char *functionOwner = NULL;
-
-	OverrideSearchPath *overridePath = NULL;
-	Datum functionSignatureDatum = 0;
-
 	if (HeapTupleIsValid(proctup))
 	{
-		Form_pg_proc procform;
-
-		procform = (Form_pg_proc) GETSTRUCT(proctup);
+		Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);

 		procOwner = procform->proowner;

@@ -644,7 +622,7 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid)
 	 * schema-prefixed. pg_catalog will be added automatically when we call
 	 * PushOverrideSearchPath(), since we set addCatalog to true;
 	 */
-	overridePath = GetOverrideSearchPath(CurrentMemoryContext);
+	OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
 	overridePath->schemas = NIL;
 	overridePath->addCatalog = true;

@@ -654,16 +632,16 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid)
 	 * If the function exists we want to use pg_get_function_identity_arguments to
 	 * serialize its canonical arguments
 	 */
-	functionSignatureDatum =
+	Datum functionSignatureDatum =
 		DirectFunctionCall1(regprocedureout, ObjectIdGetDatum(funcOid));

 	/* revert back to original search_path */
 	PopOverrideSearchPath();

 	/* regprocedureout returns cstring */
-	functionSignature = DatumGetCString(functionSignatureDatum);
+	char *functionSignature = DatumGetCString(functionSignatureDatum);

-	functionOwner = GetUserNameFromId(procOwner, false);
+	char *functionOwner = GetUserNameFromId(procOwner, false);

 	appendStringInfo(alterCommand, "ALTER %s %s OWNER TO %s;",
 					 kindString,

@@ -686,12 +664,8 @@ static char *
 GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 {
 	StringInfoData buf = { 0 };
-	HeapTuple proctup = NULL;
-	Form_pg_proc proc = NULL;
 	HeapTuple aggtup = NULL;
 	Form_pg_aggregate agg = NULL;
-	const char *name = NULL;
-	const char *nsp = NULL;
 	int numargs = 0;
 	int i = 0;
 	Oid *argtypes = NULL;

@@ -701,20 +675,20 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 	int argsprinted = 0;
 	int inputargno = 0;

-	proctup = SearchSysCache1(PROCOID, funcOid);
+	HeapTuple proctup = SearchSysCache1(PROCOID, funcOid);
 	if (!HeapTupleIsValid(proctup))
 	{
 		elog(ERROR, "cache lookup failed for %d", funcOid);
 	}

-	proc = (Form_pg_proc) GETSTRUCT(proctup);
+	Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(proctup);

 	Assert(proc->prokind == PROKIND_AGGREGATE);

 	initStringInfo(&buf);

-	name = NameStr(proc->proname);
-	nsp = get_namespace_name(proc->pronamespace);
+	const char *name = NameStr(proc->proname);
+	const char *nsp = get_namespace_name(proc->pronamespace);

 #if PG_VERSION_NUM >= 120000
 	if (useCreateOrReplace)

@@ -1112,8 +1086,6 @@ TriggerSyncMetadataToPrimaryNodes(void)
 static bool
 ShouldPropagateCreateFunction(CreateFunctionStmt *stmt)
 {
-	const ObjectAddress *address = NULL;
-
 	if (creating_extension)
 	{
 		/*

@@ -1144,7 +1116,7 @@ ShouldPropagateCreateFunction(CreateFunctionStmt *stmt)
 	 * Even though its a replace we should accept an non-existing function, it will just
 	 * not be distributed
 	 */
-	address = GetObjectAddressFromParseTree((Node *) stmt, true);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, true);
 	if (!IsObjectDistributed(address))
 	{
 		/* do not propagate alter function for non-distributed functions */

@@ -1231,21 +1203,18 @@ PlanCreateFunctionStmt(CreateFunctionStmt *stmt, const char *queryString)
 List *
 ProcessCreateFunctionStmt(CreateFunctionStmt *stmt, const char *queryString)
 {
-	const ObjectAddress *address = NULL;
-	List *commands = NIL;
-
 	if (!ShouldPropagateCreateFunction(stmt))
 	{
 		return NIL;
 	}

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	EnsureDependenciesExistsOnAllNodes(address);

-	commands = list_make4(DISABLE_DDL_PROPAGATION,
-						  GetFunctionDDLCommand(address->objectId, true),
-						  GetFunctionAlterOwnerCommand(address->objectId),
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make4(DISABLE_DDL_PROPAGATION,
+								GetFunctionDDLCommand(address->objectId, true),
+								GetFunctionAlterOwnerCommand(address->objectId),
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1260,7 +1229,6 @@ ObjectAddress *
 CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok)
 {
 	ObjectType objectType = OBJECT_FUNCTION;
-	ObjectWithArgs *objectWithArgs = NULL;
 	ListCell *parameterCell = NULL;

 	if (stmt->is_procedure)

@@ -1268,7 +1236,7 @@ CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok)
 		objectType = OBJECT_PROCEDURE;
 	}

-	objectWithArgs = makeNode(ObjectWithArgs);
+	ObjectWithArgs *objectWithArgs = makeNode(ObjectWithArgs);
 	objectWithArgs->objname = stmt->funcname;

 	foreach(parameterCell, stmt->parameters)

@@ -1292,12 +1260,11 @@ CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok)
 ObjectAddress *
 DefineAggregateStmtObjectAddress(DefineStmt *stmt, bool missing_ok)
 {
-	ObjectWithArgs *objectWithArgs = NULL;
 	ListCell *parameterCell = NULL;

 	Assert(stmt->kind == OBJECT_AGGREGATE);

-	objectWithArgs = makeNode(ObjectWithArgs);
+	ObjectWithArgs *objectWithArgs = makeNode(ObjectWithArgs);
 	objectWithArgs->objname = stmt->defnames;

 	foreach(parameterCell, linitial(stmt->args))

@@ -1318,13 +1285,9 @@ DefineAggregateStmtObjectAddress(DefineStmt *stmt, bool missing_ok)
 List *
 PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString)
 {
-	const char *sql = NULL;
-	const ObjectAddress *address = NULL;
-	List *commands = NIL;
-
 	AssertObjectTypeIsFunctional(stmt->objtype);

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	if (!ShouldPropagateAlterFunction(address))
 	{
 		return NIL;

@@ -1334,11 +1297,11 @@ PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString)
 	ErrorIfUnsupportedAlterFunctionStmt(stmt);
 	EnsureSequentialModeForFunctionDDL();
 	QualifyTreeNode((Node *) stmt);
-	sql = DeparseTreeNode((Node *) stmt);
+	const char *sql = DeparseTreeNode((Node *) stmt);

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) sql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1355,13 +1318,9 @@ PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString)
 List *
 PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString)
 {
-	const char *sql = NULL;
-	const ObjectAddress *address = NULL;
-	List *commands = NIL;
-
 	AssertObjectTypeIsFunctional(stmt->renameType);

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	if (!ShouldPropagateAlterFunction(address))
 	{
 		return NIL;

@@ -1370,11 +1329,11 @@ PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString)
 	EnsureCoordinator();
 	EnsureSequentialModeForFunctionDDL();
 	QualifyTreeNode((Node *) stmt);
-	sql = DeparseTreeNode((Node *) stmt);
+	const char *sql = DeparseTreeNode((Node *) stmt);

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) sql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1389,13 +1348,9 @@ PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString)
 List *
 PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
 {
-	const char *sql = NULL;
-	const ObjectAddress *address = NULL;
-	List *commands = NIL;
-
 	AssertObjectTypeIsFunctional(stmt->objectType);

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	if (!ShouldPropagateAlterFunction(address))
 	{
 		return NIL;

@@ -1404,11 +1359,11 @@ PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString
 	EnsureCoordinator();
 	EnsureSequentialModeForFunctionDDL();
 	QualifyTreeNode((Node *) stmt);
-	sql = DeparseTreeNode((Node *) stmt);
+	const char *sql = DeparseTreeNode((Node *) stmt);

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) sql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1424,13 +1379,9 @@ PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString
 List *
 PlanAlterFunctionOwnerStmt(AlterOwnerStmt *stmt, const char *queryString)
 {
-	const ObjectAddress *address = NULL;
-	const char *sql = NULL;
-	List *commands = NULL;
-
 	AssertObjectTypeIsFunctional(stmt->objectType);

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	if (!ShouldPropagateAlterFunction(address))
 	{
 		return NIL;

@@ -1439,11 +1390,11 @@ PlanAlterFunctionOwnerStmt(AlterOwnerStmt *stmt, const char *queryString)
 	EnsureCoordinator();
 	EnsureSequentialModeForFunctionDDL();
 	QualifyTreeNode((Node *) stmt);
-	sql = DeparseTreeNode((Node *) stmt);
+	const char *sql = DeparseTreeNode((Node *) stmt);

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) sql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1465,10 +1416,7 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString)
 	List *distributedObjectWithArgsList = NIL;
 	List *distributedFunctionAddresses = NIL;
 	ListCell *addressCell = NULL;
-	const char *dropStmtSql = NULL;
-	List *commands = NULL;
 	ListCell *objectWithArgsListCell = NULL;
-	DropStmt *stmtCopy = NULL;

 	AssertObjectTypeIsFunctional(stmt->removeType);

@@ -1502,11 +1450,9 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString)
 	 */
 	foreach(objectWithArgsListCell, deletingObjectWithArgsList)
 	{
-		ObjectWithArgs *func = NULL;
-		ObjectAddress *address = NULL;
-
-		func = castNode(ObjectWithArgs, lfirst(objectWithArgsListCell));
-		address = FunctionToObjectAddress(stmt->removeType, func, stmt->missing_ok);
+		ObjectWithArgs *func = castNode(ObjectWithArgs, lfirst(objectWithArgsListCell));
+		ObjectAddress *address = FunctionToObjectAddress(stmt->removeType, func,
+														 stmt->missing_ok);

 		if (!IsObjectDistributed(address))
 		{

@@ -1543,13 +1489,13 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString)
 	 * Swap the list of objects before deparsing and restore the old list after. This
 	 * ensures we only have distributed functions in the deparsed drop statement.
 	 */
-	stmtCopy = copyObject(stmt);
+	DropStmt *stmtCopy = copyObject(stmt);
 	stmtCopy->objects = distributedObjectWithArgsList;
-	dropStmtSql = DeparseTreeNode((Node *) stmtCopy);
+	const char *dropStmtSql = DeparseTreeNode((Node *) stmtCopy);

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
-						  (void *) dropStmtSql,
-						  ENABLE_DDL_PROPAGATION);
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) dropStmtSql,
+								ENABLE_DDL_PROPAGATION);

 	return NodeDDLTaskList(ALL_WORKERS, commands);
 }

@@ -1569,9 +1515,6 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString)
 List *
 PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryString)
 {
-	const ObjectAddress *address = NULL;
-	const char *functionName = NULL;
-
 	AssertObjectTypeIsFunctional(stmt->objectType);

 	if (creating_extension)

@@ -1591,7 +1534,7 @@ PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryStri
 		return NIL;
 	}

-	address = GetObjectAddressFromParseTree((Node *) stmt, true);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, true);
 	if (!IsObjectDistributed(address))
 	{
 		return NIL;

@@ -1603,7 +1546,7 @@ PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryStri
 	 * workers
 	 */

-	functionName = getObjectIdentity(address);
+	const char *functionName = getObjectIdentity(address);
 	ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an "
 						   "extension"),
 					errdetail("Function \"%s\" is already distributed. Functions from "

@@ -1635,11 +1578,9 @@ AlterFunctionDependsStmtObjectAddress(AlterObjectDependsStmt *stmt, bool missing
 void
 ProcessAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
 {
-	const ObjectAddress *address = NULL;
-
 	AssertObjectTypeIsFunctional(stmt->objectType);

-	address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false);
 	if (!ShouldPropagateAlterFunction(address))
 	{
 		return;

@@ -1698,16 +1639,11 @@ AlterFunctionOwnerObjectAddress(AlterOwnerStmt *stmt, bool missing_ok)
 ObjectAddress *
 AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok)
 {
-	ObjectWithArgs *objectWithArgs = NULL;
-	Oid funcOid = InvalidOid;
-	List *names = NIL;
-	ObjectAddress *address = NULL;
-
 	AssertObjectTypeIsFunctional(stmt->objectType);

-	objectWithArgs = castNode(ObjectWithArgs, stmt->object);
-	funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true);
-	names = objectWithArgs->objname;
+	ObjectWithArgs *objectWithArgs = castNode(ObjectWithArgs, stmt->object);
+	Oid funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true);
+	List *names = objectWithArgs->objname;

 	if (funcOid == InvalidOid)
 	{

@@ -1744,7 +1680,7 @@ AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_o
 		}
 	}

-	address = palloc0(sizeof(ObjectAddress));
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
 	ObjectAddressSet(*address, ProcedureRelationId, funcOid);

 	return address;

@@ -1766,7 +1702,6 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address)
 											   address->objectId)));
 	char *baseName = get_func_name(address->objectId);
 	int baseLength = strlen(baseName);
-	int numargs = 0;
 	Oid *argtypes = NULL;
 	char **argnames = NULL;
 	char *argmodes = NULL;

@@ -1777,15 +1712,13 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address)
 		elog(ERROR, "citus cache lookup failed.");
 	}

-	numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes);
+	int numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes);
 	ReleaseSysCache(proctup);

 	while (true)
 	{
 		int suffixLength = snprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)",
 									count);
-		List *newProcName = NIL;
-		FuncCandidateList clist = NULL;

 		/* trim the base name at the end to leave space for the suffix and trailing \0 */
 		baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1);

@@ -1795,10 +1728,11 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address)
 		strncpy(newName, baseName, baseLength);
 		strncpy(newName + baseLength, suffix, suffixLength);

-		newProcName = list_make2(namespace, makeString(newName));
+		List *newProcName = list_make2(namespace, makeString(newName));

 		/* don't need to rename if the input arguments don't match */
-		clist = FuncnameGetCandidates(newProcName, numargs, NIL, false, false, true);
+		FuncCandidateList clist = FuncnameGetCandidates(newProcName, numargs, NIL, false,
+														false, true);
 		for (; clist; clist = clist->next)
 		{
 			if (memcmp(clist->args, argtypes, sizeof(Oid) * numargs) == 0)

@@ -1828,8 +1762,6 @@ ObjectWithArgsFromOid(Oid funcOid)
 	Oid *argTypes = NULL;
 	char **argNames = NULL;
 	char *argModes = NULL;
-	int numargs = 0;
-	int i = 0;
 	HeapTuple proctup = SearchSysCache1(PROCOID, funcOid);

 	if (!HeapTupleIsValid(proctup))

@@ -1837,14 +1769,14 @@ ObjectWithArgsFromOid(Oid funcOid)
 		elog(ERROR, "citus cache lookup failed.");
 	}

-	numargs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes);
+	int numargs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes);

 	objectWithArgs->objname = list_make2(
 		makeString(get_namespace_name(get_func_namespace(funcOid))),
 		makeString(get_func_name(funcOid))
 		);

-	for (i = 0; i < numargs; i++)
+	for (int i = 0; i < numargs; i++)
 	{
 		if (argModes == NULL ||
 			argModes[i] != PROARGMODE_OUT || argModes[i] != PROARGMODE_TABLE)

@@ -1870,13 +1802,10 @@ static ObjectAddress *
 FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs,
 						bool missing_ok)
 {
-	Oid funcOid = InvalidOid;
-	ObjectAddress *address = NULL;
-
 	AssertObjectTypeIsFunctional(objectType);

-	funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok);
-	address = palloc0(sizeof(ObjectAddress));
+	Oid funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok);
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
 	ObjectAddressSet(*address, ProcedureRelationId, funcOid);

 	return address;
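Not every declaration is converted. A variable whose first use passes its address as an out-argument (for example `Oid *argTypes = NULL;` above, which is filled in through `&argTypes` by get_func_arg_info()), or whose value is set on more than one code path, keeps its separate declaration. A minimal sketch of both cases, again with hypothetical names rather than code from this diff:

	#include <stdlib.h>

	static long
	ParseOrZero(const char *text)
	{
		char *endPointer = NULL; /* first use is &endPointer, so the declaration stays put */
		long value = strtol(text, &endPointer, 10);

		if (endPointer == text)
		{
			/* "value" is assigned on two paths; only the first assignment
			 * can be merged with the declaration */
			value = 0;
		}

		return value;
	}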
@@ -115,9 +115,6 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)
 	 */
 	if (createIndexStatement->relation != NULL)
 	{
-		Relation relation = NULL;
-		Oid relationId = InvalidOid;
-		bool isDistributedRelation = false;
 		LOCKMODE lockmode = ShareLock;
 		MemoryContext relationContext = NULL;

@@ -137,10 +134,10 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)
 		 * checked permissions, and will only fail when executing the actual
 		 * index statements.
 		 */
-		relation = heap_openrv(createIndexStatement->relation, lockmode);
-		relationId = RelationGetRelid(relation);
+		Relation relation = heap_openrv(createIndexStatement->relation, lockmode);
+		Oid relationId = RelationGetRelid(relation);

-		isDistributedRelation = IsDistributedTable(relationId);
+		bool isDistributedRelation = IsDistributedTable(relationId);

 		if (createIndexStatement->relation->schemaname == NULL)
 		{

@@ -163,15 +160,13 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)

 		if (isDistributedRelation)
 		{
-			Oid namespaceId = InvalidOid;
-			Oid indexRelationId = InvalidOid;
 			char *indexName = createIndexStatement->idxname;
 			char *namespaceName = createIndexStatement->relation->schemaname;

 			ErrorIfUnsupportedIndexStmt(createIndexStatement);

-			namespaceId = get_namespace_oid(namespaceName, false);
-			indexRelationId = get_relname_relid(indexName, namespaceId);
+			Oid namespaceId = get_namespace_oid(namespaceName, false);
+			Oid indexRelationId = get_relname_relid(indexName, namespaceId);

 			/* if index does not exist, send the command to workers */
 			if (!OidIsValid(indexRelationId))

@@ -319,9 +314,6 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 	/* check if any of the indexes being dropped belong to a distributed table */
 	foreach(dropObjectCell, dropIndexStatement->objects)
 	{
-		Oid indexId = InvalidOid;
-		Oid relationId = InvalidOid;
-		bool isDistributedRelation = false;
 		struct DropRelationCallbackState state;
 		uint32 rvrFlags = RVR_MISSING_OK;
 		LOCKMODE lockmode = AccessExclusiveLock;

@@ -349,9 +341,9 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 		state.heapOid = InvalidOid;
 		state.concurrent = dropIndexStatement->concurrent;

-		indexId = RangeVarGetRelidExtended(rangeVar, lockmode, rvrFlags,
-										   RangeVarCallbackForDropIndex,
-										   (void *) &state);
+		Oid indexId = RangeVarGetRelidExtended(rangeVar, lockmode, rvrFlags,
+											   RangeVarCallbackForDropIndex,
+											   (void *) &state);

 		/*
 		 * If the index does not exist, we don't do anything here, and allow

@@ -362,8 +354,8 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 			continue;
 		}

-		relationId = IndexGetRelation(indexId, false);
-		isDistributedRelation = IsDistributedTable(relationId);
+		Oid relationId = IndexGetRelation(indexId, false);
+		bool isDistributedRelation = IsDistributedTable(relationId);
 		if (isDistributedRelation)
 		{
 			distributedIndexId = indexId;

@@ -400,13 +392,6 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 void
 PostProcessIndexStmt(IndexStmt *indexStmt)
 {
-	Relation relation = NULL;
-	Oid indexRelationId = InvalidOid;
-	Relation indexRelation = NULL;
-	Relation pg_index = NULL;
-	HeapTuple indexTuple = NULL;
-	Form_pg_index indexForm = NULL;
-
 	/* we are only processing CONCURRENT index statements */
 	if (!indexStmt->concurrent)
 	{

@@ -424,10 +409,10 @@ PostProcessIndexStmt(IndexStmt *indexStmt)
 	StartTransactionCommand();

 	/* get the affected relation and index */
-	relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
-	indexRelationId = get_relname_relid(indexStmt->idxname,
-										RelationGetNamespace(relation));
-	indexRelation = index_open(indexRelationId, RowExclusiveLock);
+	Relation relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
+	Oid indexRelationId = get_relname_relid(indexStmt->idxname,
+											RelationGetNamespace(relation));
+	Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);

 	/* close relations but retain locks */
 	heap_close(relation, NoLock);

@@ -441,13 +426,14 @@ PostProcessIndexStmt(IndexStmt *indexStmt)
 	StartTransactionCommand();

 	/* now, update index's validity in a way that can roll back */
-	pg_index = heap_open(IndexRelationId, RowExclusiveLock);
+	Relation pg_index = heap_open(IndexRelationId, RowExclusiveLock);

-	indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(indexRelationId));
+	HeapTuple indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(
+												   indexRelationId));
 	Assert(HeapTupleIsValid(indexTuple)); /* better be present, we have lock! */

 	/* mark as valid, save, and update pg_index indexes */
-	indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
+	Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
 	indexForm->indisvalid = true;

 	CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple);

@@ -528,11 +514,10 @@ CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt)
 	{
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
-		Task *task = NULL;

 		deparse_shard_index_statement(indexStmt, relationId, shardId, &ddlString);

-		task = CitusMakeNode(Task);
+		Task *task = CitusMakeNode(Task);
 		task->jobId = jobId;
 		task->taskId = taskId++;
 		task->taskType = DDL_TASK;

@@ -574,11 +559,10 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt)
 	{
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
-		Task *task = NULL;

 		deparse_shard_reindex_statement(reindexStmt, relationId, shardId, &ddlString);

-		task = CitusMakeNode(Task);
+		Task *task = CitusMakeNode(Task);
 		task->jobId = jobId;
 		task->taskId = taskId++;
 		task->taskType = DDL_TASK;

@@ -612,13 +596,11 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
 {
 	/* *INDENT-OFF* */
 	HeapTuple tuple;
-	struct DropRelationCallbackState *state;
 	char relkind;
 	char expected_relkind;
-	Form_pg_class classform;
 	LOCKMODE heap_lockmode;

-	state = (struct DropRelationCallbackState *) arg;
+	struct DropRelationCallbackState *state = (struct DropRelationCallbackState *) arg;
 	relkind = state->relkind;
 	heap_lockmode = state->concurrent ?
 		ShareUpdateExclusiveLock : AccessExclusiveLock;

@@ -643,7 +625,7 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
 	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid));
 	if (!HeapTupleIsValid(tuple))
 		return; /* concurrently dropped, so nothing to do */
-	classform = (Form_pg_class) GETSTRUCT(tuple);
+	Form_pg_class classform = (Form_pg_class) GETSTRUCT(tuple);

 	/*
 	 * PG 11 sends relkind as partitioned index for an index

@@ -805,7 +787,6 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
 		Oid relationId = RangeVarGetRelid(relation, lockMode, missingOk);
 		Var *partitionKey = DistPartitionKey(relationId);
 		char partitionMethod = PartitionMethod(relationId);
-		List *indexParameterList = NIL;
 		ListCell *indexParameterCell = NULL;
 		bool indexContainsPartitionColumn = false;

@@ -825,12 +806,11 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
 							   "is currently unsupported")));
 		}

-		indexParameterList = createIndexStatement->indexParams;
+		List *indexParameterList = createIndexStatement->indexParams;
 		foreach(indexParameterCell, indexParameterList)
 		{
 			IndexElem *indexElement = (IndexElem *) lfirst(indexParameterCell);
 			char *columnName = indexElement->name;
-			AttrNumber attributeNumber = InvalidAttrNumber;

 			/* column name is null for index expressions, skip it */
 			if (columnName == NULL)

@@ -838,7 +818,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
 				continue;
 			}

-			attributeNumber = get_attnum(relationId, columnName);
+			AttrNumber attributeNumber = get_attnum(relationId, columnName);
 			if (attributeNumber == partitionKey->varattno)
 			{
 				indexContainsPartitionColumn = true;

@@ -902,7 +882,6 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt)
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
 		char *shardIndexName = pstrdup(indexName);
-		Task *task = NULL;

 		AppendShardIdToName(&shardIndexName, shardId);

@@ -913,7 +892,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt)
 						 quote_qualified_identifier(schemaName, shardIndexName),
 						 (dropStmt->behavior == DROP_RESTRICT ? "RESTRICT" : "CASCADE"));

-		task = CitusMakeNode(Task);
+		Task *task = CitusMakeNode(Task);
 		task->jobId = jobId;
 		task->taskId = taskId++;
 		task->taskType = DDL_TASK;
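The rewrite is also conservative around preprocessor conditionals: when the assignment sits behind a version guard, the declaration cannot safely move into either branch (the other branch would then reference an undeclared variable), so it is left where it is. A hypothetical sketch of the shape that stays untouched; the guard symbol mirrors the PG_VERSION_NUM checks visible elsewhere in this diff:

	const char *createSyntax = NULL;
	#if PG_VERSION_NUM >= 120000
	createSyntax = "CREATE OR REPLACE AGGREGATE";
	#else
	createSyntax = "CREATE AGGREGATE";
	#endif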
@@ -298,8 +298,6 @@ PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb);
 static void
 CitusCopyFrom(CopyStmt *copyStatement, char *completionTag)
 {
-	bool isCopyFromWorker = false;
-
 	BeginOrContinueCoordinatedTransaction();

 	/* disallow COPY to/from file or program except for superusers */

@@ -324,7 +322,7 @@ CitusCopyFrom(CopyStmt *copyStatement, char *completionTag)
 	}

 	masterConnection = NULL; /* reset, might still be set after error */
-	isCopyFromWorker = IsCopyFromWorker(copyStatement);
+	bool isCopyFromWorker = IsCopyFromWorker(copyStatement);
 	if (isCopyFromWorker)
 	{
 		CopyFromWorkerNode(copyStatement, completionTag);

@@ -387,9 +385,6 @@ CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag)
 	NodeAddress *masterNodeAddress = MasterNodeAddress(copyStatement);
 	char *nodeName = masterNodeAddress->nodeName;
 	int32 nodePort = masterNodeAddress->nodePort;
-	Oid relationId = InvalidOid;
-	char partitionMethod = 0;
-	char *schemaName = NULL;
 	uint32 connectionFlags = FOR_DML;

 	masterConnection = GetNodeConnection(connectionFlags, nodeName, nodePort);

@@ -399,14 +394,14 @@ CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag)
 	RemoteTransactionBeginIfNecessary(masterConnection);

 	/* strip schema name for local reference */
-	schemaName = copyStatement->relation->schemaname;
+	char *schemaName = copyStatement->relation->schemaname;
 	copyStatement->relation->schemaname = NULL;

-	relationId = RangeVarGetRelid(copyStatement->relation, NoLock, false);
+	Oid relationId = RangeVarGetRelid(copyStatement->relation, NoLock, false);

 	/* put schema name back */
 	copyStatement->relation->schemaname = schemaName;
-	partitionMethod = MasterPartitionMethod(copyStatement->relation);
+	char partitionMethod = MasterPartitionMethod(copyStatement->relation);
 	if (partitionMethod != DISTRIBUTE_BY_APPEND)
 	{
 		ereport(ERROR, (errmsg("copy from worker nodes is only supported "

@@ -439,18 +434,10 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
 	CitusCopyDestReceiver *copyDest = NULL;
 	DestReceiver *dest = NULL;

-	Relation distributedRelation = NULL;
 	Relation copiedDistributedRelation = NULL;
 	Form_pg_class copiedDistributedRelationTuple = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	uint32 columnCount = 0;
-	Datum *columnValues = NULL;
-	bool *columnNulls = NULL;
-	int columnIndex = 0;
 	List *columnNameList = NIL;
-	Var *partitionColumn = NULL;
 	int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX;
-	TupleTableSlot *tupleTableSlot = NULL;

 	EState *executorState = NULL;
 	MemoryContext executorTupleContext = NULL;

@@ -465,27 +452,28 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
 	ErrorContextCallback errorCallback;

 	/* allocate column values and nulls arrays */
-	distributedRelation = heap_open(tableId, RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(distributedRelation);
-	columnCount = tupleDescriptor->natts;
-	columnValues = palloc0(columnCount * sizeof(Datum));
-	columnNulls = palloc0(columnCount * sizeof(bool));
+	Relation distributedRelation = heap_open(tableId, RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
+	uint32 columnCount = tupleDescriptor->natts;
+	Datum *columnValues = palloc0(columnCount * sizeof(Datum));
+	bool *columnNulls = palloc0(columnCount * sizeof(bool));

 	/* set up a virtual tuple table slot */
-	tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, &TTSOpsVirtual);
+	TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor,
+																	&TTSOpsVirtual);
 	tupleTableSlot->tts_nvalid = columnCount;
 	tupleTableSlot->tts_values = columnValues;
 	tupleTableSlot->tts_isnull = columnNulls;

 	/* determine the partition column index in the tuple descriptor */
-	partitionColumn = PartitionColumn(tableId, 0);
+	Var *partitionColumn = PartitionColumn(tableId, 0);
 	if (partitionColumn != NULL)
 	{
 		partitionColumnIndex = partitionColumn->varattno - 1;
 	}

 	/* build the list of column names for remote COPY statements */
-	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
 		char *columnName = NameStr(currentColumn->attname);

@@ -566,16 +554,13 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)

 	while (true)
 	{
-		bool nextRowFound = false;
-		MemoryContext oldContext = NULL;
-
 		ResetPerTupleExprContext(executorState);

-		oldContext = MemoryContextSwitchTo(executorTupleContext);
+		MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);

 		/* parse a row from the input */
-		nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
-										  columnValues, columnNulls);
+		bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+											   columnValues, columnNulls);

 		if (!nextRowFound)
 		{

@@ -625,8 +610,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
 static void
 CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
 {
-	FmgrInfo *columnOutputFunctions = NULL;
-
 	/* allocate column values and nulls arrays */
 	Relation distributedRelation = heap_open(relationId, RowExclusiveLock);
 	TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);

@@ -668,7 +651,8 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
 	copyOutState->fe_msgbuf = makeStringInfo();
 	copyOutState->rowcontext = executorTupleContext;

-	columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor, copyOutState->binary);
+	FmgrInfo *columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor,
+															copyOutState->binary);

 	/* set up callback to identify error line number */
 	errorCallback.callback = CopyFromErrorCallback;

@@ -684,19 +668,15 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)

 	while (true)
 	{
-		bool nextRowFound = false;
-		MemoryContext oldContext = NULL;
-		uint64 messageBufferSize = 0;
-
 		ResetPerTupleExprContext(executorState);

 		/* switch to tuple memory context and start showing line number in errors */
 		error_context_stack = &errorCallback;
-		oldContext = MemoryContextSwitchTo(executorTupleContext);
+		MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);

 		/* parse a row from the input */
-		nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
-										  columnValues, columnNulls);
+		bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+											   columnValues, columnNulls);

 		if (!nextRowFound)
 		{

@@ -739,7 +719,7 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
 			SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId,
 							  shardConnections->connectionList);

-			messageBufferSize = copyOutState->fe_msgbuf->len;
+			uint64 messageBufferSize = copyOutState->fe_msgbuf->len;
 			copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize;

 			/*

@@ -841,7 +821,6 @@ static char
 MasterPartitionMethod(RangeVar *relation)
 {
 	char partitionMethod = '\0';
-	PGresult *queryResult = NULL;
 	bool raiseInterrupts = true;

 	char *relationName = relation->relname;

@@ -855,7 +834,7 @@ MasterPartitionMethod(RangeVar *relation)
 	{
 		ReportConnectionError(masterConnection, ERROR);
 	}
-	queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
+	PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
 	if (PQresultStatus(queryResult) == PGRES_TUPLES_OK)
 	{
 		char *partitionMethodString = PQgetvalue((PGresult *) queryResult, 0, 0);

@@ -923,7 +902,6 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 								ShardConnections *shardConnections,
 								bool stopOnFailure, bool useBinaryCopyFormat)
 {
-	List *finalizedPlacementList = NIL;
 	int failedPlacementCount = 0;
 	ListCell *placementCell = NULL;
 	List *connectionList = NULL;

@@ -940,7 +918,7 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 	/* release finalized placement list at the end of this function */
 	MemoryContext oldContext = MemoryContextSwitchTo(localContext);

-	finalizedPlacementList = MasterShardPlacementList(shardId);
+	List *finalizedPlacementList = MasterShardPlacementList(shardId);

 	MemoryContextSwitchTo(oldContext);

@@ -948,10 +926,7 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 	{
 		ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell);
 		char *nodeUser = CurrentUserName();
-		MultiConnection *connection = NULL;
 		uint32 connectionFlags = FOR_DML;
-		StringInfo copyCommand = NULL;
-		PGresult *result = NULL;

 		/*
 		 * For hash partitioned tables, connection establishment happens in

@@ -959,7 +934,8 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 		 */
 		Assert(placement->partitionMethod != DISTRIBUTE_BY_HASH);

-		connection = GetPlacementConnection(connectionFlags, placement, nodeUser);
+		MultiConnection *connection = GetPlacementConnection(connectionFlags, placement,
+															 nodeUser);

 		if (PQstatus(connection->pgConn) != CONNECTION_OK)
 		{

@@ -987,14 +963,15 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 		ClaimConnectionExclusively(connection);
 		RemoteTransactionBeginIfNecessary(connection);

-		copyCommand = ConstructCopyStatement(copyStatement, shardConnections->shardId,
-											 useBinaryCopyFormat);
+		StringInfo copyCommand = ConstructCopyStatement(copyStatement,
+														shardConnections->shardId,
+														useBinaryCopyFormat);

 		if (!SendRemoteCommand(connection, copyCommand->data))
 		{
 			ReportConnectionError(connection, ERROR);
 		}
-		result = GetRemoteCommandResult(connection, raiseInterrupts);
+		PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
 		if (PQresultStatus(result) != PGRES_COPY_IN)
 		{
 			ReportResultError(connection, result, ERROR);

@@ -1035,9 +1012,8 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription)
 {
 	bool useBinaryCopyFormat = true;
 	int totalColumnCount = tupleDescription->natts;
-	int columnIndex = 0;

-	for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++)
+	for (int columnIndex = 0; columnIndex < totalColumnCount; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex);
 		Oid typeId = InvalidOid;

@@ -1149,7 +1125,6 @@ static List *
 RemoteFinalizedShardPlacementList(uint64 shardId)
 {
 	List *finalizedPlacementList = NIL;
-	PGresult *queryResult = NULL;
 	bool raiseInterrupts = true;

 	StringInfo shardPlacementsCommand = makeStringInfo();

@@ -1159,13 +1134,12 @@ RemoteFinalizedShardPlacementList(uint64 shardId)
 	{
 		ReportConnectionError(masterConnection, ERROR);
 	}
-	queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
+	PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
 	if (PQresultStatus(queryResult) == PGRES_TUPLES_OK)
 	{
 		int rowCount = PQntuples(queryResult);
-		int rowIndex = 0;

-		for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
+		for (int rowIndex = 0; rowIndex < rowCount; rowIndex++)
 		{
 			char *placementIdString = PQgetvalue(queryResult, rowIndex, 0);
 			char *nodeName = pstrdup(PQgetvalue(queryResult, rowIndex, 1));

@@ -1236,11 +1210,10 @@ ConstructCopyStatement(CopyStmt *copyStatement, int64 shardId, bool useBinaryCop
 	char *relationName = copyStatement->relation->relname;

 	char *shardName = pstrdup(relationName);
-	char *shardQualifiedName = NULL;

 	AppendShardIdToName(&shardName, shardId);

-	shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
+	char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);

 	appendStringInfo(command, "COPY %s ", shardQualifiedName);

@@ -1331,7 +1304,6 @@ EndRemoteCopy(int64 shardId, List *connectionList)
 	foreach(connectionCell, connectionList)
 	{
 		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		PGresult *result = NULL;
 		bool raiseInterrupts = true;

 		/* end the COPY input */

@@ -1343,7 +1315,7 @@ EndRemoteCopy(int64 shardId, List *connectionList)
 		}

 		/* check whether there were any COPY errors */
-		result = GetRemoteCommandResult(connection, raiseInterrupts);
+		PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
 		if (PQresultStatus(result) != PGRES_COMMAND_OK)
 		{
 			ReportCopyError(connection, result);

@@ -1487,14 +1459,13 @@ static Oid
 TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName)
 {
 	AttrNumber destAttrNumber = get_attnum(relationId, columnName);
-	Form_pg_attribute attr = NULL;
-
 	if (destAttrNumber == InvalidAttrNumber)
 	{
 		ereport(ERROR, (errmsg("invalid attr? %s", columnName)));
 	}

-	attr = TupleDescAttr(tupleDescriptor, destAttrNumber - 1);
+	Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, destAttrNumber - 1);
 	return attr->atttypid;
 }

@@ -1508,9 +1479,8 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor)
 {
 	int columnCount = tupleDescriptor->natts;
 	Oid *typeArray = palloc0(columnCount * sizeof(Oid));
-	int columnIndex = 0;

-	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex);
 		if (attr->attisdropped)

@@ -1537,15 +1507,13 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto
 					Oid destRelId, List *columnNameList,
 					Oid *finalColumnTypeArray)
 {
-	int columnIndex = 0;
 	int columnCount = inputTupleDescriptor->natts;
 	CopyCoercionData *coercePaths = palloc0(columnCount * sizeof(CopyCoercionData));
 	Oid *inputTupleTypes = TypeArrayFromTupleDescriptor(inputTupleDescriptor);
 	ListCell *currentColumnName = list_head(columnNameList);

-	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
-		Oid destTupleType = InvalidOid;
 		Oid inputTupleType = inputTupleTypes[columnIndex];
 		char *columnName = lfirst(currentColumnName);

@@ -1555,7 +1523,7 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto
 			continue;
 		}

-		destTupleType = TypeForColumnName(destRelId, destTupleDescriptor, columnName);
+		Oid destTupleType = TypeForColumnName(destRelId, destTupleDescriptor, columnName);

 		finalColumnTypeArray[columnIndex] = destTupleType;

@@ -1584,8 +1552,7 @@ TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray, bool binaryFormat)
 {
 	FmgrInfo *columnOutputFunctions = palloc0(columnCount * sizeof(FmgrInfo));

-	uint32 columnIndex = 0;
-	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	for (uint32 columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		FmgrInfo *currentOutputFunction = &columnOutputFunctions[columnIndex];
 		Oid columnTypeId = typeIdArray[columnIndex];

@@ -1665,7 +1632,6 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
 	uint32 totalColumnCount = (uint32) rowDescriptor->natts;
 	uint32 availableColumnCount = AvailableColumnCount(rowDescriptor);
 	uint32 appendedColumnCount = 0;
-	uint32 columnIndex = 0;

 	MemoryContext oldContext = MemoryContextSwitchTo(rowOutputState->rowcontext);

@@ -1673,7 +1639,7 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
 	{
 		CopySendInt16(rowOutputState, availableColumnCount);
 	}
-	for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++)
+	for (uint32 columnIndex = 0; columnIndex < totalColumnCount; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(rowDescriptor, columnIndex);
 		Datum value = valueArray[columnIndex];

@@ -1803,9 +1769,8 @@ static uint32
 AvailableColumnCount(TupleDesc tupleDescriptor)
 {
 	uint32 columnCount = 0;
-	uint32 columnIndex = 0;

-	for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
+	for (uint32 columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);

@@ -1916,13 +1881,11 @@ MasterCreateEmptyShard(char *relationName)
 static int64
 CreateEmptyShard(char *relationName)
 {
-	int64 shardId = 0;
-
 	text *relationNameText = cstring_to_text(relationName);
 	Datum relationNameDatum = PointerGetDatum(relationNameText);
 	Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard,
 											 relationNameDatum);
-	shardId = DatumGetInt64(shardIdDatum);
+	int64 shardId = DatumGetInt64(shardIdDatum);

 	return shardId;
 }

@@ -1936,7 +1899,6 @@ static int64
 RemoteCreateEmptyShard(char *relationName)
 {
 	int64 shardId = 0;
-	PGresult *queryResult = NULL;
 	bool raiseInterrupts = true;

 	StringInfo createEmptyShardCommand = makeStringInfo();

@@ -1946,7 +1908,7 @@ RemoteCreateEmptyShard(char *relationName)
 	{
 		ReportConnectionError(masterConnection, ERROR);
 	}
-	queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
+	PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
 	if (PQresultStatus(queryResult) == PGRES_TUPLES_OK)
 	{
 		char *shardIdString = PQgetvalue((PGresult *) queryResult, 0, 0);

@@ -1991,7 +1953,6 @@ MasterUpdateShardStatistics(uint64 shardId)
 static void
 RemoteUpdateShardStatistics(uint64 shardId)
 {
-	PGresult *queryResult = NULL;
 	bool raiseInterrupts = true;

 	StringInfo updateShardStatisticsCommand = makeStringInfo();

@@ -2002,7 +1963,7 @@ RemoteUpdateShardStatistics(uint64 shardId)
 	{
 		ReportConnectionError(masterConnection, ERROR);
 	}
-	queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
+	PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts);
 	if (PQresultStatus(queryResult) != PGRES_TUPLES_OK)
 	{
 		ereport(ERROR, (errmsg("could not update shard statistics")));

@@ -2067,7 +2028,6 @@ static void
 CopyAttributeOutText(CopyOutState cstate, char *string)
 {
 	char *pointer = NULL;
-	char *start = NULL;
 	char c = '\0';
 	char delimc = cstate->delim[0];

@@ -2092,7 +2052,7 @@ CopyAttributeOutText(CopyOutState cstate, char *string)
 	 * skip doing pg_encoding_mblen(), because in valid backend encodings,
 	 * extra bytes of a multibyte character never look like ASCII.
	 */
-	start = pointer;
+	char *start = pointer;
 	while ((c = *pointer) != '\0')
 	{
 		if ((unsigned char) c < (unsigned char) 0x20)

@@ -2184,9 +2144,8 @@ CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColu
 							EState *executorState, bool stopOnFailure,
 							char *intermediateResultIdPrefix)
 {
-	CitusCopyDestReceiver *copyDest = NULL;
-
-	copyDest = (CitusCopyDestReceiver *) palloc0(sizeof(CitusCopyDestReceiver));
+	CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) palloc0(
+		sizeof(CitusCopyDestReceiver));

 	/* set up the DestReceiver function pointers */
 	copyDest->pub.receiveSlot = CitusCopyDestReceiverReceive;

@@ -2225,20 +2184,14 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
 	Oid schemaOid = get_rel_namespace(tableId);
 	char *schemaName = get_namespace_name(schemaOid);

-	Relation distributedRelation = NULL;
 	List *columnNameList = copyDest->columnNameList;
 	List *quotedColumnNameList = NIL;

 	ListCell *columnNameCell = NULL;

 	char partitionMethod = '\0';
-	DistTableCacheEntry *cacheEntry = NULL;
-
-	CopyStmt *copyStatement = NULL;
-
-	List *shardIntervalList = NULL;
-
-	CopyOutState copyOutState = NULL;
 	const char *delimiterCharacter = "\t";
 	const char *nullPrintCharacter = "\\N";

@@ -2246,15 +2199,15 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
 	ErrorIfLocalExecutionHappened();

 	/* look up table properties */
-	distributedRelation = heap_open(tableId, RowExclusiveLock);
-	cacheEntry = DistributedTableCacheEntry(tableId);
+	Relation distributedRelation = heap_open(tableId, RowExclusiveLock);
+	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(tableId);
 	partitionMethod = cacheEntry->partitionMethod;

 	copyDest->distributedRelation = distributedRelation;
 	copyDest->tupleDescriptor = inputTupleDescriptor;

 	/* load the list of shards and verify that we have shards to copy into */
-	shardIntervalList = LoadShardIntervalList(tableId);
+	List *shardIntervalList = LoadShardIntervalList(tableId);
 	if (shardIntervalList == NIL)
 	{
 		if (partitionMethod == DISTRIBUTE_BY_HASH)

@@ -2307,7 +2260,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
 	}

 	/* define how tuples will be serialised */
-	copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
+	CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
 	copyOutState->delim = (char *) delimiterCharacter;
 	copyOutState->null_print = (char *) nullPrintCharacter;
 	copyOutState->null_print_client = (char *) nullPrintCharacter;

@@ -2349,15 +2302,15 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
 	}

 	/* define the template for the COPY statement that is sent to workers */
-	copyStatement = makeNode(CopyStmt);
+	CopyStmt *copyStatement = makeNode(CopyStmt);

 	if (copyDest->intermediateResultIdPrefix != NULL)
 	{
-		DefElem *formatResultOption = NULL;
 		copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix,
 											   -1);

-		formatResultOption = makeDefElem("format", (Node *) makeString("result"), -1);
+		DefElem *formatResultOption = makeDefElem("format", (Node *) makeString("result"),
|
||||
-1);
|
||||
copyStatement->options = list_make1(formatResultOption);
|
||||
}
|
||||
else
|
||||
|
@ -2422,7 +2375,6 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest
|
|||
TupleDesc tupleDescriptor = copyDest->tupleDescriptor;
|
||||
CopyStmt *copyStatement = copyDest->copyStatement;
|
||||
|
||||
CopyShardState *shardState = NULL;
|
||||
CopyOutState copyOutState = copyDest->copyOutState;
|
||||
FmgrInfo *columnOutputFunctions = copyDest->columnOutputFunctions;
|
||||
CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths;
|
||||
|
@ -2432,10 +2384,6 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest
|
|||
|
||||
bool stopOnFailure = copyDest->stopOnFailure;
|
||||
|
||||
Datum *columnValues = NULL;
|
||||
bool *columnNulls = NULL;
|
||||
|
||||
int64 shardId = 0;
|
||||
|
||||
EState *executorState = copyDest->executorState;
|
||||
MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState);
|
||||
|
@ -2443,17 +2391,18 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest
|
|||
|
||||
slot_getallattrs(slot);
|
||||
|
||||
columnValues = slot->tts_values;
|
||||
columnNulls = slot->tts_isnull;
|
||||
Datum *columnValues = slot->tts_values;
|
||||
bool *columnNulls = slot->tts_isnull;
|
||||
|
||||
shardId = ShardIdForTuple(copyDest, columnValues, columnNulls);
|
||||
int64 shardId = ShardIdForTuple(copyDest, columnValues, columnNulls);
|
||||
|
||||
/* connections hash is kept in memory context */
|
||||
MemoryContextSwitchTo(copyDest->memoryContext);
|
||||
|
||||
shardState = GetShardState(shardId, copyDest->shardStateHash,
|
||||
copyDest->connectionStateHash, stopOnFailure,
|
||||
&cachedShardStateFound);
|
||||
CopyShardState *shardState = GetShardState(shardId, copyDest->shardStateHash,
|
||||
copyDest->connectionStateHash,
|
||||
stopOnFailure,
|
||||
&cachedShardStateFound);
|
||||
if (!cachedShardStateFound)
|
||||
{
|
||||
firstTupleInShard = true;
|
||||
|
@ -2564,7 +2513,6 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
|
|||
int partitionColumnIndex = copyDest->partitionColumnIndex;
|
||||
Datum partitionColumnValue = 0;
|
||||
CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths;
|
||||
ShardInterval *shardInterval = NULL;
|
||||
|
||||
/*
|
||||
* Find the partition column value and corresponding shard interval
|
||||
|
@ -2605,7 +2553,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
|
|||
* For reference table, this function blindly returns the tables single
|
||||
* shard.
|
||||
*/
|
||||
shardInterval = FindShardInterval(partitionColumnValue, copyDest->tableMetadata);
|
||||
ShardInterval *shardInterval = FindShardInterval(partitionColumnValue,
|
||||
copyDest->tableMetadata);
|
||||
if (shardInterval == NULL)
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
|
@ -2628,11 +2577,10 @@ CitusCopyDestReceiverShutdown(DestReceiver *destReceiver)
|
|||
CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) destReceiver;
|
||||
|
||||
HTAB *connectionStateHash = copyDest->connectionStateHash;
|
||||
List *connectionStateList = NIL;
|
||||
ListCell *connectionStateCell = NULL;
|
||||
Relation distributedRelation = copyDest->distributedRelation;
|
||||
|
||||
connectionStateList = ConnectionStateList(connectionStateHash);
|
||||
List *connectionStateList = ConnectionStateList(connectionStateHash);
|
||||
|
||||
PG_TRY();
|
||||
{
|
||||
|
@ -2820,21 +2768,20 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
|
|||
else
|
||||
{
|
||||
bool isFrom = copyStatement->is_from;
|
||||
Relation copiedRelation = NULL;
|
||||
char *schemaName = NULL;
|
||||
MemoryContext relationContext = NULL;
|
||||
|
||||
/* consider using RangeVarGetRelidExtended to check perms before locking */
|
||||
copiedRelation = heap_openrv(copyStatement->relation,
|
||||
isFrom ? RowExclusiveLock : AccessShareLock);
|
||||
Relation copiedRelation = heap_openrv(copyStatement->relation,
|
||||
isFrom ? RowExclusiveLock :
|
||||
AccessShareLock);
|
||||
|
||||
isDistributedRelation = IsDistributedTable(RelationGetRelid(copiedRelation));
|
||||
|
||||
/* ensure future lookups hit the same relation */
|
||||
schemaName = get_namespace_name(RelationGetNamespace(copiedRelation));
|
||||
char *schemaName = get_namespace_name(RelationGetNamespace(copiedRelation));
|
||||
|
||||
/* ensure we copy string into proper context */
|
||||
relationContext = GetMemoryChunkContext(copyStatement->relation);
|
||||
MemoryContext relationContext = GetMemoryChunkContext(
|
||||
copyStatement->relation);
|
||||
schemaName = MemoryContextStrdup(relationContext, schemaName);
|
||||
copyStatement->relation->schemaname = schemaName;
|
||||
|
||||
|
@ -2906,16 +2853,15 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
|
|||
!copyStatement->is_from && !is_absolute_path(filename))
|
||||
{
|
||||
bool binaryCopyFormat = CopyStatementHasFormat(copyStatement, "binary");
|
||||
int64 tuplesSent = 0;
|
||||
Query *query = NULL;
|
||||
Node *queryNode = copyStatement->query;
|
||||
List *queryTreeList = NIL;
|
||||
StringInfo userFilePath = makeStringInfo();
|
||||
|
||||
RawStmt *rawStmt = makeNode(RawStmt);
|
||||
rawStmt->stmt = queryNode;
|
||||
|
||||
queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
|
||||
List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0,
|
||||
NULL);
|
||||
|
||||
if (list_length(queryTreeList) != 1)
|
||||
{
|
||||
|
@ -2931,7 +2877,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
|
|||
*/
|
||||
appendStringInfo(userFilePath, "%s.%u", filename, GetUserId());
|
||||
|
||||
tuplesSent = WorkerExecuteSqlTask(query, filename, binaryCopyFormat);
|
||||
int64 tuplesSent = WorkerExecuteSqlTask(query, filename, binaryCopyFormat);
|
||||
|
||||
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
|
||||
"COPY " UINT64_FORMAT, tuplesSent);
|
||||
|
@ -2952,7 +2898,6 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
|
|||
static void
|
||||
CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort)
|
||||
{
|
||||
List *ddlCommandList = NIL;
|
||||
ListCell *ddlCommandCell = NULL;
|
||||
|
||||
char *relationName = relation->relname;
|
||||
|
@ -2964,7 +2909,7 @@ CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort)
|
|||
* enough; therefore, we just throw an error which says that we could not
|
||||
* run the copy operation.
|
||||
*/
|
||||
ddlCommandList = TableDDLCommandList(nodeName, nodePort, qualifiedRelationName);
|
||||
List *ddlCommandList = TableDDLCommandList(nodeName, nodePort, qualifiedRelationName);
|
||||
if (ddlCommandList == NIL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("could not run copy from the worker node")));
|
||||
|
@ -3045,14 +2990,13 @@ CheckCopyPermissions(CopyStmt *copyStatement)
|
|||
AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT);
|
||||
List *attnums;
|
||||
ListCell *cur;
|
||||
RangeTblEntry *rte;
|
||||
|
||||
rel = heap_openrv(copyStatement->relation,
|
||||
is_from ? RowExclusiveLock : AccessShareLock);
|
||||
|
||||
relid = RelationGetRelid(rel);
|
||||
|
||||
rte = makeNode(RangeTblEntry);
|
||||
RangeTblEntry *rte = makeNode(RangeTblEntry);
|
||||
rte->rtekind = RTE_RELATION;
|
||||
rte->relid = relid;
|
||||
rte->relkind = rel->rd_rel->relkind;
|
||||
|
@ -3166,18 +3110,16 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
|
|||
static HTAB *
|
||||
CreateConnectionStateHash(MemoryContext memoryContext)
|
||||
{
|
||||
HTAB *connectionStateHash = NULL;
|
||||
int hashFlags = 0;
|
||||
HASHCTL info;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.keysize = sizeof(int);
|
||||
info.entrysize = sizeof(CopyConnectionState);
|
||||
info.hcxt = memoryContext;
|
||||
hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
|
||||
int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
|
||||
|
||||
connectionStateHash = hash_create("Copy Connection State Hash", 128, &info,
|
||||
hashFlags);
|
||||
HTAB *connectionStateHash = hash_create("Copy Connection State Hash", 128, &info,
|
||||
hashFlags);
|
||||
|
||||
return connectionStateHash;
|
||||
}
|
||||
|
@ -3191,17 +3133,15 @@ CreateConnectionStateHash(MemoryContext memoryContext)
|
|||
static HTAB *
|
||||
CreateShardStateHash(MemoryContext memoryContext)
|
||||
{
|
||||
HTAB *shardStateHash = NULL;
|
||||
int hashFlags = 0;
|
||||
HASHCTL info;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.keysize = sizeof(uint64);
|
||||
info.entrysize = sizeof(CopyShardState);
|
||||
info.hcxt = memoryContext;
|
||||
hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
|
||||
int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
|
||||
|
||||
shardStateHash = hash_create("Copy Shard State Hash", 128, &info, hashFlags);
|
||||
HTAB *shardStateHash = hash_create("Copy Shard State Hash", 128, &info, hashFlags);
|
||||
|
||||
return shardStateHash;
|
||||
}
|
||||
|
@ -3214,14 +3154,15 @@ CreateShardStateHash(MemoryContext memoryContext)
|
|||
static CopyConnectionState *
|
||||
GetConnectionState(HTAB *connectionStateHash, MultiConnection *connection)
|
||||
{
|
||||
CopyConnectionState *connectionState = NULL;
|
||||
bool found = false;
|
||||
|
||||
int sock = PQsocket(connection->pgConn);
|
||||
Assert(sock != -1);
|
||||
|
||||
connectionState = (CopyConnectionState *) hash_search(connectionStateHash, &sock,
|
||||
HASH_ENTER, &found);
|
||||
CopyConnectionState *connectionState = (CopyConnectionState *) hash_search(
|
||||
connectionStateHash, &sock,
|
||||
HASH_ENTER,
|
||||
&found);
|
||||
if (!found)
|
||||
{
|
||||
connectionState->socket = sock;
|
||||
|
@ -3243,11 +3184,11 @@ ConnectionStateList(HTAB *connectionStateHash)
|
|||
{
|
||||
List *connectionStateList = NIL;
|
||||
HASH_SEQ_STATUS status;
|
||||
CopyConnectionState *connectionState = NULL;
|
||||
|
||||
hash_seq_init(&status, connectionStateHash);
|
||||
|
||||
connectionState = (CopyConnectionState *) hash_seq_search(&status);
|
||||
CopyConnectionState *connectionState = (CopyConnectionState *) hash_seq_search(
|
||||
&status);
|
||||
while (connectionState != NULL)
|
||||
{
|
||||
connectionStateList = lappend(connectionStateList, connectionState);
|
||||
|
@ -3268,10 +3209,8 @@ static CopyShardState *
|
|||
GetShardState(uint64 shardId, HTAB *shardStateHash,
|
||||
HTAB *connectionStateHash, bool stopOnFailure, bool *found)
|
||||
{
|
||||
CopyShardState *shardState = NULL;
|
||||
|
||||
shardState = (CopyShardState *) hash_search(shardStateHash, &shardId,
|
||||
HASH_ENTER, found);
|
||||
CopyShardState *shardState = (CopyShardState *) hash_search(shardStateHash, &shardId,
|
||||
HASH_ENTER, found);
|
||||
if (!*found)
|
||||
{
|
||||
InitializeCopyShardState(shardState, connectionStateHash,
|
||||
|
@ -3292,7 +3231,6 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
HTAB *connectionStateHash, uint64 shardId,
|
||||
bool stopOnFailure)
|
||||
{
|
||||
List *finalizedPlacementList = NIL;
|
||||
ListCell *placementCell = NULL;
|
||||
int failedPlacementCount = 0;
|
||||
|
||||
|
@ -3306,7 +3244,7 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
/* release finalized placement list at the end of this function */
|
||||
MemoryContext oldContext = MemoryContextSwitchTo(localContext);
|
||||
|
||||
finalizedPlacementList = MasterShardPlacementList(shardId);
|
||||
List *finalizedPlacementList = MasterShardPlacementList(shardId);
|
||||
|
||||
MemoryContextSwitchTo(oldContext);
|
||||
|
||||
|
@ -3316,8 +3254,6 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
foreach(placementCell, finalizedPlacementList)
|
||||
{
|
||||
ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell);
|
||||
CopyConnectionState *connectionState = NULL;
|
||||
CopyPlacementState *placementState = NULL;
|
||||
|
||||
MultiConnection *connection =
|
||||
CopyGetPlacementConnection(placement, stopOnFailure);
|
||||
|
@ -3327,7 +3263,8 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
continue;
|
||||
}
|
||||
|
||||
connectionState = GetConnectionState(connectionStateHash, connection);
|
||||
CopyConnectionState *connectionState = GetConnectionState(connectionStateHash,
|
||||
connection);
|
||||
|
||||
/*
|
||||
* If this is the first time we are using this connection for copying a
|
||||
|
@ -3338,7 +3275,7 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
RemoteTransactionBeginIfNecessary(connection);
|
||||
}
|
||||
|
||||
placementState = palloc0(sizeof(CopyPlacementState));
|
||||
CopyPlacementState *placementState = palloc0(sizeof(CopyPlacementState));
|
||||
placementState->shardState = shardState;
|
||||
placementState->data = makeStringInfo();
|
||||
placementState->connectionState = connectionState;
|
||||
|
@ -3380,19 +3317,19 @@ InitializeCopyShardState(CopyShardState *shardState,
|
|||
static MultiConnection *
|
||||
CopyGetPlacementConnection(ShardPlacement *placement, bool stopOnFailure)
|
||||
{
|
||||
MultiConnection *connection = NULL;
|
||||
uint32 connectionFlags = FOR_DML;
|
||||
char *nodeUser = CurrentUserName();
|
||||
ShardPlacementAccess *placementAccess = NULL;
|
||||
|
||||
/*
|
||||
* Determine whether the task has to be assigned to a particular connection
|
||||
* due to a preceding access to the placement in the same transaction.
|
||||
*/
|
||||
placementAccess = CreatePlacementAccess(placement, PLACEMENT_ACCESS_DML);
|
||||
connection = GetConnectionIfPlacementAccessedInXact(connectionFlags,
|
||||
list_make1(placementAccess),
|
||||
NULL);
|
||||
ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement,
|
||||
PLACEMENT_ACCESS_DML);
|
||||
MultiConnection *connection = GetConnectionIfPlacementAccessedInXact(connectionFlags,
|
||||
list_make1(
|
||||
placementAccess),
|
||||
NULL);
|
||||
if (connection != NULL)
|
||||
{
|
||||
return connection;
|
||||
|
@ -3451,21 +3388,19 @@ static void
|
|||
StartPlacementStateCopyCommand(CopyPlacementState *placementState,
|
||||
CopyStmt *copyStatement, CopyOutState copyOutState)
|
||||
{
|
||||
StringInfo copyCommand = NULL;
|
||||
PGresult *result = NULL;
|
||||
MultiConnection *connection = placementState->connectionState->connection;
|
||||
uint64 shardId = placementState->shardState->shardId;
|
||||
bool raiseInterrupts = true;
|
||||
bool binaryCopy = copyOutState->binary;
|
||||
|
||||
copyCommand = ConstructCopyStatement(copyStatement, shardId, binaryCopy);
|
||||
StringInfo copyCommand = ConstructCopyStatement(copyStatement, shardId, binaryCopy);
|
||||
|
||||
if (!SendRemoteCommand(connection, copyCommand->data))
|
||||
{
|
||||
ReportConnectionError(connection, ERROR);
|
||||
}
|
||||
|
||||
result = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
if (PQresultStatus(result) != PGRES_COPY_IN)
|
||||
{
|
||||
ReportResultError(connection, result, ERROR);
|
||||
|
|
|
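Every hunk in this diff follows the same mechanical shape: a now-dead declaration with a placeholder initializer is dropped, and the old line (a bare assignment to a variable declared earlier) appears immediately before its replacement, which declares the variable at the point of first assignment. A minimal sketch of the transformation, with hypothetical function names (NameLengthBefore and NameLengthAfter are illustrations, not functions from this diff):

#include <stdio.h>
#include <string.h>

/* before: placeholder initialization, real value assigned later */
static size_t
NameLengthBefore(const char *name)
{
	size_t nameLength = 0;

	nameLength = strlen(name);

	return nameLength;
}

/* after: the declaration absorbs the first assignment */
static size_t
NameLengthAfter(const char *name)
{
	size_t nameLength = strlen(name);

	return nameLength;
}

int
main(void)
{
	printf("%zu %zu\n", NameLengthBefore("citus"), NameLengthAfter("citus"));
	return 0;
}

The rewrite is only valid when the variable is not read between its old declaration and the first assignment; every converted site in this diff fits that shape.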
@@ -30,8 +30,6 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand)
{
Oid objectRelationId = InvalidOid; /* SQL Object OID */
Oid tableRelationId = InvalidOid; /* Relation OID, maybe not the same. */
bool isDistributedRelation = false;
DDLJob *ddlJob = NULL;

/*
* We only support some of the PostgreSQL supported RENAME statements, and

@@ -97,7 +95,7 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand)
return NIL;
}

isDistributedRelation = IsDistributedTable(tableRelationId);
bool isDistributedRelation = IsDistributedTable(tableRelationId);
if (!isDistributedRelation)
{
return NIL;

@@ -110,7 +108,7 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand)
*/
ErrorIfUnsupportedRenameStmt(renameStmt);

ddlJob = palloc0(sizeof(DDLJob));
DDLJob *ddlJob = palloc0(sizeof(DDLJob));
ddlJob->targetRelationId = tableRelationId;
ddlJob->concurrentIndexCmd = false;
ddlJob->commandString = renameCommand;
@@ -46,7 +46,6 @@ List *
ProcessAlterRoleStmt(AlterRoleStmt *stmt, const char *queryString)
{
ListCell *optionCell = NULL;
List *commands = NIL;

if (!EnableAlterRolePropagation || !IsCoordinator())
{

@@ -82,7 +81,7 @@ ProcessAlterRoleStmt(AlterRoleStmt *stmt, const char *queryString)
break;
}
}
commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt));
List *commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt));

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -120,15 +119,14 @@ ExtractEncryptedPassword(Oid roleOid)
TupleDesc pgAuthIdDescription = RelationGetDescr(pgAuthId);
HeapTuple tuple = SearchSysCache1(AUTHOID, roleOid);
bool isNull = true;
Datum passwordDatum;

if (!HeapTupleIsValid(tuple))
{
return NULL;
}

passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
pgAuthIdDescription, &isNull);
Datum passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
pgAuthIdDescription, &isNull);

heap_close(pgAuthId, AccessShareLock);
ReleaseSysCache(tuple);

@@ -151,8 +149,6 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription)
{
char *rolPassword = "";
char *rolValidUntil = "infinity";
Datum rolValidUntilDatum;
Datum rolPasswordDatum;
bool isNull = true;
Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(tuple));
AlterRoleStmt *stmt = makeNode(AlterRoleStmt);

@@ -199,8 +195,8 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription)
makeDefElemInt("connectionlimit", role->rolconnlimit));

rolPasswordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
pgAuthIdDescription, &isNull);
Datum rolPasswordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
pgAuthIdDescription, &isNull);
if (!isNull)
{
rolPassword = pstrdup(TextDatumGetCString(rolPasswordDatum));

@@ -214,8 +210,8 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription)
stmt->options = lappend(stmt->options, makeDefElem("password", NULL, -1));
}

rolValidUntilDatum = heap_getattr(tuple, Anum_pg_authid_rolvaliduntil,
pgAuthIdDescription, &isNull);
Datum rolValidUntilDatum = heap_getattr(tuple, Anum_pg_authid_rolvaliduntil,
pgAuthIdDescription, &isNull);
if (!isNull)
{
rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));
@@ -158,16 +158,14 @@ PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
List *
PlanAlterTableSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
{
Oid relationId = InvalidOid;

if (stmt->relation == NULL)
{
return NIL;
}

relationId = RangeVarGetRelid(stmt->relation,
AccessExclusiveLock,
stmt->missing_ok);
Oid relationId = RangeVarGetRelid(stmt->relation,
AccessExclusiveLock,
stmt->missing_ok);

/* first check whether a distributed relation is affected */
if (!OidIsValid(relationId) || !IsDistributedTable(relationId))
@@ -56,7 +56,6 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
{
Oid sequenceId = RangeVarGetRelid(alterSeqStmt->sequence, AccessShareLock,
alterSeqStmt->missing_ok);
bool sequenceOwned = false;
Oid ownedByTableId = InvalidOid;
Oid newOwnedByTableId = InvalidOid;
int32 ownedByColumnId = 0;

@@ -68,8 +67,8 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
return;
}

sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId,
&ownedByColumnId);
bool sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId,
&ownedByColumnId);
if (!sequenceOwned)
{
sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, &ownedByTableId,
@@ -73,7 +73,6 @@ ProcessDropTableStmt(DropStmt *dropTableStatement)
List *tableNameList = (List *) lfirst(dropTableCell);
RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList);
bool missingOK = true;
List *partitionList = NIL;
ListCell *partitionCell = NULL;

Oid relationId = RangeVarGetRelid(tableRangeVar, AccessShareLock, missingOK);

@@ -98,7 +97,7 @@ ProcessDropTableStmt(DropStmt *dropTableStatement)

EnsureCoordinator();

partitionList = PartitionList(relationId);
List *partitionList = PartitionList(relationId);
if (list_length(partitionList) == 0)
{
continue;

@@ -254,14 +253,7 @@ ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement)
List *
PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand)
{
List *ddlJobs = NIL;
DDLJob *ddlJob = NULL;
LOCKMODE lockmode = 0;
Oid leftRelationId = InvalidOid;
Oid rightRelationId = InvalidOid;
char leftRelationKind;
bool isDistributedRelation = false;
List *commandList = NIL;
ListCell *commandCell = NULL;
bool executeSequentially = false;

@@ -271,8 +263,8 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
return NIL;
}

lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
if (!OidIsValid(leftRelationId))
{
return NIL;

@@ -283,13 +275,13 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
* SET/SET storage parameters in Citus, so we might have to check for
* another relation here.
*/
leftRelationKind = get_rel_relkind(leftRelationId);
char leftRelationKind = get_rel_relkind(leftRelationId);
if (leftRelationKind == RELKIND_INDEX)
{
leftRelationId = IndexGetRelation(leftRelationId, false);
}

isDistributedRelation = IsDistributedTable(leftRelationId);
bool isDistributedRelation = IsDistributedTable(leftRelationId);
if (!isDistributedRelation)
{
return NIL;

@@ -317,7 +309,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
* set skip_validation to true to prevent PostgreSQL to verify validity of the
* foreign constraint in master. Validity will be checked in workers anyway.
*/
commandList = alterTableStatement->cmds;
List *commandList = alterTableStatement->cmds;

foreach(commandCell, commandList)
{

@@ -426,7 +418,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
SetLocalMultiShardModifyModeToSequential();
}

ddlJob = palloc0(sizeof(DDLJob));
DDLJob *ddlJob = palloc0(sizeof(DDLJob));
ddlJob->targetRelationId = leftRelationId;
ddlJob->concurrentIndexCmd = false;
ddlJob->commandString = alterTableCommand;

@@ -450,7 +442,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
ddlJob->taskList = DDLTaskList(leftRelationId, alterTableCommand);
}

ddlJobs = list_make1(ddlJob);
List *ddlJobs = list_make1(ddlJob);

return ddlJobs;
}

@@ -465,10 +457,6 @@ Node *
WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement,
const char *alterTableCommand)
{
LOCKMODE lockmode = 0;
Oid leftRelationId = InvalidOid;
bool isDistributedRelation = false;
List *commandList = NIL;
ListCell *commandCell = NULL;

/* first check whether a distributed relation is affected */

@@ -477,14 +465,14 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement,
return (Node *) alterTableStatement;
}

lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
if (!OidIsValid(leftRelationId))
{
return (Node *) alterTableStatement;
}

isDistributedRelation = IsDistributedTable(leftRelationId);
bool isDistributedRelation = IsDistributedTable(leftRelationId);
if (!isDistributedRelation)
{
return (Node *) alterTableStatement;

@@ -496,7 +484,7 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement,
* set skip_validation to true to prevent PostgreSQL to verify validity of the
* foreign constraint in master. Validity will be checked in workers anyway.
*/
commandList = alterTableStatement->cmds;
List *commandList = alterTableStatement->cmds;

foreach(commandCell, commandList)
{

@@ -559,9 +547,6 @@ IsAlterTableRenameStmt(RenameStmt *renameStmt)
void
ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement)
{
LOCKMODE lockmode = 0;
Oid leftRelationId = InvalidOid;
bool isDistributedRelation = false;
List *commandList = alterTableStatement->cmds;
ListCell *commandCell = NULL;

@@ -571,14 +556,14 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement)
return;
}

lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode);
if (!OidIsValid(leftRelationId))
{
return;
}

isDistributedRelation = IsDistributedTable(leftRelationId);
bool isDistributedRelation = IsDistributedTable(leftRelationId);
if (!isDistributedRelation)
{
return;

@@ -613,11 +598,9 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement)
{
List *commandList = alterTableStatement->cmds;
ListCell *commandCell = NULL;
LOCKMODE lockmode = NoLock;
Oid relationId = InvalidOid;

lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
relationId = AlterTableLookupRelation(alterTableStatement, lockmode);
LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode);

if (relationId != InvalidOid)
{

@@ -634,8 +617,6 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement)

if (alterTableType == AT_AddConstraint)
{
Constraint *constraint = NULL;

Assert(list_length(commandList) == 1);

ErrorIfUnsupportedAlterAddConstraintStmt(alterTableStatement);

@@ -645,7 +626,7 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement)
continue;
}

constraint = (Constraint *) command->def;
Constraint *constraint = (Constraint *) command->def;
if (constraint->contype == CONSTR_FOREIGN)
{
InvalidateForeignKeyGraph();

@@ -653,11 +634,10 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement)
}
else if (alterTableType == AT_AddColumn)
{
List *columnConstraints = NIL;
ListCell *columnConstraint = NULL;

ColumnDef *columnDefinition = (ColumnDef *) command->def;
columnConstraints = columnDefinition->constraints;
List *columnConstraints = columnDefinition->constraints;
if (columnConstraints)
{
ErrorIfUnsupportedAlterAddConstraintStmt(alterTableStatement);

@@ -792,8 +772,6 @@ void
ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
Var *distributionColumn, uint32 colocationId)
{
char *relationName = NULL;
List *indexOidList = NULL;
ListCell *indexOidCell = NULL;

/*

@@ -817,21 +795,17 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
return;
}

relationName = RelationGetRelationName(relation);
indexOidList = RelationGetIndexList(relation);
char *relationName = RelationGetRelationName(relation);
List *indexOidList = RelationGetIndexList(relation);

foreach(indexOidCell, indexOidList)
{
Oid indexOid = lfirst_oid(indexOidCell);
Relation indexDesc = index_open(indexOid, RowExclusiveLock);
IndexInfo *indexInfo = NULL;
AttrNumber *attributeNumberArray = NULL;
bool hasDistributionColumn = false;
int attributeCount = 0;
int attributeIndex = 0;

/* extract index key information from the index's pg_index info */
indexInfo = BuildIndexInfo(indexDesc);
IndexInfo *indexInfo = BuildIndexInfo(indexDesc);

/* only check unique indexes and exclusion constraints. */
if (indexInfo->ii_Unique == false && indexInfo->ii_ExclusionOps == NULL)

@@ -856,25 +830,23 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
errhint("Consider using hash partitioning.")));
}

attributeCount = indexInfo->ii_NumIndexAttrs;
attributeNumberArray = indexInfo->ii_IndexAttrNumbers;
int attributeCount = indexInfo->ii_NumIndexAttrs;
AttrNumber *attributeNumberArray = indexInfo->ii_IndexAttrNumbers;

for (attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++)
for (int attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++)
{
AttrNumber attributeNumber = attributeNumberArray[attributeIndex];
bool uniqueConstraint = false;
bool exclusionConstraintWithEquality = false;

if (distributionColumn->varattno != attributeNumber)
{
continue;
}

uniqueConstraint = indexInfo->ii_Unique;
exclusionConstraintWithEquality = (indexInfo->ii_ExclusionOps != NULL &&
OperatorImplementsEquality(
indexInfo->ii_ExclusionOps[
attributeIndex]));
bool uniqueConstraint = indexInfo->ii_Unique;
bool exclusionConstraintWithEquality = (indexInfo->ii_ExclusionOps != NULL &&
OperatorImplementsEquality(
indexInfo->ii_ExclusionOps[
attributeIndex]));

if (uniqueConstraint || exclusionConstraintWithEquality)
{

@@ -1278,15 +1250,13 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
*/
if (rightPartitionMethod == DISTRIBUTE_BY_NONE)
{
ShardInterval *rightShardInterval = NULL;
int rightShardCount = list_length(rightShardList);
int leftShardCount = list_length(leftShardList);
int shardCounter = 0;

Assert(rightShardCount == 1);

rightShardInterval = (ShardInterval *) linitial(rightShardList);
for (shardCounter = rightShardCount; shardCounter < leftShardCount;
ShardInterval *rightShardInterval = (ShardInterval *) linitial(rightShardList);
for (int shardCounter = rightShardCount; shardCounter < leftShardCount;
shardCounter++)
{
rightShardList = lappend(rightShardList, rightShardInterval);

@@ -1301,7 +1271,6 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
ShardInterval *leftShardInterval = (ShardInterval *) lfirst(leftShardCell);
uint64 leftShardId = leftShardInterval->shardId;
StringInfo applyCommand = makeStringInfo();
Task *task = NULL;
RelationShard *leftRelationShard = CitusMakeNode(RelationShard);
RelationShard *rightRelationShard = CitusMakeNode(RelationShard);

@@ -1318,7 +1287,7 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
leftShardId, escapedLeftSchemaName, rightShardId,
escapedRightSchemaName, escapedCommandString);

task = CitusMakeNode(Task);
Task *task = CitusMakeNode(Task);
task->jobId = jobId;
task->taskId = taskId++;
task->taskType = DDL_TASK;

@@ -1345,8 +1314,6 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement,
AlterTableCmd *command)
{
bool involvesPartitionColumn = false;
Var *partitionColumn = NULL;
HeapTuple tuple = NULL;
char *alterColumnName = command->name;

LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);

@@ -1356,9 +1323,9 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement,
return false;
}

partitionColumn = DistPartitionKey(relationId);
Var *partitionColumn = DistPartitionKey(relationId);

tuple = SearchSysCacheAttName(relationId, alterColumnName);
HeapTuple tuple = SearchSysCacheAttName(relationId, alterColumnName);
if (HeapTupleIsValid(tuple))
{
Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(tuple);
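Several hunks in the file above also move loop counters into the for statement itself, which relies on C99 declaration-in-for-loop scope. A small self-contained sketch of that variant (SumBefore and SumAfter are hypothetical names, not functions from this diff):

#include <stdio.h>

/* before: the counter is declared once and outlives the loop */
static int
SumBefore(const int *values, int valueCount)
{
	int total = 0;
	int valueIndex = 0;

	for (valueIndex = 0; valueIndex < valueCount; valueIndex++)
	{
		total += values[valueIndex];
	}

	return total;
}

/* after: the counter's scope shrinks to the loop body */
static int
SumAfter(const int *values, int valueCount)
{
	int total = 0;

	for (int valueIndex = 0; valueIndex < valueCount; valueIndex++)
	{
		total += values[valueIndex];
	}

	return total;
}

int
main(void)
{
	int values[] = { 1, 2, 3 };
	printf("%d %d\n", SumBefore(values, 3), SumAfter(values, 3));
	return 0;
}

Besides deleting a line, the conversion keeps the counter from leaking into the enclosing scope, so an accidental later use becomes a compile error rather than a silent reuse.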
@@ -42,7 +42,6 @@ void
RedirectCopyDataToRegularFile(const char *filename)
{
StringInfo copyData = makeStringInfo();
bool copyDone = false;
const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
const int fileMode = (S_IRUSR | S_IWUSR);
File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);

@@ -50,7 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename)

SendCopyInStart();

copyDone = ReceiveCopyData(copyData);
bool copyDone = ReceiveCopyData(copyData);
while (!copyDone)
{
/* if received data has contents, append to regular file */

@@ -83,8 +82,6 @@ RedirectCopyDataToRegularFile(const char *filename)
void
SendRegularFile(const char *filename)
{
StringInfo fileBuffer = NULL;
int readBytes = -1;
const uint32 fileBufferSize = 32768; /* 32 KB */
const int fileFlags = (O_RDONLY | PG_BINARY);
const int fileMode = 0;

@@ -97,13 +94,13 @@ SendRegularFile(const char *filename)
* We read file's contents into buffers of 32 KB. This buffer size is twice
* as large as Hadoop's default buffer size, and may later be configurable.
*/
fileBuffer = makeStringInfo();
StringInfo fileBuffer = makeStringInfo();
enlargeStringInfo(fileBuffer, fileBufferSize);

SendCopyOutStart();

readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize,
PG_WAIT_IO);
int readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize,
PG_WAIT_IO);
while (readBytes > 0)
{
fileBuffer->len = readBytes;

@@ -141,11 +138,9 @@ FreeStringInfo(StringInfo stringInfo)
File
FileOpenForTransmit(const char *filename, int fileFlags, int fileMode)
{
File fileDesc = -1;
int fileStated = -1;
struct stat fileStat;

fileStated = stat(filename, &fileStat);
int fileStated = stat(filename, &fileStat);
if (fileStated >= 0)
{
if (S_ISDIR(fileStat.st_mode))

@@ -155,7 +150,7 @@ FileOpenForTransmit(const char *filename, int fileFlags, int fileMode)
}
}

fileDesc = PathNameOpenFilePerm((char *) filename, fileFlags, fileMode);
File fileDesc = PathNameOpenFilePerm((char *) filename, fileFlags, fileMode);
if (fileDesc < 0)
{
ereport(ERROR, (errcode_for_file_access(),

@@ -175,7 +170,6 @@ SendCopyInStart(void)
{
StringInfoData copyInStart = { NULL, 0, 0, 0 };
const char copyFormat = 1; /* binary copy format */
int flushed = 0;

pq_beginmessage(&copyInStart, 'G');
pq_sendbyte(&copyInStart, copyFormat);

@@ -183,7 +177,7 @@ SendCopyInStart(void)
pq_endmessage(&copyInStart);

/* flush here to ensure that FE knows it can send data */
flushed = pq_flush();
int flushed = pq_flush();
if (flushed != 0)
{
ereport(WARNING, (errmsg("could not flush copy start data")));

@@ -213,13 +207,12 @@ static void
SendCopyDone(void)
{
StringInfoData copyDone = { NULL, 0, 0, 0 };
int flushed = 0;

pq_beginmessage(&copyDone, 'c');
pq_endmessage(&copyDone);

/* flush here to signal to FE that we are done */
flushed = pq_flush();
int flushed = pq_flush();
if (flushed != 0)
{
ereport(WARNING, (errmsg("could not flush copy start data")));

@@ -250,14 +243,12 @@ SendCopyData(StringInfo fileBuffer)
static bool
ReceiveCopyData(StringInfo copyData)
{
int messageType = 0;
int messageCopied = 0;
bool copyDone = true;
const int unlimitedSize = 0;

HOLD_CANCEL_INTERRUPTS();
pq_startmsgread();
messageType = pq_getbyte();
int messageType = pq_getbyte();
if (messageType == EOF)
{
ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),

@@ -265,7 +256,7 @@ ReceiveCopyData(StringInfo copyData)
}

/* consume the rest of message before checking for message type */
messageCopied = pq_getmessage(copyData, unlimitedSize);
int messageCopied = pq_getmessage(copyData, unlimitedSize);
if (messageCopied == EOF)
{
ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),

@@ -382,8 +373,6 @@ TransmitStatementUser(CopyStmt *copyStatement)
void
VerifyTransmitStmt(CopyStmt *copyStatement)
{
char *fileName = NULL;

EnsureSuperUser();

/* do some minimal option verification */

@@ -394,7 +383,7 @@ VerifyTransmitStmt(CopyStmt *copyStatement)
errmsg("FORMAT 'transmit' requires a target file")));
}

fileName = copyStatement->relation->relname;
char *fileName = copyStatement->relation->relname;

if (is_absolute_path(fileName))
{
@@ -180,8 +180,6 @@ LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement)
{
RangeVar *rangeVar = (RangeVar *) lfirst(relationCell);
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false);
DistTableCacheEntry *cacheEntry = NULL;
List *referencingTableList = NIL;
Oid referencingRelationId = InvalidOid;

if (!IsDistributedTable(relationId))

@@ -196,10 +194,10 @@ LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement)

distributedRelationList = lappend_oid(distributedRelationList, relationId);

cacheEntry = DistributedTableCacheEntry(relationId);
DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
Assert(cacheEntry != NULL);

referencingTableList = cacheEntry->referencingRelationsViaForeignKey;
List *referencingTableList = cacheEntry->referencingRelationsViaForeignKey;
foreach_oid(referencingRelationId, referencingTableList)
{
distributedRelationList = list_append_unique_oid(distributedRelationList,
@@ -114,9 +114,6 @@ static bool ShouldPropagateTypeCreate(void);
List *
PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
{
const char *compositeTypeStmtSql = NULL;
List *commands = NIL;

if (!ShouldPropagateTypeCreate())
{
return NIL;

@@ -149,7 +146,7 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
* type previously has been attempted to be created in a transaction which did not
* commit on the coordinator.
*/
compositeTypeStmtSql = DeparseCompositeTypeStmt(stmt);
const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(stmt);
compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql);

/*

@@ -158,9 +155,9 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
*/
EnsureSequentialModeForTypeDDL();

commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) compositeTypeStmtSql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) compositeTypeStmtSql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -174,8 +171,6 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
void
ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
{
const ObjectAddress *typeAddress = NULL;

/* same check we perform during planning of the statement */
if (!ShouldPropagateTypeCreate())
{

@@ -186,7 +181,8 @@ ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
* find object address of the just created object, because the type has been created
* locally it can't be missing
*/
typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
EnsureDependenciesExistsOnAllNodes(typeAddress);

MarkObjectDistributed(typeAddress);

@@ -202,13 +198,10 @@ ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString)
List *
PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString)
{
const char *alterTypeStmtSql = NULL;
const ObjectAddress *typeAddress = NULL;
List *commands = NIL;

Assert(stmt->relkind == OBJECT_TYPE);

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -218,7 +211,7 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString)

/* reconstruct alter statement in a portable fashion */
QualifyTreeNode((Node *) stmt);
alterTypeStmtSql = DeparseTreeNode((Node *) stmt);
const char *alterTypeStmtSql = DeparseTreeNode((Node *) stmt);

/*
* all types that are distributed will need their alter statements propagated

@@ -227,9 +220,9 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString)
*/
EnsureSequentialModeForTypeDDL();

commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) alterTypeStmtSql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) alterTypeStmtSql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -248,9 +241,6 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString)
List *
PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
{
const char *createEnumStmtSql = NULL;
List *commands = NIL;

if (!ShouldPropagateTypeCreate())
{
return NIL;

@@ -266,7 +256,7 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
QualifyTreeNode((Node *) stmt);

/* reconstruct creation statement in a portable fashion */
createEnumStmtSql = DeparseCreateEnumStmt(stmt);
const char *createEnumStmtSql = DeparseCreateEnumStmt(stmt);
createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql);

/*

@@ -276,9 +266,9 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
EnsureSequentialModeForTypeDDL();

/* to prevent recursion with mx we disable ddl propagation */
commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) createEnumStmtSql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) createEnumStmtSql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -295,15 +285,14 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
void
ProcessCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
{
const ObjectAddress *typeAddress = NULL;

if (!ShouldPropagateTypeCreate())
{
return;
}

/* lookup type address of just created type */
typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
EnsureDependenciesExistsOnAllNodes(typeAddress);

/*

@@ -326,11 +315,10 @@ ProcessCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString)
List *
PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
{
const char *alterEnumStmtSql = NULL;
const ObjectAddress *typeAddress = NULL;
List *commands = NIL;

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -351,7 +339,7 @@ PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
EnsureCoordinator();

QualifyTreeNode((Node *) stmt);
alterEnumStmtSql = DeparseTreeNode((Node *) stmt);
const char *alterEnumStmtSql = DeparseTreeNode((Node *) stmt);

/*
* Before pg12 ALTER ENUM ... ADD VALUE could not be within a xact block. Instead of

@@ -396,9 +384,8 @@ PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
void
ProcessAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
{
const ObjectAddress *typeAddress = NULL;

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return;

@@ -422,25 +409,22 @@ ProcessAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
* might already be added to some nodes, but not all.
*/

int result = 0;
List *commands = NIL;
const char *alterEnumStmtSql = NULL;

/* qualification of the stmt happened during planning */
alterEnumStmtSql = DeparseTreeNode((Node *) stmt);
const char *alterEnumStmtSql = DeparseTreeNode((Node *) stmt);

commands = list_make2(DISABLE_DDL_PROPAGATION, (void *) alterEnumStmtSql);
List *commands = list_make2(DISABLE_DDL_PROPAGATION, (void *) alterEnumStmtSql);

result = SendBareOptionalCommandListToWorkersAsUser(ALL_WORKERS, commands, NULL);
int result = SendBareOptionalCommandListToWorkersAsUser(ALL_WORKERS, commands,
NULL);

if (result != RESPONSE_OKAY)
{
const char *alterEnumStmtIfNotExistsSql = NULL;
bool oldSkipIfNewValueExists = stmt->skipIfNewValExists;

/* deparse the query with IF NOT EXISTS */
stmt->skipIfNewValExists = true;
alterEnumStmtIfNotExistsSql = DeparseTreeNode((Node *) stmt);
const char *alterEnumStmtIfNotExistsSql = DeparseTreeNode((Node *) stmt);
stmt->skipIfNewValExists = oldSkipIfNewValueExists;

ereport(WARNING, (errmsg("not all workers applied change to enum"),

@@ -466,18 +450,15 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString)
* the old list to put back
*/
List *oldTypes = stmt->objects;
List *distributedTypes = NIL;
const char *dropStmtSql = NULL;
ListCell *addressCell = NULL;
List *distributedTypeAddresses = NIL;
List *commands = NIL;

if (!ShouldPropagate())
{
return NIL;
}

distributedTypes = FilterNameListForDistributedTypes(oldTypes, stmt->missing_ok);
List *distributedTypes = FilterNameListForDistributedTypes(oldTypes,
stmt->missing_ok);
if (list_length(distributedTypes) <= 0)
{
/* no distributed types to drop */

@@ -494,7 +475,7 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString)
/*
* remove the entries for the distributed objects on dropping
*/
distributedTypeAddresses = TypeNameListToObjectAddresses(distributedTypes);
List *distributedTypeAddresses = TypeNameListToObjectAddresses(distributedTypes);
foreach(addressCell, distributedTypeAddresses)
{
ObjectAddress *address = (ObjectAddress *) lfirst(addressCell);

@@ -506,15 +487,15 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString)
* deparse to an executable sql statement for the workers
*/
stmt->objects = distributedTypes;
dropStmtSql = DeparseTreeNode((Node *) stmt);
const char *dropStmtSql = DeparseTreeNode((Node *) stmt);
stmt->objects = oldTypes;

/* to prevent recursion with mx we disable ddl propagation */
EnsureSequentialModeForTypeDDL();

commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropStmtSql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropStmtSql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -531,11 +512,8 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString)
List *
PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString)
{
const char *renameStmtSql = NULL;
const ObjectAddress *typeAddress = NULL;
List *commands = NIL;

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -545,14 +523,14 @@ PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString)
QualifyTreeNode((Node *) stmt);

/* deparse sql*/
renameStmtSql = DeparseTreeNode((Node *) stmt);
const char *renameStmtSql = DeparseTreeNode((Node *) stmt);

/* to prevent recursion with mx we disable ddl propagation */
EnsureSequentialModeForTypeDDL();

commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) renameStmtSql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) renameStmtSql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -568,14 +546,11 @@ PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString)
List *
PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString)
{
const char *sql = NULL;
const ObjectAddress *typeAddress = NULL;
List *commands = NIL;

Assert(stmt->renameType == OBJECT_ATTRIBUTE);
Assert(stmt->relationType == OBJECT_TYPE);

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -583,12 +558,12 @@ PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString)

QualifyTreeNode((Node *) stmt);

sql = DeparseTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);

EnsureSequentialModeForTypeDDL();
commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -603,13 +578,10 @@ PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString)
List *
PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
{
const char *sql = NULL;
const ObjectAddress *typeAddress = NULL;
List *commands = NIL;

Assert(stmt->objectType == OBJECT_TYPE);

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -618,13 +590,13 @@ PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
EnsureCoordinator();

QualifyTreeNode((Node *) stmt);
sql = DeparseTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);

EnsureSequentialModeForTypeDDL();

commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -638,11 +610,10 @@ PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
void
ProcessAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
{
const ObjectAddress *typeAddress = NULL;

Assert(stmt->objectType == OBJECT_TYPE);

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return;

@@ -663,13 +634,10 @@ ProcessAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
List *
PlanAlterTypeOwnerStmt(AlterOwnerStmt *stmt, const char *queryString)
{
const ObjectAddress *typeAddress = NULL;
const char *sql = NULL;
List *commands = NULL;

Assert(stmt->objectType == OBJECT_TYPE);

typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt,
false);
if (!ShouldPropagateObject(typeAddress))
{
return NIL;

@@ -678,12 +646,12 @@ PlanAlterTypeOwnerStmt(AlterOwnerStmt *stmt, const char *queryString)
EnsureCoordinator();

QualifyTreeNode((Node *) stmt);
sql = DeparseTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);

EnsureSequentialModeForTypeDDL();
commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);

return NodeDDLTaskList(ALL_WORKERS, commands);
}

@@ -726,13 +694,10 @@ CreateTypeStmtByObjectAddress(const ObjectAddress *address)
static CompositeTypeStmt *
RecreateCompositeTypeStmt(Oid typeOid)
{
CompositeTypeStmt *stmt = NULL;
List *names = NIL;

Assert(get_typtype(typeOid) == TYPTYPE_COMPOSITE);

stmt = makeNode(CompositeTypeStmt);
names = stringToQualifiedNameList(format_type_be_qualified(typeOid));
CompositeTypeStmt *stmt = makeNode(CompositeTypeStmt);
|
||||
List *names = stringToQualifiedNameList(format_type_be_qualified(typeOid));
|
||||
stmt->typevar = makeRangeVarFromNameList(names);
|
||||
stmt->coldeflist = CompositeTypeColumnDefList(typeOid);
|
||||
|
||||
|
@ -763,17 +728,14 @@ attributeFormToColumnDef(Form_pg_attribute attributeForm)
|
|||
static List *
|
||||
CompositeTypeColumnDefList(Oid typeOid)
|
||||
{
|
||||
Relation relation = NULL;
|
||||
Oid relationId = InvalidOid;
|
||||
TupleDesc tupleDescriptor = NULL;
|
||||
int attributeIndex = 0;
|
||||
List *columnDefs = NIL;
|
||||
|
||||
relationId = typeidTypeRelid(typeOid);
|
||||
relation = relation_open(relationId, AccessShareLock);
|
||||
Oid relationId = typeidTypeRelid(typeOid);
|
||||
Relation relation = relation_open(relationId, AccessShareLock);
|
||||
|
||||
tupleDescriptor = RelationGetDescr(relation);
|
||||
for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++)
|
||||
TupleDesc tupleDescriptor = RelationGetDescr(relation);
|
||||
for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
|
||||
attributeIndex++)
|
||||
{
|
||||
Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
|
||||
|
||||
|
@ -799,11 +761,9 @@ CompositeTypeColumnDefList(Oid typeOid)
|
|||
static CreateEnumStmt *
|
||||
RecreateEnumStmt(Oid typeOid)
|
||||
{
|
||||
CreateEnumStmt *stmt = NULL;
|
||||
|
||||
Assert(get_typtype(typeOid) == TYPTYPE_ENUM);
|
||||
|
||||
stmt = makeNode(CreateEnumStmt);
|
||||
CreateEnumStmt *stmt = makeNode(CreateEnumStmt);
|
||||
stmt->typeName = stringToQualifiedNameList(format_type_be_qualified(typeOid));
|
||||
stmt->vals = EnumValsList(typeOid);
|
||||
|
||||
|
@ -818,8 +778,6 @@ RecreateEnumStmt(Oid typeOid)
|
|||
static List *
|
||||
EnumValsList(Oid typeOid)
|
||||
{
|
||||
Relation enum_rel = NULL;
|
||||
SysScanDesc enum_scan = NULL;
|
||||
HeapTuple enum_tuple = NULL;
|
||||
ScanKeyData skey = { 0 };
|
||||
|
||||
|
@ -831,11 +789,11 @@ EnumValsList(Oid typeOid)
|
|||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(typeOid));
|
||||
|
||||
enum_rel = heap_open(EnumRelationId, AccessShareLock);
|
||||
enum_scan = systable_beginscan(enum_rel,
|
||||
EnumTypIdSortOrderIndexId,
|
||||
true, NULL,
|
||||
1, &skey);
|
||||
Relation enum_rel = heap_open(EnumRelationId, AccessShareLock);
|
||||
SysScanDesc enum_scan = systable_beginscan(enum_rel,
|
||||
EnumTypIdSortOrderIndexId,
|
||||
true, NULL,
|
||||
1, &skey);
|
||||
|
||||
/* collect all value names in CREATE TYPE ... AS ENUM stmt */
|
||||
while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
|
||||
|
@ -861,13 +819,9 @@ EnumValsList(Oid typeOid)
|
|||
ObjectAddress *
|
||||
CompositeTypeStmtObjectAddress(CompositeTypeStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
typeName = MakeTypeNameFromRangeVar(stmt->typevar);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -885,13 +839,9 @@ CompositeTypeStmtObjectAddress(CompositeTypeStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
CreateEnumStmtObjectAddress(CreateEnumStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
typeName = makeTypeNameFromNameList(stmt->typeName);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -909,15 +859,11 @@ CreateEnumStmtObjectAddress(CreateEnumStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
AlterTypeStmtObjectAddress(AlterTableStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
Assert(stmt->relkind == OBJECT_TYPE);
|
||||
|
||||
typeName = MakeTypeNameFromRangeVar(stmt->relation);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -931,13 +877,9 @@ AlterTypeStmtObjectAddress(AlterTableStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
AlterEnumStmtObjectAddress(AlterEnumStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
typeName = makeTypeNameFromNameList(stmt->typeName);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -951,15 +893,11 @@ AlterEnumStmtObjectAddress(AlterEnumStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
RenameTypeStmtObjectAddress(RenameStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
Assert(stmt->renameType == OBJECT_TYPE);
|
||||
|
||||
typeName = makeTypeNameFromNameList((List *) stmt->object);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -978,21 +916,16 @@ RenameTypeStmtObjectAddress(RenameStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok)
|
||||
{
|
||||
ObjectAddress *address = NULL;
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
List *names = NIL;
|
||||
|
||||
Assert(stmt->objectType == OBJECT_TYPE);
|
||||
|
||||
names = (List *) stmt->object;
|
||||
List *names = (List *) stmt->object;
|
||||
|
||||
/*
|
||||
* we hardcode missing_ok here during LookupTypeNameOid because if we can't find it it
|
||||
* might have already been moved in this transaction.
|
||||
*/
|
||||
typeName = makeTypeNameFromNameList(names);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, true);
|
||||
TypeName *typeName = makeTypeNameFromNameList(names);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, true);
|
||||
|
||||
if (typeOid == InvalidOid)
|
||||
{
|
||||
|
@ -1024,7 +957,7 @@ AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok)
|
|||
}
|
||||
}
|
||||
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -1042,16 +975,12 @@ AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
RenameTypeAttributeStmtObjectAddress(RenameStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
Assert(stmt->renameType == OBJECT_ATTRIBUTE);
|
||||
Assert(stmt->relationType == OBJECT_TYPE);
|
||||
|
||||
typeName = MakeTypeNameFromRangeVar(stmt->relation);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -1065,15 +994,11 @@ RenameTypeAttributeStmtObjectAddress(RenameStmt *stmt, bool missing_ok)
|
|||
ObjectAddress *
|
||||
AlterTypeOwnerObjectAddress(AlterOwnerStmt *stmt, bool missing_ok)
|
||||
{
|
||||
TypeName *typeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
ObjectAddress *address = NULL;
|
||||
|
||||
Assert(stmt->objectType == OBJECT_TYPE);
|
||||
|
||||
typeName = makeTypeNameFromNameList((List *) stmt->object);
|
||||
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
address = palloc0(sizeof(ObjectAddress));
|
||||
TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
|
||||
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
|
||||
ObjectAddressSet(*address, TypeRelationId, typeOid);
|
||||
|
||||
return address;
|
||||
|
@ -1088,10 +1013,7 @@ List *
|
|||
CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
|
||||
{
|
||||
List *ddlCommands = NIL;
|
||||
const char *ddlCommand = NULL;
|
||||
Node *stmt = NULL;
|
||||
StringInfoData buf = { 0 };
|
||||
const char *username = NULL;
|
||||
|
||||
Assert(typeAddress->classId == TypeRelationId);
|
||||
|
||||
|
@ -1106,15 +1028,15 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
|
|||
return NIL;
|
||||
}
|
||||
|
||||
stmt = CreateTypeStmtByObjectAddress(typeAddress);
|
||||
Node *stmt = CreateTypeStmtByObjectAddress(typeAddress);
|
||||
|
||||
/* capture ddl command for recreation and wrap in create if not exists construct */
|
||||
ddlCommand = DeparseTreeNode(stmt);
|
||||
const char *ddlCommand = DeparseTreeNode(stmt);
|
||||
ddlCommand = WrapCreateOrReplace(ddlCommand);
|
||||
ddlCommands = lappend(ddlCommands, (void *) ddlCommand);
|
||||
|
||||
/* add owner ship change so the creation command can be run as a different user */
|
||||
username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false);
|
||||
const char *username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false);
|
||||
initStringInfo(&buf);
|
||||
appendStringInfo(&buf, ALTER_TYPE_OWNER_COMMAND, getObjectIdentity(typeAddress),
|
||||
quote_identifier(username));
|
||||
|
@ -1145,8 +1067,6 @@ GenerateBackupNameForTypeCollision(const ObjectAddress *address)
|
|||
{
|
||||
int suffixLength = snprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)",
|
||||
count);
|
||||
TypeName *newTypeName = NULL;
|
||||
Oid typeOid = InvalidOid;
|
||||
|
||||
/* trim the base name at the end to leave space for the suffix and trailing \0 */
|
||||
baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1);
|
||||
|
@ -1157,9 +1077,9 @@ GenerateBackupNameForTypeCollision(const ObjectAddress *address)
|
|||
strncpy(newName + baseLength, suffix, suffixLength);
|
||||
|
||||
rel->relname = newName;
|
||||
newTypeName = makeTypeNameFromNameList(MakeNameListFromRangeVar(rel));
|
||||
TypeName *newTypeName = makeTypeNameFromNameList(MakeNameListFromRangeVar(rel));
|
||||
|
||||
typeOid = LookupTypeNameOid(NULL, newTypeName, true);
|
||||
Oid typeOid = LookupTypeNameOid(NULL, newTypeName, true);
|
||||
if (typeOid == InvalidOid)
|
||||
{
|
||||
return newName;
|
||||
|
@ -1235,9 +1155,8 @@ static Oid
|
|||
GetTypeOwner(Oid typeOid)
|
||||
{
|
||||
Oid result = InvalidOid;
|
||||
HeapTuple tp = NULL;
|
||||
|
||||
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
|
||||
HeapTuple tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
|
||||
if (HeapTupleIsValid(tp))
|
||||
{
|
||||
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
||||
|
|
|
@@ -117,7 +117,6 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 {
 	Node *parsetree = pstmt->utilityStmt;
 	List *ddlJobs = NIL;
-	bool checkCreateAlterExtensionVersion = false;
 
 	if (IsA(parsetree, TransactionStmt) ||
 		IsA(parsetree, LockStmt) ||
@@ -143,7 +142,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 		return;
 	}
 
-	checkCreateAlterExtensionVersion = IsCreateAlterExtensionUpdateCitusStmt(parsetree);
+	bool checkCreateAlterExtensionVersion = IsCreateAlterExtensionUpdateCitusStmt(
+		parsetree);
 	if (EnableVersionChecks && checkCreateAlterExtensionVersion)
 	{
 		ErrorIfUnstableCreateOrAlterExtensionStmt(parsetree);
@@ -332,12 +332,11 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	if (IsA(parsetree, CopyStmt))
 	{
 		MemoryContext planContext = GetMemoryChunkContext(parsetree);
-		MemoryContext previousContext;
 
 		parsetree = copyObject(parsetree);
 		parsetree = ProcessCopyStmt((CopyStmt *) parsetree, completionTag, queryString);
 
-		previousContext = MemoryContextSwitchTo(planContext);
+		MemoryContext previousContext = MemoryContextSwitchTo(planContext);
 		parsetree = copyObject(parsetree);
 		MemoryContextSwitchTo(previousContext);
 
@@ -886,14 +885,12 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 static bool
 IsDropSchemaOrDB(Node *parsetree)
 {
-	DropStmt *dropStatement = NULL;
-
 	if (!IsA(parsetree, DropStmt))
 	{
 		return false;
 	}
 
-	dropStatement = (DropStmt *) parsetree;
+	DropStmt *dropStatement = (DropStmt *) parsetree;
 	return (dropStatement->removeType == OBJECT_SCHEMA) ||
 		   (dropStatement->removeType == OBJECT_DATABASE);
 }
@@ -1091,7 +1088,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 static char *
 SetSearchPathToCurrentSearchPathCommand(void)
 {
-	StringInfo setCommand = NULL;
 	char *currentSearchPath = CurrentSearchPath();
 
 	if (currentSearchPath == NULL)
@@ -1099,7 +1095,7 @@ SetSearchPathToCurrentSearchPathCommand(void)
 		return NULL;
 	}
 
-	setCommand = makeStringInfo();
+	StringInfo setCommand = makeStringInfo();
 	appendStringInfo(setCommand, "SET search_path TO %s;", currentSearchPath);
 
 	return setCommand->data;
@@ -1217,7 +1213,6 @@ DDLTaskList(Oid relationId, const char *commandString)
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
 		StringInfo applyCommand = makeStringInfo();
-		Task *task = NULL;
 
 		/*
 		 * If rightRelationId is not InvalidOid, instead of worker_apply_shard_ddl_command
@@ -1226,7 +1221,7 @@ DDLTaskList(Oid relationId, const char *commandString)
 		appendStringInfo(applyCommand, WORKER_APPLY_SHARD_DDL_COMMAND, shardId,
 						 escapedSchemaName, escapedCommandString);
 
-		task = CitusMakeNode(Task);
+		Task *task = CitusMakeNode(Task);
 		task->jobId = jobId;
 		task->taskId = taskId++;
 		task->taskType = DDL_TASK;
@@ -1252,9 +1247,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
 {
 	List *workerNodes = TargetWorkerSetNodeList(targets, NoLock);
 	char *concatenatedCommands = StringJoin(commands, ';');
-	DDLJob *ddlJob = NULL;
 	ListCell *workerNodeCell = NULL;
-	Task *task = NULL;
 
 	if (list_length(workerNodes) <= 0)
 	{
@@ -1265,16 +1258,15 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
 		return NIL;
 	}
 
-	task = CitusMakeNode(Task);
+	Task *task = CitusMakeNode(Task);
 	task->taskType = DDL_TASK;
 	task->queryString = concatenatedCommands;
 
 	foreach(workerNodeCell, workerNodes)
 	{
 		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
-		ShardPlacement *targetPlacement = NULL;
 
-		targetPlacement = CitusMakeNode(ShardPlacement);
+		ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
 		targetPlacement->nodeName = workerNode->workerName;
 		targetPlacement->nodePort = workerNode->workerPort;
 		targetPlacement->groupId = workerNode->groupId;
@@ -1282,7 +1274,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
 		task->taskPlacementList = lappend(task->taskPlacementList, targetPlacement);
 	}
 
-	ddlJob = palloc0(sizeof(DDLJob));
+	DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 	ddlJob->targetRelationId = InvalidOid;
 	ddlJob->concurrentIndexCmd = false;
 	ddlJob->commandString = NULL;

@@ -62,7 +62,6 @@ void
 ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
 {
 	int relationIndex = 0;
-	bool distributedVacuumStmt = false;
 	List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt);
 	ListCell *vacuumRelationCell = NULL;
 	List *relationIdList = NIL;
@@ -79,7 +78,8 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
 		relationIdList = lappend_oid(relationIdList, relationId);
 	}
 
-	distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options, relationIdList);
+	bool distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options,
+														 relationIdList);
 	if (!distributedVacuumStmt)
 	{
 		return;
@@ -91,9 +91,6 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
 		Oid relationId = lfirst_oid(relationIdCell);
 		if (IsDistributedTable(relationId))
 		{
-			List *vacuumColumnList = NIL;
-			List *taskList = NIL;
-
 			/*
 			 * VACUUM commands cannot run inside a transaction block, so we use
 			 * the "bare" commit protocol without BEGIN/COMMIT. However, ANALYZE
@@ -108,8 +105,8 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
 				MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE;
 			}
 
-			vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex);
-			taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);
+			List *vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex);
+			List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);
 
 			/* use adaptive executor when enabled */
 			ExecuteUtilityTaskListWithoutResults(taskList);
@@ -135,13 +132,12 @@ IsDistributedVacuumStmt(int vacuumOptions, List *vacuumRelationIdList)
 	bool distributeStmt = false;
 	ListCell *relationIdCell = NULL;
 	int distributedRelationCount = 0;
-	int vacuumedRelationCount = 0;
 
 	/*
 	 * No table in the vacuum statement means vacuuming all relations
 	 * which is not supported by citus.
 	 */
-	vacuumedRelationCount = list_length(vacuumRelationIdList);
+	int vacuumedRelationCount = list_length(vacuumRelationIdList);
 	if (vacuumedRelationCount == 0)
 	{
 		/* WARN for unqualified VACUUM commands */
@@ -188,18 +184,16 @@ static List *
 VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList)
 {
 	List *taskList = NIL;
-	List *shardIntervalList = NIL;
 	ListCell *shardIntervalCell = NULL;
 	uint64 jobId = INVALID_JOB_ID;
 	int taskId = 1;
 	StringInfo vacuumString = DeparseVacuumStmtPrefix(vacuumParams);
-	const char *columnNames = NULL;
 	const int vacuumPrefixLen = vacuumString->len;
 	Oid schemaId = get_rel_namespace(relationId);
 	char *schemaName = get_namespace_name(schemaId);
 	char *tableName = get_rel_name(relationId);
 
-	columnNames = DeparseVacuumColumnNames(vacuumColumnList);
+	const char *columnNames = DeparseVacuumColumnNames(vacuumColumnList);
 
 	/*
 	 * We obtain ShareUpdateExclusiveLock here to not conflict with INSERT's
@@ -209,7 +203,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
 	 */
 	LockRelationOid(relationId, ShareUpdateExclusiveLock);
 
-	shardIntervalList = LoadShardIntervalList(relationId);
+	List *shardIntervalList = LoadShardIntervalList(relationId);
 
 	/* grab shard lock before getting placement list */
 	LockShardListMetadata(shardIntervalList, ShareLock);
@@ -218,7 +212,6 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
 	{
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
-		Task *task = NULL;
 
 		char *shardName = pstrdup(tableName);
 		AppendShardIdToName(&shardName, shardInterval->shardId);
@@ -228,7 +221,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
 		appendStringInfoString(vacuumString, shardName);
 		appendStringInfoString(vacuumString, columnNames);
 
-		task = CitusMakeNode(Task);
+		Task *task = CitusMakeNode(Task);
 		task->jobId = jobId;
 		task->taskId = taskId++;
 		task->taskType = VACUUM_ANALYZE_TASK;

@@ -96,9 +96,8 @@ IsSettingSafeToPropagate(char *name)
 		"exit_on_error",
 		"max_stack_depth"
 	};
-	Index settingIndex = 0;
 
-	for (settingIndex = 0; settingIndex < lengthof(skipSettings); settingIndex++)
+	for (Index settingIndex = 0; settingIndex < lengthof(skipSettings); settingIndex++)
 	{
 		if (pg_strcasecmp(skipSettings[settingIndex], name) == 0)
 		{
@@ -138,9 +137,8 @@ ProcessVariableSetStmt(VariableSetStmt *setStmt, const char *setStmtString)
 	{
 		MultiConnection *connection = dlist_container(MultiConnection, transactionNode,
 													  iter.cur);
-		RemoteTransaction *transaction = NULL;
 
-		transaction = &connection->remoteTransaction;
+		RemoteTransaction *transaction = &connection->remoteTransaction;
 		if (transaction->transactionFailed)
 		{
 			continue;
@@ -162,10 +160,9 @@ ProcessVariableSetStmt(VariableSetStmt *setStmt, const char *setStmtString)
 	{
 		MultiConnection *connection = dlist_container(MultiConnection, transactionNode,
 													  iter.cur);
-		RemoteTransaction *transaction = NULL;
 		const bool raiseErrors = true;
 
-		transaction = &connection->remoteTransaction;
+		RemoteTransaction *transaction = &connection->remoteTransaction;
 		if (transaction->transactionFailed)
 		{
 			continue;

@@ -76,8 +76,7 @@ InitConnParams()
 void
 ResetConnParams()
 {
-	Index paramIdx = 0;
-	for (paramIdx = 0; paramIdx < ConnParams.size; paramIdx++)
+	for (Index paramIdx = 0; paramIdx < ConnParams.size; paramIdx++)
 	{
 		free((void *) ConnParams.keywords[paramIdx]);
 		free((void *) ConnParams.values[paramIdx]);
@@ -135,7 +134,6 @@ bool
 CheckConninfo(const char *conninfo, const char **whitelist,
 			  Size whitelistLength, char **errorMsg)
 {
-	PQconninfoOption *optionArray = NULL;
 	PQconninfoOption *option = NULL;
 	Index whitelistIdx PG_USED_FOR_ASSERTS_ONLY = 0;
 	char *errorMsgString = NULL;
@@ -165,7 +163,7 @@ CheckConninfo(const char *conninfo, const char **whitelist,
 	}
 
 	/* this should at least parse */
-	optionArray = PQconninfoParse(conninfo, NULL);
+	PQconninfoOption *optionArray = PQconninfoParse(conninfo, NULL);
 	if (optionArray == NULL)
 	{
 		*errorMsg = "Provided string is not a valid libpq connection info string";
@@ -187,15 +185,13 @@ CheckConninfo(const char *conninfo, const char **whitelist,
 
 	for (option = optionArray; option->keyword != NULL; option++)
 	{
-		void *matchingKeyword = NULL;
-
 		if (option->val == NULL || option->val[0] == '\0')
 		{
 			continue;
 		}
 
-		matchingKeyword = bsearch(&option->keyword, whitelist, whitelistLength,
-								  sizeof(char *), pg_qsort_strcmp);
+		void *matchingKeyword = bsearch(&option->keyword, whitelist, whitelistLength,
+										sizeof(char *), pg_qsort_strcmp);
 		if (matchingKeyword == NULL)
 		{
 			/* the whitelist lacks this keyword; error out! */
@@ -283,8 +279,6 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
 	/* auth keywords will begin after global and runtime ones are appended */
 	Index authParamsIdx = ConnParams.size + lengthof(runtimeKeywords);
 
-	Index paramIndex = 0;
-	Index runtimeParamIndex = 0;
 
 	if (ConnParams.size + lengthof(runtimeKeywords) >= ConnParams.maxSize)
 	{
@@ -296,7 +290,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
 	pg_ltoa(key->port, nodePortString); /* populate node port string with port */
 
 	/* first step: copy global parameters to beginning of array */
-	for (paramIndex = 0; paramIndex < ConnParams.size; paramIndex++)
+	for (Index paramIndex = 0; paramIndex < ConnParams.size; paramIndex++)
 	{
 		/* copy the keyword&value pointers to the new array */
 		connKeywords[paramIndex] = ConnParams.keywords[paramIndex];
@@ -311,7 +305,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
 	*runtimeParamStart = ConnParams.size;
 
 	/* second step: begin after global params and copy runtime params into our context */
-	for (runtimeParamIndex = 0;
+	for (Index runtimeParamIndex = 0;
 		 runtimeParamIndex < lengthof(runtimeKeywords);
 		 runtimeParamIndex++)
 	{
@@ -334,9 +328,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
 const char *
 GetConnParam(const char *keyword)
 {
-	Index i = 0;
-
-	for (i = 0; i < ConnParams.size; i++)
+	for (Index i = 0; i < ConnParams.size; i++)
 	{
 		if (strcmp(keyword, ConnParams.keywords[i]) == 0)
 		{
@@ -357,10 +349,9 @@ static Size
 CalculateMaxSize()
 {
 	PQconninfoOption *defaults = PQconndefaults();
-	PQconninfoOption *option = NULL;
 	Size maxSize = 0;
 
-	for (option = defaults;
+	for (PQconninfoOption *option = defaults;
 		 option->keyword != NULL;
 		 option++, maxSize++)
 	{

@@ -85,7 +85,6 @@ void
 InitializeConnectionManagement(void)
 {
 	HASHCTL info, connParamsInfo;
-	uint32 hashFlags = 0;
 
 	/*
 	 * Create a single context for connection and transaction related memory
@@ -105,7 +104,7 @@ InitializeConnectionManagement(void)
 	info.hash = ConnectionHashHash;
 	info.match = ConnectionHashCompare;
 	info.hcxt = ConnectionContext;
-	hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);
+	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);
 
 	memcpy(&connParamsInfo, &info, sizeof(HASHCTL));
 	connParamsInfo.entrysize = sizeof(ConnParamsHashEntry);
@@ -187,9 +186,7 @@ GetNodeConnection(uint32 flags, const char *hostname, int32 port)
 MultiConnection *
 GetNonDataAccessConnection(const char *hostname, int32 port)
 {
-	MultiConnection *connection;
-
-	connection = StartNonDataAccessConnection(hostname, port);
+	MultiConnection *connection = StartNonDataAccessConnection(hostname, port);
 
 	FinishConnectionEstablishment(connection);
 
@@ -243,9 +240,8 @@ MultiConnection *
 GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const
 							  char *user, const char *database)
 {
-	MultiConnection *connection;
-
-	connection = StartNodeUserDatabaseConnection(flags, hostname, port, user, database);
+	MultiConnection *connection = StartNodeUserDatabaseConnection(flags, hostname, port,
+																  user, database);
 
 	FinishConnectionEstablishment(connection);
 
@@ -269,11 +265,11 @@ StartWorkerListConnections(List *workerNodeList, uint32 flags, const char *user,
 		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
 		char *nodeName = workerNode->workerName;
 		int nodePort = workerNode->workerPort;
-		MultiConnection *connection = NULL;
 		int connectionFlags = 0;
 
-		connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
-													 user, database);
+		MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
+																	  nodeName, nodePort,
+																	  user, database);
 
 		connectionList = lappend(connectionList, connection);
 	}
@@ -298,7 +294,6 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
 								char *user, const char *database)
 {
 	ConnectionHashKey key;
-	ConnectionHashEntry *entry = NULL;
 	MultiConnection *connection;
 	bool found;
 
@@ -340,7 +335,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
 	 * connection list empty.
 	 */
 
-	entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
+	ConnectionHashEntry *entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
 	if (!found)
 	{
 		entry->connections = MemoryContextAlloc(ConnectionContext,
@@ -412,14 +407,13 @@ CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort)
 	while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
 	{
 		dlist_iter iter;
-		dlist_head *connections = NULL;
 
 		if (strcmp(entry->key.hostname, nodeName) != 0 || entry->key.port != nodePort)
 		{
 			continue;
 		}
 
-		connections = entry->connections;
+		dlist_head *connections = entry->connections;
 		dlist_foreach(iter, connections)
 		{
 			MultiConnection *connection =
@@ -575,7 +569,6 @@ EventSetSizeForConnectionList(List *connections)
 static WaitEventSet *
 WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
 {
-	WaitEventSet *waitEventSet = NULL;
 	ListCell *connectionCell = NULL;
 
 	const int eventSetSize = EventSetSizeForConnectionList(connections);
@@ -586,7 +579,7 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
 		*waitCount = 0;
 	}
 
-	waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize);
+	WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize);
 	EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet),
 						  waitEventSet);
 
@@ -602,8 +595,6 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
 	{
 		MultiConnectionPollState *connectionState = (MultiConnectionPollState *) lfirst(
 			connectionCell);
-		int sock = 0;
-		int eventMask = 0;
 
 		if (numEventsAdded >= eventSetSize)
 		{
@@ -617,9 +608,9 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
 			continue;
 		}
 
-		sock = PQsocket(connectionState->connection->pgConn);
+		int sock = PQsocket(connectionState->connection->pgConn);
 
-		eventMask = MultiConnectionStateEventMask(connectionState);
+		int eventMask = MultiConnectionStateEventMask(connectionState);
 
 		AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, connectionState);
 		numEventsAdded++;
@@ -672,8 +663,6 @@ FinishConnectionListEstablishment(List *multiConnectionList)
 	WaitEventSet *waitEventSet = NULL;
 	bool waitEventSetRebuild = true;
 	int waitCount = 0;
-	WaitEvent *events = NULL;
-	MemoryContext oldContext = NULL;
 
 	foreach(multiConnectionCell, multiConnectionList)
 	{
@@ -699,23 +688,22 @@ FinishConnectionListEstablishment(List *multiConnectionList)
 	}
 
 	/* prepare space for socket events */
-	events = (WaitEvent *) palloc0(EventSetSizeForConnectionList(connectionStates) *
-								   sizeof(WaitEvent));
+	WaitEvent *events = (WaitEvent *) palloc0(EventSetSizeForConnectionList(
+												  connectionStates) *
+											  sizeof(WaitEvent));
 
 	/*
 	 * for high connection counts with lots of round trips we could potentially have a lot
 	 * of (big) waitsets that we'd like to clean right after we have used them. To do this
 	 * we switch to a temporary memory context for this loop which gets reset at the end
 	 */
-	oldContext = MemoryContextSwitchTo(
+	MemoryContext oldContext = MemoryContextSwitchTo(
 		AllocSetContextCreate(CurrentMemoryContext,
 							  "connection establishment temporary context",
 							  ALLOCSET_DEFAULT_SIZES));
 	while (waitCount > 0)
 	{
 		long timeout = DeadlineTimestampTzToTimeout(deadline);
-		int eventCount = 0;
-		int eventIndex = 0;
 
 		if (waitEventSetRebuild)
 		{
@@ -730,13 +718,12 @@ FinishConnectionListEstablishment(List *multiConnectionList)
 			}
 		}
 
-		eventCount = WaitEventSetWait(waitEventSet, timeout, events, waitCount,
-									  WAIT_EVENT_CLIENT_READ);
+		int eventCount = WaitEventSetWait(waitEventSet, timeout, events, waitCount,
+										  WAIT_EVENT_CLIENT_READ);
 
-		for (eventIndex = 0; eventIndex < eventCount; eventIndex++)
+		for (int eventIndex = 0; eventIndex < eventCount; eventIndex++)
 		{
 			WaitEvent *event = &events[eventIndex];
-			bool connectionStateChanged = false;
 			MultiConnectionPollState *connectionState =
 				(MultiConnectionPollState *) event->user_data;
 
@@ -764,7 +751,7 @@ FinishConnectionListEstablishment(List *multiConnectionList)
 				continue;
 			}
 
-			connectionStateChanged = MultiConnectionStatePoll(connectionState);
+			bool connectionStateChanged = MultiConnectionStatePoll(connectionState);
 			if (connectionStateChanged)
 			{
 				if (connectionState->phase != MULTI_CONNECTION_PHASE_CONNECTING)
@@ -909,9 +896,8 @@ static uint32
 ConnectionHashHash(const void *key, Size keysize)
 {
 	ConnectionHashKey *entry = (ConnectionHashKey *) key;
-	uint32 hash = 0;
 
-	hash = string_hash(entry->hostname, NAMEDATALEN);
+	uint32 hash = string_hash(entry->hostname, NAMEDATALEN);
 	hash = hash_combine(hash, hash_uint32(entry->port));
 	hash = hash_combine(hash, string_hash(entry->user, NAMEDATALEN));
 	hash = hash_combine(hash, string_hash(entry->database, NAMEDATALEN));
@@ -948,11 +934,9 @@ static MultiConnection *
 StartConnectionEstablishment(ConnectionHashKey *key)
 {
 	bool found = false;
-	MultiConnection *connection = NULL;
-	ConnParamsHashEntry *entry = NULL;
 
 	/* search our cache for precomputed connection settings */
-	entry = hash_search(ConnParamsHash, key, HASH_ENTER, &found);
+	ConnParamsHashEntry *entry = hash_search(ConnParamsHash, key, HASH_ENTER, &found);
 	if (!found || !entry->isValid)
 	{
 		/* avoid leaking memory in the keys and values arrays */
@@ -968,7 +952,8 @@ StartConnectionEstablishment(ConnectionHashKey *key)
 		entry->isValid = true;
 	}
 
-	connection = MemoryContextAllocZero(ConnectionContext, sizeof(MultiConnection));
+	MultiConnection *connection = MemoryContextAllocZero(ConnectionContext,
+														 sizeof(MultiConnection));
 
 	strlcpy(connection->hostname, key->hostname, MAX_NODE_LENGTH);
 	connection->port = key->port;
@@ -1218,9 +1203,8 @@ char *
 TrimLogLevel(const char *message)
 {
 	char *chompedMessage = pchomp(message);
-	size_t n;
 
-	n = 0;
+	size_t n = 0;
 	while (n < strlen(chompedMessage) && chompedMessage[n] != ':')
 	{
 		n++;

@@ -267,14 +267,15 @@ StartPlacementListConnection(uint32 flags, List *placementAccessList,
 							 const char *userName)
 {
 	char *freeUserName = NULL;
-	MultiConnection *chosenConnection = NULL;
 
 	if (userName == NULL)
 	{
 		userName = freeUserName = CurrentUserName();
 	}
 
-	chosenConnection = FindPlacementListConnection(flags, placementAccessList, userName);
+	MultiConnection *chosenConnection = FindPlacementListConnection(flags,
+																	placementAccessList,
+																	userName);
 	if (chosenConnection == NULL)
 	{
 		/* use the first placement from the list to extract nodename and nodeport */
@@ -346,10 +347,6 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn
 		ShardPlacement *placement = placementAccess->placement;
 		ShardPlacementAccessType accessType = placementAccess->accessType;
 
-		ConnectionPlacementHashEntry *placementEntry = NULL;
-		ConnectionReference *placementConnection = NULL;
-
-		Oid relationId = InvalidOid;
 
 		if (placement->shardId == INVALID_SHARD_ID)
 		{
@@ -363,8 +360,9 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn
 			continue;
 		}
 
-		placementEntry = FindOrCreatePlacementEntry(placement);
-		placementConnection = placementEntry->primaryConnection;
+		ConnectionPlacementHashEntry *placementEntry = FindOrCreatePlacementEntry(
+			placement);
+		ConnectionReference *placementConnection = placementEntry->primaryConnection;
 
 		if (placementConnection->connection == connection)
 		{
@@ -438,7 +436,7 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn
 		}
 
 		/* record the relation access */
-		relationId = RelationIdForShard(placement->shardId);
+		Oid relationId = RelationIdForShard(placement->shardId);
 		RecordRelationAccessIfReferenceTable(relationId, accessType);
 	}
 }
@@ -453,7 +451,6 @@ MultiConnection *
 GetConnectionIfPlacementAccessedInXact(int flags, List *placementAccessList,
 									   const char *userName)
 {
-	MultiConnection *connection = NULL;
 	char *freeUserName = NULL;
 
 	if (userName == NULL)
@@ -461,8 +458,8 @@ GetConnectionIfPlacementAccessedInXact(int flags, List *placementAccessList,
 		userName = freeUserName = CurrentUserName();
 	}
 
-	connection = FindPlacementListConnection(flags, placementAccessList,
-											 userName);
+	MultiConnection *connection = FindPlacementListConnection(flags, placementAccessList,
+															  userName);
 
 	if (freeUserName != NULL)
 	{
@@ -515,9 +512,6 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us
 		ShardPlacement *placement = placementAccess->placement;
 		ShardPlacementAccessType accessType = placementAccess->accessType;
 
-		ConnectionPlacementHashEntry *placementEntry = NULL;
-		ColocatedPlacementsHashEntry *colocatedEntry = NULL;
-		ConnectionReference *placementConnection = NULL;
 
 		if (placement->shardId == INVALID_SHARD_ID)
 		{
@@ -530,9 +524,10 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us
 			continue;
 		}
 
-		placementEntry = FindOrCreatePlacementEntry(placement);
-		colocatedEntry = placementEntry->colocatedEntry;
-		placementConnection = placementEntry->primaryConnection;
+		ConnectionPlacementHashEntry *placementEntry = FindOrCreatePlacementEntry(
+			placement);
+		ColocatedPlacementsHashEntry *colocatedEntry = placementEntry->colocatedEntry;
+		ConnectionReference *placementConnection = placementEntry->primaryConnection;
 
 		/* note: the Asserts below are primarily for clarifying the conditions */
 
@@ -628,12 +623,13 @@ static ConnectionPlacementHashEntry *
 FindOrCreatePlacementEntry(ShardPlacement *placement)
 {
 	ConnectionPlacementHashKey connKey;
-	ConnectionPlacementHashEntry *placementEntry = NULL;
 	bool found = false;
 
 	connKey.placementId = placement->placementId;
 
-	placementEntry = hash_search(ConnectionPlacementHash, &connKey, HASH_ENTER, &found);
+	ConnectionPlacementHashEntry *placementEntry = hash_search(ConnectionPlacementHash,
+															   &connKey, HASH_ENTER,
+															   &found);
 	if (!found)
 	{
 		/* no connection has been chosen for this placement */
@@ -646,15 +642,15 @@ FindOrCreatePlacementEntry(ShardPlacement *placement)
 			placement->partitionMethod == DISTRIBUTE_BY_NONE)
 		{
 			ColocatedPlacementsHashKey coloKey;
-			ColocatedPlacementsHashEntry *colocatedEntry = NULL;
 
 			coloKey.nodeId = placement->nodeId;
 			coloKey.colocationGroupId = placement->colocationGroupId;
 			coloKey.representativeValue = placement->representativeValue;
 
 			/* look for a connection assigned to co-located placements */
-			colocatedEntry = hash_search(ColocatedPlacementsHash, &coloKey, HASH_ENTER,
-										 &found);
+			ColocatedPlacementsHashEntry *colocatedEntry = hash_search(
+				ColocatedPlacementsHash, &coloKey, HASH_ENTER,
+				&found);
 			if (!found)
 			{
 				void *conRef = MemoryContextAllocZero(TopTransactionContext,
@@ -835,12 +831,12 @@ AssociatePlacementWithShard(ConnectionPlacementHashEntry *placementEntry,
 							ShardPlacement *placement)
 {
 	ConnectionShardHashKey shardKey;
-	ConnectionShardHashEntry *shardEntry = NULL;
 	bool found = false;
 	dlist_iter placementIter;
 
 	shardKey.shardId = placement->shardId;
-	shardEntry = hash_search(ConnectionShardHash, &shardKey, HASH_ENTER, &found);
+	ConnectionShardHashEntry *shardEntry = hash_search(ConnectionShardHash, &shardKey,
+													   HASH_ENTER, &found);
 	if (!found)
 	{
 		dlist_init(&shardEntry->placementConnections);
@@ -1033,7 +1029,6 @@ CheckShardPlacements(ConnectionShardHashEntry *shardEntry)
 		ConnectionPlacementHashEntry *placementEntry =
 			dlist_container(ConnectionPlacementHashEntry, shardNode, placementIter.cur);
 		ConnectionReference *primaryConnection = placementEntry->primaryConnection;
-		MultiConnection *connection = NULL;
 
 		/* we only consider shards that are modified */
 		if (primaryConnection == NULL ||
@@ -1042,7 +1037,7 @@ CheckShardPlacements(ConnectionShardHashEntry *shardEntry)
 			continue;
 		}
 
-		connection = primaryConnection->connection;
+		MultiConnection *connection = primaryConnection->connection;
 
 		if (!connection || connection->remoteTransaction.transactionFailed)
 		{
@@ -1096,7 +1091,6 @@ void
 InitPlacementConnectionManagement(void)
 {
 	HASHCTL info;
-	uint32 hashFlags = 0;
 
 	/* create (placementId) -> [ConnectionReference] hash */
 	memset(&info, 0, sizeof(info));
@@ -1104,7 +1098,7 @@ InitPlacementConnectionManagement(void)
 	info.entrysize = sizeof(ConnectionPlacementHashEntry);
 	info.hash = tag_hash;
 	info.hcxt = ConnectionContext;
-	hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	uint32 hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	ConnectionPlacementHash = hash_create("citus connection cache (placementid)",
 										  64, &info, hashFlags);
@@ -1141,9 +1135,8 @@ static uint32
 ColocatedPlacementsHashHash(const void *key, Size keysize)
 {
 	ColocatedPlacementsHashKey *entry = (ColocatedPlacementsHashKey *) key;
-	uint32 hash = 0;
 
-	hash = hash_uint32(entry->nodeId);
+	uint32 hash = hash_uint32(entry->nodeId);
 	hash = hash_combine(hash, hash_uint32(entry->colocationGroupId));
 	hash = hash_combine(hash, hash_uint32(entry->representativeValue));
 

@ -171,9 +171,6 @@ ClearResultsIfReady(MultiConnection *connection)
|
|||
|
||||
while (true)
|
||||
{
|
||||
PGresult *result = NULL;
|
||||
ExecStatusType resultStatus;
|
||||
|
||||
/*
|
||||
* If busy, there might still be results already received and buffered
|
||||
* by the OS. As connection is in non-blocking mode, we can check for
|
||||
|
@ -199,14 +196,14 @@ ClearResultsIfReady(MultiConnection *connection)
|
|||
return false;
|
||||
}
|
||||
|
||||
result = PQgetResult(pgConn);
|
||||
PGresult *result = PQgetResult(pgConn);
|
||||
if (result == NULL)
|
||||
{
|
||||
/* no more results available */
|
||||
return true;
|
||||
}
|
||||
|
||||
resultStatus = PQresultStatus(result);
|
||||
ExecStatusType resultStatus = PQresultStatus(result);
|
||||
|
||||
/* only care about the status, can clear now */
|
||||
PQclear(result);
|
||||
|
@ -241,18 +238,16 @@ bool
|
|||
SqlStateMatchesCategory(char *sqlStateString, int category)
|
||||
{
|
||||
bool sqlStateMatchesCategory = false;
|
||||
int sqlState = 0;
|
||||
int sqlStateCategory = 0;
|
||||
|
||||
if (sqlStateString == NULL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2],
|
||||
sqlStateString[3], sqlStateString[4]);
|
||||
int sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2],
|
||||
sqlStateString[3], sqlStateString[4]);
|
||||
|
||||
sqlStateCategory = ERRCODE_TO_CATEGORY(sqlState);
|
||||
int sqlStateCategory = ERRCODE_TO_CATEGORY(sqlState);
|
||||
if (sqlStateCategory == category)
|
||||
{
|
||||
sqlStateMatchesCategory = true;
|
||||
|
@ -390,17 +385,15 @@ ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList)
|
|||
void
|
||||
ExecuteCriticalRemoteCommand(MultiConnection *connection, const char *command)
|
||||
{
|
||||
int querySent = 0;
|
||||
PGresult *result = NULL;
|
||||
bool raiseInterrupts = true;
|
||||
|
||||
querySent = SendRemoteCommand(connection, command);
|
||||
int querySent = SendRemoteCommand(connection, command);
|
||||
if (querySent == 0)
|
||||
{
|
||||
ReportConnectionError(connection, ERROR);
|
||||
}
|
||||
|
||||
result = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
if (!IsResponseOK(result))
|
||||
{
|
||||
ReportResultError(connection, result, ERROR);
|
||||
|
@ -422,18 +415,16 @@ int
|
|||
ExecuteOptionalRemoteCommand(MultiConnection *connection, const char *command,
|
||||
PGresult **result)
|
||||
{
|
||||
int querySent = 0;
|
||||
PGresult *localResult = NULL;
|
||||
bool raiseInterrupts = true;
|
||||
|
||||
querySent = SendRemoteCommand(connection, command);
|
||||
int querySent = SendRemoteCommand(connection, command);
|
||||
if (querySent == 0)
|
||||
{
|
||||
ReportConnectionError(connection, WARNING);
|
||||
return QUERY_SEND_FAILED;
|
||||
}
|
||||
|
||||
localResult = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
PGresult *localResult = GetRemoteCommandResult(connection, raiseInterrupts);
|
||||
if (!IsResponseOK(localResult))
|
||||
{
|
||||
ReportResultError(connection, localResult, WARNING);
|
||||
|
@ -473,7 +464,6 @@ SendRemoteCommandParams(MultiConnection *connection, const char *command,
|
|||
const char *const *parameterValues)
|
||||
{
|
||||
PGconn *pgConn = connection->pgConn;
|
||||
int rc = 0;
|
||||
|
||||
LogRemoteCommand(connection, command);
|
||||
|
||||
|
@ -488,8 +478,8 @@ SendRemoteCommandParams(MultiConnection *connection, const char *command,
|
|||
|
||||
Assert(PQisnonblocking(pgConn));
|
||||
|
||||
rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes,
|
||||
parameterValues, NULL, NULL, 0);
|
||||
int rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes,
|
||||
parameterValues, NULL, NULL, 0);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -506,7 +496,6 @@ int
|
|||
SendRemoteCommand(MultiConnection *connection, const char *command)
|
||||
{
|
||||
PGconn *pgConn = connection->pgConn;
|
||||
int rc = 0;
|
||||
|
||||
LogRemoteCommand(connection, command);
|
||||
|
||||
|
@ -521,7 +510,7 @@ SendRemoteCommand(MultiConnection *connection, const char *command)
|
|||
|
||||
Assert(PQisnonblocking(pgConn));
|
||||
|
||||
rc = PQsendQuery(pgConn, command);
|
||||
int rc = PQsendQuery(pgConn, command);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -536,7 +525,6 @@ ReadFirstColumnAsText(PGresult *queryResult)
|
|||
{
|
||||
List *resultRowList = NIL;
|
||||
const int columnIndex = 0;
|
||||
int64 rowIndex = 0;
|
||||
int64 rowCount = 0;
|
||||
|
||||
ExecStatusType status = PQresultStatus(queryResult);
|
||||
|
@ -545,7 +533,7 @@ ReadFirstColumnAsText(PGresult *queryResult)
|
|||
rowCount = PQntuples(queryResult);
|
||||
}
|
||||
|
||||
for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
|
||||
for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
|
||||
{
|
||||
char *rowValue = PQgetvalue(queryResult, rowIndex, columnIndex);
|
||||
|
||||
|
@ -579,7 +567,6 @@ PGresult *
|
|||
 GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts)
 {
 	PGconn *pgConn = connection->pgConn;
-	PGresult *result = NULL;
 
 	/*
 	 * Short circuit tests around the more expensive parts of this
@@ -605,7 +592,7 @@ GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts)
 	/* no IO should be necessary to get result */
 	Assert(!PQisBusy(pgConn));
 
-	result = PQgetResult(connection->pgConn);
+	PGresult *result = PQgetResult(connection->pgConn);
 
 	return result;
 }
 
@@ -621,7 +608,6 @@ bool
 PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
 {
 	PGconn *pgConn = connection->pgConn;
-	int copyState = 0;
 	bool allowInterrupts = true;
 
 	if (PQstatus(pgConn) != CONNECTION_OK)
@@ -631,7 +617,7 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
 
 	Assert(PQisnonblocking(pgConn));
 
-	copyState = PQputCopyData(pgConn, buffer, nbytes);
+	int copyState = PQputCopyData(pgConn, buffer, nbytes);
 	if (copyState == -1)
 	{
 		return false;
@@ -670,7 +656,6 @@ bool
 PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg)
 {
 	PGconn *pgConn = connection->pgConn;
-	int copyState = 0;
 	bool allowInterrupts = true;
 
 	if (PQstatus(pgConn) != CONNECTION_OK)
@@ -680,7 +665,7 @@ PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg)
 
 	Assert(PQisnonblocking(pgConn));
 
-	copyState = PQputCopyEnd(pgConn, errormsg);
+	int copyState = PQputCopyEnd(pgConn, errormsg);
 	if (copyState == -1)
 	{
 		return false;
@@ -720,12 +705,10 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts)
 	/* perform the necessary IO */
 	while (true)
 	{
-		int sendStatus = 0;
-		int rc = 0;
 		int waitFlags = WL_POSTMASTER_DEATH | WL_LATCH_SET;
 
 		/* try to send all pending data */
-		sendStatus = PQflush(pgConn);
+		int sendStatus = PQflush(pgConn);
 
 		/* if sending failed, there's nothing more we can do */
 		if (sendStatus == -1)
@@ -753,7 +736,7 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts)
 			return true;
 		}
 
-		rc = WaitLatchOrSocket(MyLatch, waitFlags, sock, 0, PG_WAIT_EXTENSION);
+		int rc = WaitLatchOrSocket(MyLatch, waitFlags, sock, 0, PG_WAIT_EXTENSION);
 		if (rc & WL_POSTMASTER_DEATH)
 		{
 			ereport(ERROR, (errmsg("postmaster was shut down, exiting")));
@@ -837,7 +820,6 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
 {
 	bool cancellationReceived = false;
 	int eventIndex = 0;
-	int eventCount = 0;
 	long timeout = -1;
 	int pendingConnectionCount = totalConnectionCount -
 								 pendingConnectionsStartIndex;
@@ -857,14 +839,14 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
 		}
 
 		/* wait for I/O events */
-		eventCount = WaitEventSetWait(waitEventSet, timeout, events,
-									  pendingConnectionCount, WAIT_EVENT_CLIENT_READ);
+		int eventCount = WaitEventSetWait(waitEventSet, timeout, events,
+										  pendingConnectionCount,
+										  WAIT_EVENT_CLIENT_READ);
 
 		/* process I/O events */
 		for (; eventIndex < eventCount; eventIndex++)
 		{
 			WaitEvent *event = &events[eventIndex];
-			MultiConnection *connection = NULL;
 			bool connectionIsReady = false;
 
 			if (event->events & WL_POSTMASTER_DEATH)
@@ -896,7 +878,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
 				continue;
 			}
 
-			connection = (MultiConnection *) event->user_data;
+			MultiConnection *connection = (MultiConnection *) event->user_data;
 
 			if (event->events & WL_SOCKET_WRITEABLE)
 			{
@@ -1028,8 +1010,6 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount,
 				  int pendingConnectionsStartIndex)
 {
 	int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex;
-	WaitEventSet *waitEventSet = NULL;
-	int connectionIndex = 0;
 
 	/*
 	 * subtract 3 to make room for WL_POSTMASTER_DEATH, WL_LATCH_SET, and
@@ -1042,9 +1022,11 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount,
 
 	/* allocate pending connections + 2 for the signal latch and postmaster death */
 	/* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */
-	waitEventSet = CreateWaitEventSet(CurrentMemoryContext, pendingConnectionCount + 2);
+	WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext,
+													pendingConnectionCount + 2);
 
-	for (connectionIndex = 0; connectionIndex < pendingConnectionCount; connectionIndex++)
+	for (int connectionIndex = 0; connectionIndex < pendingConnectionCount;
+		 connectionIndex++)
 	{
 		MultiConnection *connection = allConnections[pendingConnectionsStartIndex +
 													 connectionIndex];
@@ -1078,7 +1060,6 @@ bool
 SendCancelationRequest(MultiConnection *connection)
 {
 	char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };
-	bool cancelSent = false;
 
 	PGcancel *cancelObject = PQgetCancel(connection->pgConn);
 	if (cancelObject == NULL)
@@ -1087,7 +1068,7 @@ SendCancelationRequest(MultiConnection *connection)
 		return false;
 	}
 
-	cancelSent = PQcancel(cancelObject, errorBuffer, sizeof(errorBuffer));
+	bool cancelSent = PQcancel(cancelObject, errorBuffer, sizeof(errorBuffer));
 	if (!cancelSent)
 	{
 		ereport(WARNING, (errmsg("could not issue cancel request"),

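The hunks above and below all follow one mechanical pattern: a variable that was declared at the top of its block and assigned exactly once further down is now declared at that first assignment, and the rewrite only applies where nothing reads or writes the variable in between, so behavior is unchanged. A minimal before/after sketch of the shape (FetchResultSketch is a made-up name, not a function from this diff; PQgetResult is the ordinary libpq call):

#include <libpq-fe.h>

/* before: declaration separated from its single assignment */
static PGresult *
FetchResultSketch(PGconn *pgConn)
{
	PGresult *result = NULL;

	result = PQgetResult(pgConn);

	return result;
}

/* after: the declaration moves to the first assignment */
static PGresult *
FetchResultSketch(PGconn *pgConn)
{
	PGresult *result = PQgetResult(pgConn);

	return result;
}
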
@@ -200,19 +200,15 @@ pg_get_serverdef_string(Oid tableRelationId)
 char *
 pg_get_sequencedef_string(Oid sequenceRelationId)
 {
-	char *qualifiedSequenceName = NULL;
-	char *sequenceDef = NULL;
-	Form_pg_sequence pgSequenceForm = NULL;
-
-	pgSequenceForm = pg_get_sequencedef(sequenceRelationId);
+	Form_pg_sequence pgSequenceForm = pg_get_sequencedef(sequenceRelationId);
 
 	/* build our DDL command */
-	qualifiedSequenceName = generate_qualified_relation_name(sequenceRelationId);
+	char *qualifiedSequenceName = generate_qualified_relation_name(sequenceRelationId);
 
-	sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
-						   pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
-						   pgSequenceForm->seqmax, pgSequenceForm->seqstart,
-						   pgSequenceForm->seqcycle ? "" : "NO ");
+	char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
+								 pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
+								 pgSequenceForm->seqmax, pgSequenceForm->seqstart,
+								 pgSequenceForm->seqcycle ? "" : "NO ");
 
 	return sequenceDef;
 }
 
@@ -225,16 +221,13 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
 Form_pg_sequence
 pg_get_sequencedef(Oid sequenceRelationId)
 {
-	Form_pg_sequence pgSequenceForm = NULL;
-	HeapTuple heapTuple = NULL;
-
-	heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId);
+	HeapTuple heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		elog(ERROR, "cache lookup failed for sequence %u", sequenceRelationId);
 	}
 
-	pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple);
+	Form_pg_sequence pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple);
 
 	ReleaseSysCache(heapTuple);
 
@@ -253,12 +246,7 @@ pg_get_sequencedef(Oid sequenceRelationId)
 char *
 pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 {
-	Relation relation = NULL;
-	char *relationName = NULL;
 	char relationKind = 0;
-	TupleDesc tupleDescriptor = NULL;
-	TupleConstr *tupleConstraints = NULL;
-	int attributeIndex = 0;
 	bool firstAttributePrinted = false;
 	AttrNumber defaultValueIndex = 0;
 	AttrNumber constraintIndex = 0;
@@ -273,8 +261,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 	 * pg_attribute, pg_constraint, and pg_class; and therefore using the
 	 * descriptor saves us from a lot of additional work.
 	 */
-	relation = relation_open(tableRelationId, AccessShareLock);
-	relationName = generate_relation_name(tableRelationId, NIL);
+	Relation relation = relation_open(tableRelationId, AccessShareLock);
+	char *relationName = generate_relation_name(tableRelationId, NIL);
 
 	EnsureRelationKindSupported(tableRelationId);
 
@@ -301,10 +289,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 	 * and is not inherited from another table, print the column's name and its
 	 * formatted type.
 	 */
-	tupleDescriptor = RelationGetDescr(relation);
-	tupleConstraints = tupleDescriptor->constr;
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
+	TupleConstr *tupleConstraints = tupleDescriptor->constr;
 
-	for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++)
+	for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
+		 attributeIndex++)
 	{
 		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
 
@@ -318,45 +307,40 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 		 */
 		if (!attributeForm->attisdropped)
 		{
-			const char *attributeName = NULL;
-			const char *attributeTypeName = NULL;
-
 			if (firstAttributePrinted)
 			{
 				appendStringInfoString(&buffer, ", ");
 			}
 			firstAttributePrinted = true;
 
-			attributeName = NameStr(attributeForm->attname);
+			const char *attributeName = NameStr(attributeForm->attname);
 			appendStringInfo(&buffer, "%s ", quote_identifier(attributeName));
 
-			attributeTypeName = format_type_with_typemod(attributeForm->atttypid,
-														 attributeForm->atttypmod);
+			const char *attributeTypeName = format_type_with_typemod(
+				attributeForm->atttypid,
+				attributeForm->
+				atttypmod);
 			appendStringInfoString(&buffer, attributeTypeName);
 
 			/* if this column has a default value, append the default value */
 			if (attributeForm->atthasdef)
 			{
-				AttrDefault *defaultValueList = NULL;
-				AttrDefault *defaultValue = NULL;
-
-				Node *defaultNode = NULL;
-				List *defaultContext = NULL;
-				char *defaultString = NULL;
-
 				Assert(tupleConstraints != NULL);
 
-				defaultValueList = tupleConstraints->defval;
+				AttrDefault *defaultValueList = tupleConstraints->defval;
 				Assert(defaultValueList != NULL);
 
-				defaultValue = &(defaultValueList[defaultValueIndex]);
+				AttrDefault *defaultValue = &(defaultValueList[defaultValueIndex]);
 				defaultValueIndex++;
 
 				Assert(defaultValue->adnum == (attributeIndex + 1));
 				Assert(defaultValueIndex <= tupleConstraints->num_defval);
 
 				/* convert expression to node tree, and prepare deparse context */
-				defaultNode = (Node *) stringToNode(defaultValue->adbin);
+				Node *defaultNode = (Node *) stringToNode(defaultValue->adbin);
 
 				/*
 				 * if column default value is explicitly requested, or it is
@@ -418,9 +402,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 		ConstrCheck *checkConstraintList = tupleConstraints->check;
 		ConstrCheck *checkConstraint = &(checkConstraintList[constraintIndex]);
 
-		Node *checkNode = NULL;
-		List *checkContext = NULL;
-		char *checkString = NULL;
 
 		/* if an attribute or constraint has been printed, format properly */
 		if (firstAttributePrinted || constraintIndex > 0)
@@ -432,11 +413,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 						 quote_identifier(checkConstraint->ccname));
 
 		/* convert expression to node tree, and prepare deparse context */
-		checkNode = (Node *) stringToNode(checkConstraint->ccbin);
-		checkContext = deparse_context_for(relationName, tableRelationId);
+		Node *checkNode = (Node *) stringToNode(checkConstraint->ccbin);
+		List *checkContext = deparse_context_for(relationName, tableRelationId);
 
 		/* deparse check constraint string */
-		checkString = deparse_expression(checkNode, checkContext, false, false);
+		char *checkString = deparse_expression(checkNode, checkContext, false, false);
 
 		appendStringInfoString(&buffer, checkString);
 	}
@@ -491,10 +472,9 @@ void
 EnsureRelationKindSupported(Oid relationId)
 {
 	char relationKind = get_rel_relkind(relationId);
-	bool supportedRelationKind = false;
 
-	supportedRelationKind = RegularTable(relationId) ||
-							relationKind == RELKIND_FOREIGN_TABLE;
+	bool supportedRelationKind = RegularTable(relationId) ||
+								 relationKind == RELKIND_FOREIGN_TABLE;
 
 	/*
 	 * Citus doesn't support bare inherited tables (i.e., not a partition or
@@ -523,9 +503,6 @@ EnsureRelationKindSupported(Oid relationId)
 char *
 pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
 {
-	Relation relation = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	AttrNumber attributeIndex = 0;
 	List *columnOptionList = NIL;
 	ListCell *columnOptionCell = NULL;
 	bool firstOptionPrinted = false;
@@ -536,7 +513,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
 	 * and use the relation's tuple descriptor to access attribute information.
 	 * This is primarily to maintain symmetry with pg_get_tableschemadef.
 	 */
-	relation = relation_open(tableRelationId, AccessShareLock);
+	Relation relation = relation_open(tableRelationId, AccessShareLock);
 
 	EnsureRelationKindSupported(tableRelationId);
 
@@ -545,9 +522,10 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
 	 * and is not inherited from another table, check if column storage or
 	 * statistics statements need to be printed.
 	 */
-	tupleDescriptor = RelationGetDescr(relation);
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
 
-	for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++)
+	for (AttrNumber attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
+		 attributeIndex++)
 	{
 		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
 		char *attributeName = NameStr(attributeForm->attname);
@@ -631,8 +609,6 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
 	 */
 	foreach(columnOptionCell, columnOptionList)
 	{
-		char *columnOptionStatement = NULL;
-
 		if (!firstOptionPrinted)
 		{
 			initStringInfo(&buffer);
@@ -645,7 +621,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
 		}
 		firstOptionPrinted = true;
 
-		columnOptionStatement = (char *) lfirst(columnOptionCell);
+		char *columnOptionStatement = (char *) lfirst(columnOptionCell);
 		appendStringInfoString(&buffer, columnOptionStatement);
 
 		pfree(columnOptionStatement);
@@ -670,14 +646,13 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
 	IndexStmt *indexStmt = copyObject(origStmt); /* copy to avoid modifications */
 	char *relationName = indexStmt->relation->relname;
 	char *indexName = indexStmt->idxname;
-	List *deparseContext = NULL;
 
 	/* extend relation and index name using shard identifier */
 	AppendShardIdToName(&relationName, shardid);
 	AppendShardIdToName(&indexName, shardid);
 
 	/* use extended shard name and transformed stmt for deparsing */
-	deparseContext = deparse_context_for(relationName, distrelid);
+	List *deparseContext = deparse_context_for(relationName, distrelid);
 	indexStmt = transformIndexStmt(distrelid, indexStmt, NULL);
 
 	appendStringInfo(buffer, "CREATE %s INDEX %s %s %s ON %s USING %s ",
@@ -850,19 +825,17 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse
 char *
 pg_get_indexclusterdef_string(Oid indexRelationId)
 {
-	HeapTuple indexTuple = NULL;
-	Form_pg_index indexForm = NULL;
-	Oid tableRelationId = InvalidOid;
 	StringInfoData buffer = { NULL, 0, 0, 0 };
 
-	indexTuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(indexRelationId), 0, 0, 0);
+	HeapTuple indexTuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(indexRelationId),
+										  0, 0, 0);
 	if (!HeapTupleIsValid(indexTuple))
 	{
 		ereport(ERROR, (errmsg("cache lookup failed for index %u", indexRelationId)));
 	}
 
-	indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
-	tableRelationId = indexForm->indrelid;
+	Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
+	Oid tableRelationId = indexForm->indrelid;
 
 	/* check if the table is clustered on this index */
 	if (indexForm->indisclustered)
@@ -892,20 +865,16 @@ pg_get_table_grants(Oid relationId)
 {
 	/* *INDENT-OFF* */
 	StringInfoData buffer;
-	Relation relation = NULL;
-	char *relationName = NULL;
 	List *defs = NIL;
-	HeapTuple classTuple = NULL;
-	Datum aclDatum = 0;
 	bool isNull = false;
 
-	relation = relation_open(relationId, AccessShareLock);
-	relationName = generate_relation_name(relationId, NIL);
+	Relation relation = relation_open(relationId, AccessShareLock);
+	char *relationName = generate_relation_name(relationId, NIL);
 
 	initStringInfo(&buffer);
 
 	/* lookup all table level grants */
-	classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
+	HeapTuple classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
 	if (!HeapTupleIsValid(classTuple))
 	{
 		ereport(ERROR,
@@ -914,17 +883,13 @@ pg_get_table_grants(Oid relationId)
 						relationId)));
 	}
 
-	aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl,
+	Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl,
 							   &isNull);
 
 	ReleaseSysCache(classTuple);
 
 	if (!isNull)
 	{
-		int i = 0;
-		AclItem *aidat = NULL;
-		Acl *acl = NULL;
-		int offtype = 0;
 
 		/*
 		 * First revoke all default permissions, so we can start adding the
@@ -943,11 +908,11 @@ pg_get_table_grants(Oid relationId)
 
 		/* iterate through the acl datastructure, emit GRANTs */
 
-		acl = DatumGetAclP(aclDatum);
-		aidat = ACL_DAT(acl);
+		Acl *acl = DatumGetAclP(aclDatum);
+		AclItem *aidat = ACL_DAT(acl);
 
-		offtype = -1;
-		i = 0;
+		int offtype = -1;
+		int i = 0;
 		while (i < ACL_NUM(acl))
 		{
 			AclItem *aidata = NULL;
@@ -975,9 +940,8 @@ pg_get_table_grants(Oid relationId)
 
 			if (aidata->ai_grantee != 0)
 			{
-				HeapTuple htup;
 
-				htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee));
+				HeapTuple htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee));
 				if (HeapTupleIsValid(htup))
 				{
 					Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup));
@@ -1029,28 +993,22 @@ pg_get_table_grants(Oid relationId)
 char *
 generate_qualified_relation_name(Oid relid)
 {
-	HeapTuple tp;
-	Form_pg_class reltup;
-	char *relname;
-	char *nspname;
-	char *result;
-
-	tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+	HeapTuple tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 	if (!HeapTupleIsValid(tp))
 	{
 		elog(ERROR, "cache lookup failed for relation %u", relid);
 	}
-	reltup = (Form_pg_class) GETSTRUCT(tp);
-	relname = NameStr(reltup->relname);
+	Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
+	char *relname = NameStr(reltup->relname);
 
-	nspname = get_namespace_name(reltup->relnamespace);
+	char *nspname = get_namespace_name(reltup->relnamespace);
 	if (!nspname)
 	{
 		elog(ERROR, "cache lookup failed for namespace %u",
 			 reltup->relnamespace);
 	}
 
-	result = quote_qualified_identifier(nspname, relname);
+	char *result = quote_qualified_identifier(nspname, relname);
 
 	ReleaseSysCache(tp);
 
@@ -1202,16 +1160,13 @@ contain_nextval_expression_walker(Node *node, void *context)
 char *
 pg_get_replica_identity_command(Oid tableRelationId)
 {
-	Relation relation = NULL;
 	StringInfo buf = makeStringInfo();
-	char *relationName = NULL;
-	char replicaIdentity = 0;
 
-	relation = heap_open(tableRelationId, AccessShareLock);
+	Relation relation = heap_open(tableRelationId, AccessShareLock);
 
-	replicaIdentity = relation->rd_rel->relreplident;
+	char replicaIdentity = relation->rd_rel->relreplident;
 
-	relationName = generate_qualified_relation_name(tableRelationId);
+	char *relationName = generate_qualified_relation_name(tableRelationId);
 
 	if (replicaIdentity == REPLICA_IDENTITY_INDEX)
 	{
@@ -1251,18 +1206,16 @@ static char *
 flatten_reloptions(Oid relid)
 {
 	char *result = NULL;
-	HeapTuple tuple;
-	Datum reloptions;
 	bool isnull;
 
-	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+	HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 	if (!HeapTupleIsValid(tuple))
 	{
 		elog(ERROR, "cache lookup failed for relation %u", relid);
 	}
 
-	reloptions = SysCacheGetAttr(RELOID, tuple,
-								 Anum_pg_class_reloptions, &isnull);
+	Datum reloptions = SysCacheGetAttr(RELOID, tuple,
+									   Anum_pg_class_reloptions, &isnull);
 	if (!isnull)
 	{
 		StringInfoData buf;
@@ -1279,16 +1232,14 @@ flatten_reloptions(Oid relid)
 		for (i = 0; i < noptions; i++)
 		{
 			char *option = TextDatumGetCString(options[i]);
-			char *name;
-			char *separator;
-			char *value;
 
 			/*
 			 * Each array element should have the form name=value. If the "="
 			 * is missing for some reason, treat it like an empty value.
 			 */
-			name = option;
-			separator = strchr(option, '=');
+			char *name = option;
+			char *separator = strchr(option, '=');
 			if (separator)
 			{
 				*separator = '\0';
@@ -1343,15 +1294,13 @@ flatten_reloptions(Oid relid)
 static void
 simple_quote_literal(StringInfo buf, const char *val)
 {
-	const char *valptr;
-
 	/*
 	 * We form the string literal according to the prevailing setting of
 	 * standard_conforming_strings; we never use E''. User is responsible for
 	 * making sure result is used correctly.
 	 */
 	appendStringInfoChar(buf, '\'');
-	for (valptr = val; *valptr; valptr++)
+	for (const char *valptr = val; *valptr; valptr++)
 	{
 		char ch = *valptr;
 

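Loop counters get the same treatment: a counter used by exactly one loop moves into the for-loop initializer, shrinking its scope to the loop body; this relies on C99-style declarations being accepted by the project's compiler settings. Counters that outlive their loop, like eventIndex in WaitForAllConnections above, keep their block-level declaration. A hedged sketch with hypothetical names (SumSketch, valueIndex):

/* before: counter declared at function scope */
static int
SumSketch(const int *values, int count)
{
	int sum = 0;
	int valueIndex = 0;

	for (valueIndex = 0; valueIndex < count; valueIndex++)
	{
		sum += values[valueIndex];
	}

	return sum;
}

/* after: C99 declaration inside the for initializer */
static int
SumSketch(const int *values, int count)
{
	int sum = 0;

	for (int valueIndex = 0; valueIndex < count; valueIndex++)
	{
		sum += values[valueIndex];
	}

	return sum;
}
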
@@ -270,11 +270,9 @@ static void
 AppendAlterExtensionSchemaStmt(StringInfo buf,
 							   AlterObjectSchemaStmt *alterExtensionSchemaStmt)
 {
-	const char *extensionName = NULL;
-
 	Assert(alterExtensionSchemaStmt->objectType == OBJECT_EXTENSION);
 
-	extensionName = strVal(alterExtensionSchemaStmt->object);
+	const char *extensionName = strVal(alterExtensionSchemaStmt->object);
 	appendStringInfo(buf, "ALTER EXTENSION %s SET SCHEMA %s;", extensionName,
 					 quote_identifier(alterExtensionSchemaStmt->newschema));
 }

@@ -488,14 +488,13 @@ AppendFunctionNameList(StringInfo buf, List *objects, ObjectType objtype)
 	foreach(objectCell, objects)
 	{
 		Node *object = lfirst(objectCell);
-		ObjectWithArgs *func = NULL;
 
 		if (objectCell != list_head(objects))
 		{
 			appendStringInfo(buf, ", ");
 		}
 
-		func = castNode(ObjectWithArgs, object);
+		ObjectWithArgs *func = castNode(ObjectWithArgs, object);
 
 		AppendFunctionName(buf, func, objtype);
 	}
@@ -508,14 +507,11 @@ AppendFunctionNameList(StringInfo buf, List *objects, ObjectType objtype)
 static void
 AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
 {
-	Oid funcid = InvalidOid;
-	HeapTuple proctup;
 	char *functionName = NULL;
 	char *schemaName = NULL;
-	char *qualifiedFunctionName;
 
-	funcid = LookupFuncWithArgs(objtype, func, true);
-	proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+	Oid funcid = LookupFuncWithArgs(objtype, func, true);
+	HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
 
 	if (!HeapTupleIsValid(proctup))
 	{
@@ -529,9 +525,7 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
 	}
 	else
 	{
-		Form_pg_proc procform;
-
-		procform = (Form_pg_proc) GETSTRUCT(proctup);
+		Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);
 		functionName = NameStr(procform->proname);
 		functionName = pstrdup(functionName); /* we release the tuple before used */
 		schemaName = get_namespace_name(procform->pronamespace);
@@ -539,7 +533,7 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
 		ReleaseSysCache(proctup);
 	}
 
-	qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName);
+	char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName);
 	appendStringInfoString(buf, qualifiedFunctionName);
 
 	if (OidIsValid(funcid))
@@ -548,28 +542,25 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
 		 * If the function exists we want to use pg_get_function_identity_arguments to
 		 * serialize its canonical arguments
 		 */
-		OverrideSearchPath *overridePath = NULL;
-		Datum sqlTextDatum = 0;
-		const char *args = NULL;
-
 		/*
 		 * Set search_path to NIL so that all objects outside of pg_catalog will be
 		 * schema-prefixed. pg_catalog will be added automatically when we call
 		 * PushOverrideSearchPath(), since we set addCatalog to true;
 		 */
-		overridePath = GetOverrideSearchPath(CurrentMemoryContext);
+		OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
 		overridePath->schemas = NIL;
 		overridePath->addCatalog = true;
 
 		PushOverrideSearchPath(overridePath);
 
-		sqlTextDatum = DirectFunctionCall1(pg_get_function_identity_arguments,
-										   ObjectIdGetDatum(funcid));
+		Datum sqlTextDatum = DirectFunctionCall1(pg_get_function_identity_arguments,
+												 ObjectIdGetDatum(funcid));
 
 		/* revert back to original search_path */
 		PopOverrideSearchPath();
 
-		args = TextDatumGetCString(sqlTextDatum);
+		const char *args = TextDatumGetCString(sqlTextDatum);
 		appendStringInfo(buf, "(%s)", args);
 	}
 	else if (!func->args_unspecified)
@@ -580,9 +571,8 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
 		 * postgres' TypeNameListToString. For now the best we can do until we understand
 		 * the underlying cause better.
 		 */
-		const char *args = NULL;
-
-		args = TypeNameListToString(func->objargs);
+		const char *args = TypeNameListToString(func->objargs);
 		appendStringInfo(buf, "(%s)", args);
 	}
 

@@ -137,14 +137,12 @@ AppendAlterTypeStmt(StringInfo buf, AlterTableStmt *stmt)
 	appendStringInfo(buf, "ALTER TYPE %s", identifier);
 	foreach(cmdCell, stmt->cmds)
 	{
-		AlterTableCmd *alterTableCmd = NULL;
-
 		if (cmdCell != list_head(stmt->cmds))
 		{
 			appendStringInfoString(buf, ", ");
 		}
 
-		alterTableCmd = castNode(AlterTableCmd, lfirst(cmdCell));
+		AlterTableCmd *alterTableCmd = castNode(AlterTableCmd, lfirst(cmdCell));
 		AppendAlterTypeCmd(buf, alterTableCmd);
 	}
 
@@ -317,13 +315,11 @@ AppendCompositeTypeStmt(StringInfo str, CompositeTypeStmt *stmt)
 static void
 AppendCreateEnumStmt(StringInfo str, CreateEnumStmt *stmt)
 {
-	RangeVar *typevar = NULL;
-	const char *identifier = NULL;
-
-	typevar = makeRangeVarFromNameList(stmt->typeName);
+	RangeVar *typevar = makeRangeVarFromNameList(stmt->typeName);
 
 	/* create the identifier from the fully qualified rangevar */
-	identifier = quote_qualified_identifier(typevar->schemaname, typevar->relname);
+	const char *identifier = quote_qualified_identifier(typevar->schemaname,
+														typevar->relname);
 
 	appendStringInfo(str, "CREATE TYPE %s AS ENUM (", identifier);
 	AppendStringList(str, stmt->vals);
@@ -472,11 +468,9 @@ DeparseAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt)
 static void
 AppendAlterTypeSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
 {
-	List *names = NIL;
-
 	Assert(stmt->objectType == OBJECT_TYPE);
 
-	names = (List *) stmt->object;
+	List *names = (List *) stmt->object;
 	appendStringInfo(buf, "ALTER TYPE %s SET SCHEMA %s;", NameListToQuotedString(names),
 					 quote_identifier(stmt->newschema));
 }
@@ -499,11 +493,9 @@ DeparseAlterTypeOwnerStmt(AlterOwnerStmt *stmt)
 static void
 AppendAlterTypeOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
 {
-	List *names = NIL;
-
 	Assert(stmt->objectType == OBJECT_TYPE);
 
-	names = (List *) stmt->object;
+	List *names = (List *) stmt->object;
 	appendStringInfo(buf, "ALTER TYPE %s OWNER TO %s;", NameListToQuotedString(names),
 					 RoleSpecString(stmt->newowner, true));
 }

@@ -60,18 +60,14 @@ FormatCollateBEQualified(Oid collate_oid)
 char *
 FormatCollateExtended(Oid collid, bits16 flags)
 {
-	HeapTuple tuple = NULL;
-	Form_pg_collation collform = NULL;
-	char *buf = NULL;
 	char *nspname = NULL;
-	char *typname = NULL;
 
 	if (collid == InvalidOid && (flags & FORMAT_COLLATE_ALLOW_INVALID) != 0)
 	{
 		return pstrdup("-");
 	}
 
-	tuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+	HeapTuple tuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
 	if (!HeapTupleIsValid(tuple))
 	{
 		if ((flags & FORMAT_COLLATE_ALLOW_INVALID) != 0)
@@ -83,7 +79,7 @@ FormatCollateExtended(Oid collid, bits16 flags)
 			elog(ERROR, "cache lookup failed for collate %u", collid);
 		}
 	}
-	collform = (Form_pg_collation) GETSTRUCT(tuple);
+	Form_pg_collation collform = (Form_pg_collation) GETSTRUCT(tuple);
 
 	if ((flags & FORMAT_COLLATE_FORCE_QUALIFY) == 0 && CollationIsVisible(collid))
 	{
@@ -94,9 +90,9 @@ FormatCollateExtended(Oid collid, bits16 flags)
 		nspname = get_namespace_name_or_temp(collform->collnamespace);
 	}
 
-	typname = NameStr(collform->collname);
+	char *typname = NameStr(collform->collname);
 
-	buf = quote_qualified_identifier(nspname, typname);
+	char *buf = quote_qualified_identifier(nspname, typname);
 
 	ReleaseSysCache(tuple);
 

@@ -143,11 +143,9 @@ QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type)
 {
 	char *schemaName = NULL;
 	char *functionName = NULL;
-	Oid funcid = InvalidOid;
-	HeapTuple proctup;
 
-	funcid = LookupFuncWithArgs(type, func, true);
-	proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+	Oid funcid = LookupFuncWithArgs(type, func, true);
+	HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
 
 	/*
 	 * We can not qualify the function if the catalogs do not have any records.
@@ -156,9 +154,7 @@ QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type)
 	 */
 	if (HeapTupleIsValid(proctup))
 	{
-		Form_pg_proc procform;
-
-		procform = (Form_pg_proc) GETSTRUCT(proctup);
+		Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);
 		schemaName = get_namespace_name(procform->pronamespace);
 		functionName = NameStr(procform->proname);
 		functionName = pstrdup(functionName); /* we release the tuple before used */

@@ -53,17 +53,15 @@ GetTypeNamespaceNameByNameList(List *names)
 static Oid
 TypeOidGetNamespaceOid(Oid typeOid)
 {
-	Form_pg_type typeData = NULL;
 	HeapTuple typeTuple = SearchSysCache1(TYPEOID, typeOid);
-	Oid typnamespace = InvalidOid;
 
 	if (!HeapTupleIsValid(typeTuple))
 	{
 		elog(ERROR, "citus cache lookup failed");
 		return InvalidOid;
 	}
-	typeData = (Form_pg_type) GETSTRUCT(typeTuple);
-	typnamespace = typeData->typnamespace;
+	Form_pg_type typeData = (Form_pg_type) GETSTRUCT(typeTuple);
+	Oid typnamespace = typeData->typnamespace;
 
 	ReleaseSysCache(typeTuple);
 
@@ -161,11 +159,9 @@ QualifyCreateEnumStmt(CreateEnumStmt *stmt)
 void
 QualifyAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt)
 {
-	List *names = NIL;
-
 	Assert(stmt->objectType == OBJECT_TYPE);
 
-	names = (List *) stmt->object;
+	List *names = (List *) stmt->object;
 	if (list_length(names) == 1)
 	{
 		/* not qualified with schema, lookup type and its schema s*/
@@ -179,11 +175,9 @@ QualifyAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt)
 void
 QualifyAlterTypeOwnerStmt(AlterOwnerStmt *stmt)
 {
-	List *names = NIL;
-
 	Assert(stmt->objectType == OBJECT_TYPE);
 
-	names = (List *) stmt->object;
+	List *names = (List *) stmt->object;
 	if (list_length(names) == 1)
 	{
 		/* not qualified with schema, lookup type and its schema s*/

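The qualify/deparse hunks above also show the conversion's limit: a declaration stays at the top of its block when the variable is assigned on only some control-flow paths, or assigned more than once, as with schemaName and functionName in QualifyFunctionSchemaName. A small hypothetical sketch (LabelSketch is not from this diff) of a declaration the rewrite must leave in place:

/* label cannot be merged into an assignment: it is only written in one branch */
static const char *
LabelSketch(bool haveName, const char *name)
{
	const char *label = "-";

	if (haveName)
	{
		label = name;
	}

	return label;
}
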
@ -612,7 +612,6 @@ AdaptiveExecutor(CitusScanState *scanState)
|
|||
TupleTableSlot *resultSlot = NULL;
|
||||
|
||||
DistributedPlan *distributedPlan = scanState->distributedPlan;
|
||||
DistributedExecution *execution = NULL;
|
||||
EState *executorState = ScanStateGetExecutorState(scanState);
|
||||
ParamListInfo paramListInfo = executorState->es_param_list_info;
|
||||
TupleDesc tupleDescriptor = ScanStateGetTupleDescriptor(scanState);
|
||||
|
@ -645,10 +644,13 @@ AdaptiveExecutor(CitusScanState *scanState)
|
|||
scanState->tuplestorestate =
|
||||
tuplestore_begin_heap(randomAccess, interTransactions, work_mem);
|
||||
|
||||
execution = CreateDistributedExecution(distributedPlan->modLevel, taskList,
|
||||
distributedPlan->hasReturning, paramListInfo,
|
||||
tupleDescriptor,
|
||||
scanState->tuplestorestate, targetPoolSize);
|
||||
DistributedExecution *execution = CreateDistributedExecution(
|
||||
distributedPlan->modLevel, taskList,
|
||||
distributedPlan->
|
||||
hasReturning, paramListInfo,
|
||||
tupleDescriptor,
|
||||
scanState->
|
||||
tuplestorestate, targetPoolSize);
|
||||
|
||||
/*
|
||||
* Make sure that we acquire the appropriate locks even if the local tasks
|
||||
|
@ -715,7 +717,6 @@ static void
|
|||
RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution)
|
||||
{
|
||||
uint64 rowsProcessed = ExecuteLocalTaskList(scanState, execution->localTaskList);
|
||||
EState *executorState = NULL;
|
||||
|
||||
LocalExecutionHappened = true;
|
||||
|
||||
|
@ -725,7 +726,7 @@ RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution)
|
|||
* and in AdaptiveExecutor. Instead, we set executorState here and skip updating it
|
||||
* for reference table modifications in AdaptiveExecutor.
|
||||
*/
|
||||
executorState = ScanStateGetExecutorState(scanState);
|
||||
EState *executorState = ScanStateGetExecutorState(scanState);
|
||||
executorState->es_processed = rowsProcessed;
|
||||
}
|
||||
|
||||
|
@ -782,7 +783,6 @@ ExecuteTaskListExtended(RowModifyLevel modLevel, List *taskList,
|
|||
TupleDesc tupleDescriptor, Tuplestorestate *tupleStore,
|
||||
bool hasReturning, int targetPoolSize)
|
||||
{
|
||||
DistributedExecution *execution = NULL;
|
||||
ParamListInfo paramListInfo = NULL;
|
||||
|
||||
/*
|
||||
|
@ -796,7 +796,7 @@ ExecuteTaskListExtended(RowModifyLevel modLevel, List *taskList,
|
|||
targetPoolSize = 1;
|
||||
}
|
||||
|
||||
execution =
|
||||
DistributedExecution *execution =
|
||||
CreateDistributedExecution(modLevel, taskList, hasReturning, paramListInfo,
|
||||
tupleDescriptor, tupleStore, targetPoolSize);
|
||||
|
||||
|
@ -993,8 +993,6 @@ DistributedPlanModifiesDatabase(DistributedPlan *plan)
|
|||
static bool
|
||||
TaskListModifiesDatabase(RowModifyLevel modLevel, List *taskList)
|
||||
{
|
||||
Task *firstTask = NULL;
|
||||
|
||||
if (modLevel > ROW_MODIFY_READONLY)
|
||||
{
|
||||
return true;
|
||||
|
@ -1010,7 +1008,7 @@ TaskListModifiesDatabase(RowModifyLevel modLevel, List *taskList)
|
|||
return false;
|
||||
}
|
||||
|
||||
firstTask = (Task *) linitial(taskList);
|
||||
Task *firstTask = (Task *) linitial(taskList);
|
||||
|
||||
return !ReadOnlyTask(firstTask->taskType);
|
||||
}
|
||||
|
@ -1027,8 +1025,6 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution)
|
|||
{
|
||||
List *taskList = execution->tasksToExecute;
|
||||
int taskCount = list_length(taskList);
|
||||
Task *task = NULL;
|
||||
bool selectForUpdate = false;
|
||||
|
||||
if (MultiShardCommitProtocol == COMMIT_PROTOCOL_BARE)
|
||||
{
|
||||
|
@ -1040,9 +1036,9 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution)
|
|||
return false;
|
||||
}
|
||||
|
||||
task = (Task *) linitial(taskList);
|
||||
Task *task = (Task *) linitial(taskList);
|
||||
|
||||
selectForUpdate = task->relationRowLockList != NIL;
|
||||
bool selectForUpdate = task->relationRowLockList != NIL;
|
||||
if (selectForUpdate)
|
||||
{
|
||||
/*
|
||||
|
@ -1114,16 +1110,12 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution)
|
|||
static bool
|
||||
TaskListRequires2PC(List *taskList)
|
||||
{
|
||||
Task *task = NULL;
|
||||
bool multipleTasks = false;
|
||||
uint64 anchorShardId = INVALID_SHARD_ID;
|
||||
|
||||
if (taskList == NIL)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
task = (Task *) linitial(taskList);
|
||||
Task *task = (Task *) linitial(taskList);
|
||||
if (task->replicationModel == REPLICATION_MODEL_2PC)
|
||||
{
|
||||
return true;
|
||||
|
@ -1136,13 +1128,13 @@ TaskListRequires2PC(List *taskList)
|
|||
* TODO: Do we ever need replicationModel in the Task structure?
|
||||
* Can't we always rely on anchorShardId?
|
||||
*/
|
||||
anchorShardId = task->anchorShardId;
|
||||
uint64 anchorShardId = task->anchorShardId;
|
||||
if (anchorShardId != INVALID_SHARD_ID && ReferenceTableShardId(anchorShardId))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
multipleTasks = list_length(taskList) > 1;
|
||||
bool multipleTasks = list_length(taskList) > 1;
|
||||
if (!ReadOnlyTask(task->taskType) &&
|
||||
multipleTasks && MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC)
|
||||
{
|
||||
|
@ -1190,7 +1182,6 @@ ReadOnlyTask(TaskType taskType)
|
|||
static bool
|
||||
SelectForUpdateOnReferenceTable(RowModifyLevel modLevel, List *taskList)
|
||||
{
|
||||
Task *task = NULL;
|
||||
ListCell *rtiLockCell = NULL;
|
||||
|
||||
if (modLevel != ROW_MODIFY_READONLY)
|
||||
|
@ -1204,7 +1195,7 @@ SelectForUpdateOnReferenceTable(RowModifyLevel modLevel, List *taskList)
|
|||
return false;
|
||||
}
|
||||
|
||||
task = (Task *) linitial(taskList);
|
||||
Task *task = (Task *) linitial(taskList);
|
||||
foreach(rtiLockCell, task->relationRowLockList)
|
||||
{
|
||||
RelationRowLock *relationRowLock = (RelationRowLock *) lfirst(rtiLockCell);
|
||||
|
@ -1441,7 +1432,6 @@ AssignTasksToConnections(DistributedExecution *execution)
|
|||
foreach(taskCell, taskList)
|
||||
{
|
||||
Task *task = (Task *) lfirst(taskCell);
|
||||
ShardCommandExecution *shardCommandExecution = NULL;
|
||||
ListCell *taskPlacementCell = NULL;
|
||||
bool placementExecutionReady = true;
|
||||
int placementExecutionIndex = 0;
|
||||
|
@ -1450,7 +1440,7 @@ AssignTasksToConnections(DistributedExecution *execution)
|
|||
/*
|
||||
* Execution of a command on a shard, which may have multiple replicas.
|
||||
*/
|
||||
shardCommandExecution =
|
||||
ShardCommandExecution *shardCommandExecution =
|
||||
(ShardCommandExecution *) palloc0(sizeof(ShardCommandExecution));
|
||||
shardCommandExecution->task = task;
|
||||
shardCommandExecution->executionOrder = ExecutionOrderForTask(modLevel, task);
|
||||
|
@ -1467,10 +1457,7 @@ AssignTasksToConnections(DistributedExecution *execution)
|
|||
foreach(taskPlacementCell, task->taskPlacementList)
|
||||
{
|
||||
ShardPlacement *taskPlacement = (ShardPlacement *) lfirst(taskPlacementCell);
|
||||
List *placementAccessList = NULL;
|
||||
MultiConnection *connection = NULL;
|
||||
int connectionFlags = 0;
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
char *nodeName = taskPlacement->nodeName;
|
||||
int nodePort = taskPlacement->nodePort;
|
||||
WorkerPool *workerPool = FindOrCreateWorkerPool(execution, nodeName,
|
||||
|
@ -1480,7 +1467,7 @@ AssignTasksToConnections(DistributedExecution *execution)
|
|||
* Execution of a command on a shard placement, which may not always
|
||||
* happen if the query is read-only and the shard has multiple placements.
|
||||
*/
|
||||
placementExecution =
|
||||
TaskPlacementExecution *placementExecution =
|
||||
(TaskPlacementExecution *) palloc0(sizeof(TaskPlacementExecution));
|
||||
placementExecution->shardCommandExecution = shardCommandExecution;
|
||||
placementExecution->shardPlacement = taskPlacement;
|
||||
|
@ -1501,15 +1488,16 @@ AssignTasksToConnections(DistributedExecution *execution)
|
|||
|
||||
placementExecutionIndex++;
|
||||
|
||||
placementAccessList = PlacementAccessListForTask(task, taskPlacement);
|
||||
List *placementAccessList = PlacementAccessListForTask(task, taskPlacement);
|
||||
|
||||
/*
|
||||
* Determine whether the task has to be assigned to a particular connection
|
||||
* due to a preceding access to the placement in the same transaction.
|
||||
*/
|
||||
connection = GetConnectionIfPlacementAccessedInXact(connectionFlags,
|
||||
placementAccessList,
|
||||
NULL);
|
||||
MultiConnection *connection = GetConnectionIfPlacementAccessedInXact(
|
||||
connectionFlags,
|
||||
placementAccessList,
|
||||
NULL);
|
||||
if (connection != NULL)
|
||||
{
|
||||
/*
|
||||
|
@ -1670,7 +1658,6 @@ FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int node
|
|||
{
|
||||
WorkerPool *workerPool = NULL;
|
||||
ListCell *workerCell = NULL;
|
||||
int nodeConnectionCount = 0;
|
||||
|
||||
foreach(workerCell, execution->workerList)
|
||||
{
|
||||
|
@ -1690,7 +1677,7 @@ FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int node
|
|||
workerPool->distributedExecution = execution;
|
||||
|
||||
/* "open" connections aggressively when there are cached connections */
|
||||
nodeConnectionCount = MaxCachedConnectionsPerWorker;
|
||||
int nodeConnectionCount = MaxCachedConnectionsPerWorker;
|
||||
workerPool->maxNewConnectionsPerCycle = Max(1, nodeConnectionCount);
|
||||
|
||||
dlist_init(&workerPool->pendingTaskQueue);
|
||||
|
@ -1775,8 +1762,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
|
|||
static bool
|
||||
ShouldRunTasksSequentially(List *taskList)
|
||||
{
|
||||
Task *initialTask = NULL;
|
||||
|
||||
if (list_length(taskList) < 2)
|
||||
{
|
||||
/* single task plans are already qualified as sequential by definition */
|
||||
|
@ -1784,7 +1769,7 @@ ShouldRunTasksSequentially(List *taskList)
|
|||
}
|
||||
|
||||
/* all the tasks are the same, so we only look one */
|
||||
initialTask = (Task *) linitial(taskList);
|
||||
Task *initialTask = (Task *) linitial(taskList);
|
||||
if (initialTask->rowValuesLists != NIL)
|
||||
{
|
||||
/* found a multi-row INSERT */
|
||||
|
@ -1860,7 +1845,6 @@ RunDistributedExecution(DistributedExecution *execution)
|
|||
|
||||
while (execution->unfinishedTaskCount > 0 && !cancellationReceived)
|
||||
{
|
||||
int eventCount = 0;
|
||||
int eventIndex = 0;
|
||||
ListCell *workerCell = NULL;
|
||||
long timeout = NextEventTimeout(execution);
|
||||
|
@ -1906,14 +1890,13 @@ RunDistributedExecution(DistributedExecution *execution)
|
|||
}
|
||||
|
||||
/* wait for I/O events */
|
||||
eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events,
|
||||
eventSetSize, WAIT_EVENT_CLIENT_READ);
|
||||
int eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events,
|
||||
eventSetSize, WAIT_EVENT_CLIENT_READ);
|
||||
|
||||
/* process I/O events */
|
||||
for (; eventIndex < eventCount; eventIndex++)
|
||||
{
|
||||
WaitEvent *event = &events[eventIndex];
|
||||
WorkerSession *session = NULL;
|
||||
|
||||
if (event->events & WL_POSTMASTER_DEATH)
|
||||
{
|
||||
|
@ -1944,7 +1927,7 @@ RunDistributedExecution(DistributedExecution *execution)
|
|||
continue;
|
||||
}
|
||||
|
||||
session = (WorkerSession *) event->user_data;
|
||||
WorkerSession *session = (WorkerSession *) event->user_data;
|
||||
session->latestUnconsumedWaitEvents = event->events;
|
||||
|
||||
ConnectionStateMachine(session);
|
||||
|
@ -2001,7 +1984,6 @@ ManageWorkerPool(WorkerPool *workerPool)
|
|||
int failedConnectionCount = workerPool->failedConnectionCount;
|
||||
int readyTaskCount = workerPool->readyTaskCount;
|
||||
int newConnectionCount = 0;
|
||||
int connectionIndex = 0;
|
||||
|
||||
/* we should always have more (or equal) active connections than idle connections */
|
||||
Assert(activeConnectionCount >= idleConnectionCount);
|
||||
|
@ -2091,19 +2073,16 @@ ManageWorkerPool(WorkerPool *workerPool)
|
|||
ereport(DEBUG4, (errmsg("opening %d new connections to %s:%d", newConnectionCount,
|
||||
workerPool->nodeName, workerPool->nodePort)));
|
||||
|
||||
for (connectionIndex = 0; connectionIndex < newConnectionCount; connectionIndex++)
|
||||
for (int connectionIndex = 0; connectionIndex < newConnectionCount; connectionIndex++)
|
||||
{
|
||||
MultiConnection *connection = NULL;
|
||||
WorkerSession *session = NULL;
|
||||
|
||||
/* experimental: just to see the perf benefits of caching connections */
|
||||
int connectionFlags = 0;
|
||||
|
||||
/* open a new connection to the worker */
|
||||
connection = StartNodeUserDatabaseConnection(connectionFlags,
|
||||
workerPool->nodeName,
|
||||
workerPool->nodePort,
|
||||
NULL, NULL);
|
||||
MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
|
||||
workerPool->nodeName,
|
||||
workerPool->nodePort,
|
||||
NULL, NULL);
|
||||
|
||||
/*
|
||||
* Assign the initial state in the connection state machine. The connection
|
||||
|
@ -2119,7 +2098,7 @@ ManageWorkerPool(WorkerPool *workerPool)
|
|||
connection->claimedExclusively = true;
|
||||
|
||||
/* create a session for the connection */
|
||||
session = FindOrCreateWorkerSession(workerPool, connection);
|
||||
WorkerSession *session = FindOrCreateWorkerSession(workerPool, connection);
|
||||
|
||||
/* always poll the connection in the first round */
|
||||
UpdateConnectionWaitFlags(session, WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
|
||||
|
@ -2250,7 +2229,6 @@ NextEventTimeout(DistributedExecution *execution)
|
|||
foreach(workerCell, execution->workerList)
|
||||
{
|
||||
WorkerPool *workerPool = (WorkerPool *) lfirst(workerCell);
|
||||
int initiatedConnectionCount = 0;
|
||||
|
||||
if (workerPool->failed)
|
||||
{
|
||||
|
@ -2278,7 +2256,7 @@ NextEventTimeout(DistributedExecution *execution)
|
|||
}
|
||||
}
|
||||
|
||||
initiatedConnectionCount = list_length(workerPool->sessionList);
|
||||
int initiatedConnectionCount = list_length(workerPool->sessionList);
|
||||
|
||||
/*
|
||||
* If there are connections to open we wait at most up to the end of the
|
||||
|
@ -2347,8 +2325,6 @@ ConnectionStateMachine(WorkerSession *session)
|
|||
|
||||
case MULTI_CONNECTION_CONNECTING:
|
||||
{
|
||||
PostgresPollingStatusType pollMode;
|
||||
|
||||
ConnStatusType status = PQstatus(connection->pgConn);
|
||||
if (status == CONNECTION_OK)
|
||||
{
|
||||
|
@ -2372,7 +2348,7 @@ ConnectionStateMachine(WorkerSession *session)
|
|||
break;
|
||||
}
|
||||
|
||||
pollMode = PQconnectPoll(connection->pgConn);
|
||||
PostgresPollingStatusType pollMode = PQconnectPoll(connection->pgConn);
|
||||
if (pollMode == PGRES_POLLING_FAILED)
|
||||
{
|
||||
connection->connectionState = MULTI_CONNECTION_FAILED;
|
||||
|
@ -2543,15 +2519,13 @@ ConnectionStateMachine(WorkerSession *session)
|
|||
static void
|
||||
Activate2PCIfModifyingTransactionExpandsToNewNode(WorkerSession *session)
|
||||
{
|
||||
DistributedExecution *execution = NULL;
|
||||
|
||||
if (MultiShardCommitProtocol != COMMIT_PROTOCOL_2PC)
|
||||
{
|
||||
/* we don't need 2PC, so no need to continue */
|
||||
return;
|
||||
}
|
||||
|
||||
execution = session->workerPool->distributedExecution;
|
||||
DistributedExecution *execution = session->workerPool->distributedExecution;
|
||||
if (TransactionModifiedDistributedTable(execution) &&
|
||||
DistributedExecutionModifiesDatabase(execution) &&
|
||||
!ConnectionModifiedPlacement(session->connection))
|
||||
|
@ -2622,10 +2596,8 @@ TransactionStateMachine(WorkerSession *session)
|
|||
}
|
||||
else
|
||||
{
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
bool placementExecutionStarted = false;
|
||||
|
||||
placementExecution = PopPlacementExecution(session);
|
||||
TaskPlacementExecution *placementExecution = PopPlacementExecution(
|
||||
session);
|
||||
if (placementExecution == NULL)
|
||||
{
|
||||
/*
|
||||
|
@ -2637,7 +2609,7 @@ TransactionStateMachine(WorkerSession *session)
|
|||
break;
|
||||
}
|
||||
|
||||
placementExecutionStarted =
|
||||
bool placementExecutionStarted =
|
||||
StartPlacementExecutionOnSession(placementExecution, session);
|
||||
if (!placementExecutionStarted)
|
||||
{
|
||||
|
@ -2659,9 +2631,7 @@ TransactionStateMachine(WorkerSession *session)
|
|||
case REMOTE_TRANS_SENT_BEGIN:
|
||||
case REMOTE_TRANS_CLEARING_RESULTS:
|
||||
{
|
||||
PGresult *result = NULL;
|
||||
|
||||
result = PQgetResult(connection->pgConn);
|
||||
PGresult *result = PQgetResult(connection->pgConn);
|
||||
if (result != NULL)
|
||||
{
|
||||
if (!IsResponseOK(result))
|
||||
|
@ -2715,10 +2685,8 @@ TransactionStateMachine(WorkerSession *session)
|
|||
|
||||
case REMOTE_TRANS_STARTED:
|
||||
{
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
bool placementExecutionStarted = false;
|
||||
|
||||
placementExecution = PopPlacementExecution(session);
|
||||
TaskPlacementExecution *placementExecution = PopPlacementExecution(
|
||||
session);
|
||||
if (placementExecution == NULL)
|
||||
{
|
||||
/* no tasks are ready to be executed at the moment */
|
||||
|
@ -2726,7 +2694,7 @@ TransactionStateMachine(WorkerSession *session)
|
|||
break;
|
||||
}
|
||||
|
||||
placementExecutionStarted =
|
||||
bool placementExecutionStarted =
|
||||
StartPlacementExecutionOnSession(placementExecution, session);
|
||||
if (!placementExecutionStarted)
|
||||
{
|
||||
|
@ -2742,7 +2710,6 @@ TransactionStateMachine(WorkerSession *session)
|
|||
|
||||
case REMOTE_TRANS_SENT_COMMAND:
|
||||
{
|
||||
bool fetchDone = false;
|
||||
TaskPlacementExecution *placementExecution = session->currentTask;
|
||||
ShardCommandExecution *shardCommandExecution =
|
||||
placementExecution->shardCommandExecution;
|
||||
|
@ -2754,7 +2721,7 @@ TransactionStateMachine(WorkerSession *session)
|
|||
storeRows = false;
|
||||
}
|
||||
|
||||
fetchDone = ReceiveResults(session, storeRows);
|
||||
bool fetchDone = ReceiveResults(session, storeRows);
|
||||
if (!fetchDone)
|
||||
{
|
||||
break;
|
||||
|
@ -2810,7 +2777,6 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
|
|||
static bool
|
||||
CheckConnectionReady(WorkerSession *session)
|
||||
{
|
||||
int sendStatus = 0;
|
||||
MultiConnection *connection = session->connection;
|
||||
int waitFlags = WL_SOCKET_READABLE;
|
||||
bool connectionReady = false;
|
||||
|
@ -2823,7 +2789,7 @@ CheckConnectionReady(WorkerSession *session)
|
|||
}
|
||||
|
||||
/* try to send all pending data */
|
||||
sendStatus = PQflush(connection->pgConn);
|
||||
int sendStatus = PQflush(connection->pgConn);
|
||||
if (sendStatus == -1)
|
||||
{
|
||||
connection->connectionState = MULTI_CONNECTION_LOST;
|
||||
|
@ -2865,10 +2831,9 @@ CheckConnectionReady(WorkerSession *session)
|
|||
static TaskPlacementExecution *
|
||||
PopPlacementExecution(WorkerSession *session)
|
||||
{
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
WorkerPool *workerPool = session->workerPool;
|
||||
|
||||
placementExecution = PopAssignedPlacementExecution(session);
|
||||
TaskPlacementExecution *placementExecution = PopAssignedPlacementExecution(session);
|
||||
if (placementExecution == NULL)
|
||||
{
|
||||
if (session->commandsSent > 0 && UseConnectionPerPlacement())
|
||||
|
@ -2894,7 +2859,6 @@ PopPlacementExecution(WorkerSession *session)
|
|||
static TaskPlacementExecution *
|
||||
PopAssignedPlacementExecution(WorkerSession *session)
|
||||
{
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
dlist_head *readyTaskQueue = &(session->readyTaskQueue);
|
||||
|
||||
if (dlist_is_empty(readyTaskQueue))
|
||||
|
@ -2902,9 +2866,10 @@ PopAssignedPlacementExecution(WorkerSession *session)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
placementExecution = dlist_container(TaskPlacementExecution,
|
||||
sessionReadyQueueNode,
|
||||
dlist_pop_head_node(readyTaskQueue));
|
||||
TaskPlacementExecution *placementExecution = dlist_container(TaskPlacementExecution,
|
||||
sessionReadyQueueNode,
|
||||
dlist_pop_head_node(
|
||||
readyTaskQueue));
|
||||
|
||||
return placementExecution;
|
||||
}
|
||||
|
@ -2916,7 +2881,6 @@ PopAssignedPlacementExecution(WorkerSession *session)
|
|||
static TaskPlacementExecution *
|
||||
PopUnassignedPlacementExecution(WorkerPool *workerPool)
|
||||
{
|
||||
TaskPlacementExecution *placementExecution = NULL;
|
||||
dlist_head *readyTaskQueue = &(workerPool->readyTaskQueue);
|
||||
|
||||
if (dlist_is_empty(readyTaskQueue))
|
||||
|
@ -2924,9 +2888,10 @@ PopUnassignedPlacementExecution(WorkerPool *workerPool)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
placementExecution = dlist_container(TaskPlacementExecution,
|
||||
workerReadyQueueNode,
|
||||
dlist_pop_head_node(readyTaskQueue));
|
||||
TaskPlacementExecution *placementExecution = dlist_container(TaskPlacementExecution,
|
||||
workerReadyQueueNode,
|
||||
dlist_pop_head_node(
|
||||
readyTaskQueue));
|
||||
|
||||
workerPool->readyTaskCount--;
|
||||
|
||||
|
@ -2960,7 +2925,6 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution,
|
|||
List *placementAccessList = PlacementAccessListForTask(task, taskPlacement);
|
||||
char *queryString = task->queryString;
|
||||
int querySent = 0;
|
||||
int singleRowMode = 0;
|
||||
|
||||
/*
|
||||
* Make sure that subsequent commands on the same placement
|
||||
|
@ -3007,7 +2971,7 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution,
|
|||
return false;
|
||||
}
|
||||
|
||||
singleRowMode = PQsetSingleRowMode(connection->pgConn);
|
||||
int singleRowMode = PQsetSingleRowMode(connection->pgConn);
|
||||
if (singleRowMode == 0)
|
||||
{
|
||||
connection->connectionState = MULTI_CONNECTION_LOST;
|
||||
|
@ -3036,7 +3000,6 @@ ReceiveResults(WorkerSession *session, bool storeRows)
|
|||
uint32 expectedColumnCount = 0;
|
||||
char **columnArray = execution->columnArray;
|
||||
Tuplestorestate *tupleStore = execution->tupleStore;
|
||||
-	MemoryContext ioContext = NULL;

 	if (tupleDescriptor != NULL)
 	{
@@ -3048,19 +3011,16 @@ ReceiveResults(WorkerSession *session, bool storeRows)
	 * into tuple. The context is reseted on every row, thus we create it at the
	 * start of the loop and reset on every iteration.
	 */
-	ioContext = AllocSetContextCreate(CurrentMemoryContext,
-									  "IoContext",
-									  ALLOCSET_DEFAULT_MINSIZE,
-									  ALLOCSET_DEFAULT_INITSIZE,
-									  ALLOCSET_DEFAULT_MAXSIZE);
+	MemoryContext ioContext = AllocSetContextCreate(CurrentMemoryContext,
+													"IoContext",
+													ALLOCSET_DEFAULT_MINSIZE,
+													ALLOCSET_DEFAULT_INITSIZE,
+													ALLOCSET_DEFAULT_MAXSIZE);

 	while (!PQisBusy(connection->pgConn))
 	{
-		uint32 rowIndex = 0;
-		uint32 columnIndex = 0;
 		uint32 rowsProcessed = 0;
-		uint32 columnCount = 0;
-		ExecStatusType resultStatus = 0;

 		PGresult *result = PQgetResult(connection->pgConn);
 		if (result == NULL)
@@ -3070,7 +3030,7 @@ ReceiveResults(WorkerSession *session, bool storeRows)
 			break;
 		}

-		resultStatus = PQresultStatus(result);
+		ExecStatusType resultStatus = PQresultStatus(result);
 		if (resultStatus == PGRES_COMMAND_OK)
 		{
 			char *currentAffectedTupleString = PQcmdTuples(result);
@@ -3121,7 +3081,7 @@ ReceiveResults(WorkerSession *session, bool storeRows)
 		}

 		rowsProcessed = PQntuples(result);
-		columnCount = PQnfields(result);
+		uint32 columnCount = PQnfields(result);

 		if (columnCount != expectedColumnCount)
 		{
@@ -3130,10 +3090,8 @@ ReceiveResults(WorkerSession *session, bool storeRows)
							columnCount, expectedColumnCount)));
 		}

-		for (rowIndex = 0; rowIndex < rowsProcessed; rowIndex++)
+		for (uint32 rowIndex = 0; rowIndex < rowsProcessed; rowIndex++)
 		{
-			HeapTuple heapTuple = NULL;
-			MemoryContext oldContextPerRow = NULL;
 			memset(columnArray, 0, columnCount * sizeof(char *));

-			for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+			for (uint32 columnIndex = 0; columnIndex < columnCount; columnIndex++)
@@ -3159,9 +3117,10 @@ ReceiveResults(WorkerSession *session, bool storeRows)
			 * protects us from any memory leaks that might be present in I/O functions
			 * called by BuildTupleFromCStrings.
			 */
-			oldContextPerRow = MemoryContextSwitchTo(ioContext);
+			MemoryContext oldContextPerRow = MemoryContextSwitchTo(ioContext);

-			heapTuple = BuildTupleFromCStrings(attributeInputMetadata, columnArray);
+			HeapTuple heapTuple = BuildTupleFromCStrings(attributeInputMetadata,
+														 columnArray);

 			MemoryContextSwitchTo(oldContextPerRow);

@@ -3309,7 +3268,6 @@ PlacementExecutionDone(TaskPlacementExecution *placementExecution, bool succeede
 	ShardCommandExecution *shardCommandExecution =
 		placementExecution->shardCommandExecution;
 	TaskExecutionState executionState = shardCommandExecution->executionState;
-	TaskExecutionState newExecutionState = TASK_EXECUTION_NOT_FINISHED;
 	bool failedPlacementExecutionIsOnPendingQueue = false;

 	/* mark the placement execution as finished */
@@ -3360,7 +3318,8 @@ PlacementExecutionDone(TaskPlacementExecution *placementExecution, bool succeede
	 * Update unfinishedTaskCount only when state changes from not finished to
	 * finished or failed state.
	 */
-	newExecutionState = TaskExecutionStateMachine(shardCommandExecution);
+	TaskExecutionState newExecutionState = TaskExecutionStateMachine(
+		shardCommandExecution);
 	if (newExecutionState == TASK_EXECUTION_FINISHED)
 	{
 		execution->unfinishedTaskCount--;
@@ -3597,21 +3556,18 @@ TaskExecutionStateMachine(ShardCommandExecution *shardCommandExecution)
 static WaitEventSet *
 BuildWaitEventSet(List *sessionList)
 {
-	WaitEventSet *waitEventSet = NULL;
 	ListCell *sessionCell = NULL;

 	/* additional 2 is for postmaster and latch */
 	int eventSetSize = list_length(sessionList) + 2;

-	waitEventSet =
+	WaitEventSet *waitEventSet =
 		CreateWaitEventSet(CurrentMemoryContext, eventSetSize);

 	foreach(sessionCell, sessionList)
 	{
 		WorkerSession *session = lfirst(sessionCell);
 		MultiConnection *connection = session->connection;
-		int sock = 0;
-		int waitEventSetIndex = 0;

 		if (connection->pgConn == NULL)
 		{
@@ -3625,15 +3581,16 @@ BuildWaitEventSet(List *sessionList)
 			continue;
 		}

-		sock = PQsocket(connection->pgConn);
+		int sock = PQsocket(connection->pgConn);
 		if (sock == -1)
 		{
 			/* connection was closed */
 			continue;
 		}

-		waitEventSetIndex = AddWaitEventToSet(waitEventSet, connection->waitFlags, sock,
-											  NULL, (void *) session);
+		int waitEventSetIndex = AddWaitEventToSet(waitEventSet, connection->waitFlags,
+												  sock,
+												  NULL, (void *) session);
 		session->waitEventSetIndex = waitEventSetIndex;
 	}

@@ -3657,7 +3614,6 @@ UpdateWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
 	{
 		WorkerSession *session = lfirst(sessionCell);
 		MultiConnection *connection = session->connection;
-		int sock = 0;
 		int waitEventSetIndex = session->waitEventSetIndex;

 		if (connection->pgConn == NULL)
@@ -3672,7 +3628,7 @@ UpdateWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
 			continue;
 		}

-		sock = PQsocket(connection->pgConn);
+		int sock = PQsocket(connection->pgConn);
 		if (sock == -1)
 		{
 			/* connection was closed */
@@ -3724,14 +3680,13 @@ ExtractParametersFromParamList(ParamListInfo paramListInfo,
							   const char ***parameterValues, bool
							   useOriginalCustomTypeOids)
 {
-	int parameterIndex = 0;
 	int parameterCount = paramListInfo->numParams;

 	*parameterTypes = (Oid *) palloc0(parameterCount * sizeof(Oid));
 	*parameterValues = (const char **) palloc0(parameterCount * sizeof(char *));

 	/* get parameter types and values */
-	for (parameterIndex = 0; parameterIndex < parameterCount; parameterIndex++)
+	for (int parameterIndex = 0; parameterIndex < parameterCount; parameterIndex++)
 	{
 		ParamExternData *parameterData = &paramListInfo->params[parameterIndex];
 		Oid typeOutputFunctionId = InvalidOid;
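Every hunk in this commit applies the same mechanical rewrite: a variable declared at the top of a block and first assigned further down has its declaration folded into that first assignment, so the useless "= NULL" / "= 0" initializer disappears. A minimal sketch of the before/after shape in plain C (hypothetical example code, not taken from this diff):

#include <stdio.h>
#include <string.h>

/* Before: C89-style declaration at the top of the block, with the first
 * real assignment several lines below - the pattern the script removes. */
static size_t
TextLengthBefore(const char *text)
{
	size_t textLength = 0;

	textLength = strlen(text);
	return textLength;
}

/* After: declaration merged into the first assignment, exactly as the
 * hunks above do for resultSlot, copyOutState, connStatusType, etc. */
static size_t
TextLengthAfter(const char *text)
{
	size_t textLength = strlen(text);
	return textLength;
}

int
main(void)
{
	printf("%zu %zu\n", TextLengthBefore("citus"), TextLengthAfter("citus"));
	return 0;
}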
@@ -119,12 +119,11 @@ RegisterCitusCustomScanMethods(void)
 static void
 CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
 {
-	CitusScanState *scanState = NULL;
 	DistributedPlan *distributedPlan = NULL;

 	MarkCitusInitiatedCoordinatorBackend();

-	scanState = (CitusScanState *) node;
+	CitusScanState *scanState = (CitusScanState *) node;

 #if PG_VERSION_NUM >= 120000
 	ExecInitResultSlot(&scanState->customScanState.ss.ps, &TTSOpsMinimalTuple);
@@ -152,7 +151,6 @@ TupleTableSlot *
 CitusExecScan(CustomScanState *node)
 {
 	CitusScanState *scanState = (CitusScanState *) node;
-	TupleTableSlot *resultSlot = NULL;

 	if (!scanState->finishedRemoteScan)
 	{
@@ -161,7 +159,7 @@ CitusExecScan(CustomScanState *node)
 		scanState->finishedRemoteScan = true;
 	}

-	resultSlot = ReturnTupleFromTuplestore(scanState);
+	TupleTableSlot *resultSlot = ReturnTupleFromTuplestore(scanState);

 	return resultSlot;
 }
@@ -179,21 +177,18 @@ static void
 CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags)
 {
 	CitusScanState *scanState = (CitusScanState *) node;
-	DistributedPlan *distributedPlan = NULL;
-	Job *workerJob = NULL;
-	Query *jobQuery = NULL;
-	List *taskList = NIL;

	/*
	 * We must not change the distributed plan since it may be reused across multiple
	 * executions of a prepared statement. Instead we create a deep copy that we only
	 * use for the current execution.
	 */
-	distributedPlan = scanState->distributedPlan = copyObject(scanState->distributedPlan);
+	DistributedPlan *distributedPlan = scanState->distributedPlan = copyObject(
+		scanState->distributedPlan);

-	workerJob = distributedPlan->workerJob;
-	jobQuery = workerJob->jobQuery;
-	taskList = workerJob->taskList;
+	Job *workerJob = distributedPlan->workerJob;
+	Query *jobQuery = workerJob->jobQuery;
+	List *taskList = workerJob->taskList;

 	if (workerJob->requiresMasterEvaluation)
 	{
@@ -407,8 +402,6 @@ ScanStateGetExecutorState(CitusScanState *scanState)
 CustomScan *
 FetchCitusCustomScanIfExists(Plan *plan)
 {
-	CustomScan *customScan = NULL;
-
 	if (plan == NULL)
 	{
 		return NULL;
@@ -419,7 +412,7 @@ FetchCitusCustomScanIfExists(Plan *plan)
 		return (CustomScan *) plan;
 	}

-	customScan = FetchCitusCustomScanIfExists(plan->lefttree);
+	CustomScan *customScan = FetchCitusCustomScanIfExists(plan->lefttree);

 	if (customScan == NULL)
 	{
@@ -457,9 +450,6 @@ IsCitusPlan(Plan *plan)
 bool
 IsCitusCustomScan(Plan *plan)
 {
-	CustomScan *customScan = NULL;
-	Node *privateNode = NULL;
-
 	if (plan == NULL)
 	{
 		return false;
@@ -470,13 +460,13 @@ IsCitusCustomScan(Plan *plan)
 		return false;
 	}

-	customScan = (CustomScan *) plan;
+	CustomScan *customScan = (CustomScan *) plan;
 	if (list_length(customScan->custom_private) == 0)
 	{
 		return false;
 	}

-	privateNode = (Node *) linitial(customScan->custom_private);
+	Node *privateNode = (Node *) linitial(customScan->custom_private);
 	if (!CitusIsA(privateNode, DistributedPlan))
 	{
 		return false;
@@ -93,7 +93,6 @@ static TupleTableSlot *
 CoordinatorInsertSelectExecScanInternal(CustomScanState *node)
 {
 	CitusScanState *scanState = (CitusScanState *) node;
-	TupleTableSlot *resultSlot = NULL;

 	if (!scanState->finishedRemoteScan)
 	{
@@ -197,7 +196,7 @@ CoordinatorInsertSelectExecScanInternal(CustomScanState *node)
 		scanState->finishedRemoteScan = true;
 	}

-	resultSlot = ReturnTupleFromTuplestore(scanState);
+	TupleTableSlot *resultSlot = ReturnTupleFromTuplestore(scanState);

 	return resultSlot;
 }
@@ -217,36 +216,34 @@ ExecuteSelectIntoColocatedIntermediateResults(Oid targetRelationId,
											  char *intermediateResultIdPrefix)
 {
 	ParamListInfo paramListInfo = executorState->es_param_list_info;
-	int partitionColumnIndex = -1;
-	List *columnNameList = NIL;
 	bool stopOnFailure = false;
-	char partitionMethod = 0;
-	CitusCopyDestReceiver *copyDest = NULL;
-	Query *queryCopy = NULL;

-	partitionMethod = PartitionMethod(targetRelationId);
+	char partitionMethod = PartitionMethod(targetRelationId);
 	if (partitionMethod == DISTRIBUTE_BY_NONE)
 	{
 		stopOnFailure = true;
 	}

 	/* Get column name list and partition column index for the target table */
-	columnNameList = BuildColumnNameListFromTargetList(targetRelationId,
-													   insertTargetList);
-	partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId,
-															  columnNameList);
+	List *columnNameList = BuildColumnNameListFromTargetList(targetRelationId,
+															 insertTargetList);
+	int partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId,
+																  columnNameList);

 	/* set up a DestReceiver that copies into the intermediate table */
-	copyDest = CreateCitusCopyDestReceiver(targetRelationId, columnNameList,
-										   partitionColumnIndex, executorState,
-										   stopOnFailure, intermediateResultIdPrefix);
+	CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(targetRelationId,
+																  columnNameList,
+																  partitionColumnIndex,
+																  executorState,
+																  stopOnFailure,
+																  intermediateResultIdPrefix);

	/*
	 * Make a copy of the query, since ExecuteQueryIntoDestReceiver may scribble on it
	 * and we want it to be replanned every time if it is stored in a prepared
	 * statement.
	 */
-	queryCopy = copyObject(selectQuery);
+	Query *queryCopy = copyObject(selectQuery);

 	ExecuteQueryIntoDestReceiver(queryCopy, paramListInfo, (DestReceiver *) copyDest);

@@ -268,36 +265,33 @@ ExecuteSelectIntoRelation(Oid targetRelationId, List *insertTargetList,
						  Query *selectQuery, EState *executorState)
 {
 	ParamListInfo paramListInfo = executorState->es_param_list_info;
-	int partitionColumnIndex = -1;
-	List *columnNameList = NIL;
 	bool stopOnFailure = false;
-	char partitionMethod = 0;
-	CitusCopyDestReceiver *copyDest = NULL;
-	Query *queryCopy = NULL;

-	partitionMethod = PartitionMethod(targetRelationId);
+	char partitionMethod = PartitionMethod(targetRelationId);
 	if (partitionMethod == DISTRIBUTE_BY_NONE)
 	{
 		stopOnFailure = true;
 	}

 	/* Get column name list and partition column index for the target table */
-	columnNameList = BuildColumnNameListFromTargetList(targetRelationId,
-													   insertTargetList);
-	partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId,
-															  columnNameList);
+	List *columnNameList = BuildColumnNameListFromTargetList(targetRelationId,
+															 insertTargetList);
+	int partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId,
+																  columnNameList);

 	/* set up a DestReceiver that copies into the distributed table */
-	copyDest = CreateCitusCopyDestReceiver(targetRelationId, columnNameList,
-										   partitionColumnIndex, executorState,
-										   stopOnFailure, NULL);
+	CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(targetRelationId,
+																  columnNameList,
+																  partitionColumnIndex,
+																  executorState,
+																  stopOnFailure, NULL);

	/*
	 * Make a copy of the query, since ExecuteQueryIntoDestReceiver may scribble on it
	 * and we want it to be replanned every time if it is stored in a prepared
	 * statement.
	 */
-	queryCopy = copyObject(selectQuery);
+	Query *queryCopy = copyObject(selectQuery);

 	ExecuteQueryIntoDestReceiver(queryCopy, paramListInfo, (DestReceiver *) copyDest);
@@ -111,10 +111,7 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS)
 	char *resultIdString = text_to_cstring(resultIdText);
 	text *queryText = PG_GETARG_TEXT_P(1);
 	char *queryString = text_to_cstring(queryText);
-	EState *estate = NULL;
-	List *nodeList = NIL;
 	bool writeLocalFile = false;
-	RemoteFileDestReceiver *resultDest = NULL;
 	ParamListInfo paramListInfo = NULL;

 	CheckCitusVersion(ERROR);
@@ -127,11 +124,13 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS)
	 */
 	BeginOrContinueCoordinatedTransaction();

-	nodeList = ActivePrimaryWorkerNodeList(NoLock);
-	estate = CreateExecutorState();
-	resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
-																		 estate, nodeList,
-																		 writeLocalFile);
+	List *nodeList = ActivePrimaryWorkerNodeList(NoLock);
+	EState *estate = CreateExecutorState();
+	RemoteFileDestReceiver *resultDest =
+		(RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
+																estate,
+																nodeList,
+																writeLocalFile);

 	ExecuteQueryStringIntoDestReceiver(queryString, paramListInfo,
									   (DestReceiver *) resultDest);
@@ -153,10 +152,8 @@ create_intermediate_result(PG_FUNCTION_ARGS)
 	char *resultIdString = text_to_cstring(resultIdText);
 	text *queryText = PG_GETARG_TEXT_P(1);
 	char *queryString = text_to_cstring(queryText);
-	EState *estate = NULL;
 	List *nodeList = NIL;
 	bool writeLocalFile = true;
-	RemoteFileDestReceiver *resultDest = NULL;
 	ParamListInfo paramListInfo = NULL;

 	CheckCitusVersion(ERROR);
@@ -169,10 +166,12 @@ create_intermediate_result(PG_FUNCTION_ARGS)
	 */
 	BeginOrContinueCoordinatedTransaction();

-	estate = CreateExecutorState();
-	resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
-																		 estate, nodeList,
-																		 writeLocalFile);
+	EState *estate = CreateExecutorState();
+	RemoteFileDestReceiver *resultDest =
+		(RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
+																estate,
+																nodeList,
+																writeLocalFile);

 	ExecuteQueryStringIntoDestReceiver(queryString, paramListInfo,
									   (DestReceiver *) resultDest);
@@ -193,9 +192,8 @@ DestReceiver *
 CreateRemoteFileDestReceiver(char *resultId, EState *executorState,
							 List *initialNodeList, bool writeLocalFile)
 {
-	RemoteFileDestReceiver *resultDest = NULL;
-
-	resultDest = (RemoteFileDestReceiver *) palloc0(sizeof(RemoteFileDestReceiver));
+	RemoteFileDestReceiver *resultDest = (RemoteFileDestReceiver *) palloc0(
+		sizeof(RemoteFileDestReceiver));

 	/* set up the DestReceiver function pointers */
 	resultDest->pub.receiveSlot = RemoteFileDestReceiverReceive;
@@ -228,7 +226,6 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,

 	const char *resultId = resultDest->resultId;

-	CopyOutState copyOutState = NULL;
 	const char *delimiterCharacter = "\t";
 	const char *nullPrintCharacter = "\\N";

@@ -240,7 +237,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
 	resultDest->tupleDescriptor = inputTupleDescriptor;

 	/* define how tuples will be serialised */
-	copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
+	CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
 	copyOutState->delim = (char *) delimiterCharacter;
 	copyOutState->null_print = (char *) nullPrintCharacter;
 	copyOutState->null_print_client = (char *) nullPrintCharacter;
@@ -256,12 +253,11 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
 	{
 		const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
 		const int fileMode = (S_IRUSR | S_IWUSR);
-		const char *fileName = NULL;

 		/* make sure the directory exists */
 		CreateIntermediateResultsDirectory();

-		fileName = QueryResultFileName(resultId);
+		const char *fileName = QueryResultFileName(resultId);

 		resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName,
																			 fileFlags,
@@ -273,7 +269,6 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
 		WorkerNode *workerNode = (WorkerNode *) lfirst(initialNodeCell);
 		char *nodeName = workerNode->workerName;
 		int nodePort = workerNode->workerPort;
-		MultiConnection *connection = NULL;

		/*
		 * We prefer to use a connection that is not associcated with
@@ -281,7 +276,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
		 * exclusively and that would prevent the consecutive DML/DDL
		 * use the same connection.
		 */
-		connection = StartNonDataAccessConnection(nodeName, nodePort);
+		MultiConnection *connection = StartNonDataAccessConnection(nodeName, nodePort);
 		ClaimConnectionExclusively(connection);
 		MarkRemoteTransactionCritical(connection);

@@ -296,12 +291,10 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
 	foreach(connectionCell, connectionList)
 	{
 		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		StringInfo copyCommand = NULL;
-		bool querySent = false;

-		copyCommand = ConstructCopyResultStatement(resultId);
+		StringInfo copyCommand = ConstructCopyResultStatement(resultId);

-		querySent = SendRemoteCommand(connection, copyCommand->data);
+		bool querySent = SendRemoteCommand(connection, copyCommand->data);
 		if (!querySent)
 		{
 			ReportConnectionError(connection, ERROR);
@@ -371,8 +364,6 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
 	CopyOutState copyOutState = resultDest->copyOutState;
 	FmgrInfo *columnOutputFunctions = resultDest->columnOutputFunctions;

-	Datum *columnValues = NULL;
-	bool *columnNulls = NULL;
 	StringInfo copyData = copyOutState->fe_msgbuf;

 	EState *executorState = resultDest->executorState;
@@ -381,8 +372,8 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)

 	slot_getallattrs(slot);

-	columnValues = slot->tts_values;
-	columnNulls = slot->tts_isnull;
+	Datum *columnValues = slot->tts_values;
+	bool *columnNulls = slot->tts_isnull;

 	resetStringInfo(copyData);

@@ -526,11 +517,9 @@ RemoteFileDestReceiverDestroy(DestReceiver *destReceiver)
 void
 ReceiveQueryResultViaCopy(const char *resultId)
 {
-	const char *resultFileName = NULL;
-
 	CreateIntermediateResultsDirectory();

-	resultFileName = QueryResultFileName(resultId);
+	const char *resultFileName = QueryResultFileName(resultId);

 	RedirectCopyDataToRegularFile(resultFileName);
 }
@@ -671,12 +660,10 @@ RemoveIntermediateResultsDirectory(void)
 int64
 IntermediateResultSize(char *resultId)
 {
-	char *resultFileName = NULL;
 	struct stat fileStat;
-	int statOK = 0;

-	resultFileName = QueryResultFileName(resultId);
-	statOK = stat(resultFileName, &fileStat);
+	char *resultFileName = QueryResultFileName(resultId);
+	int statOK = stat(resultFileName, &fileStat);
 	if (statOK < 0)
 	{
 		return -1;
@@ -710,24 +697,21 @@ read_intermediate_result(PG_FUNCTION_ARGS)
 	Datum copyFormatLabelDatum = DirectFunctionCall1(enum_out, copyFormatOidDatum);
 	char *copyFormatLabel = DatumGetCString(copyFormatLabelDatum);

-	char *resultFileName = NULL;
 	struct stat fileStat;
-	int statOK = 0;
-
-	Tuplestorestate *tupstore = NULL;
 	TupleDesc tupleDescriptor = NULL;

 	CheckCitusVersion(ERROR);

-	resultFileName = QueryResultFileName(resultIdString);
-	statOK = stat(resultFileName, &fileStat);
+	char *resultFileName = QueryResultFileName(resultIdString);
+	int statOK = stat(resultFileName, &fileStat);
 	if (statOK != 0)
 	{
 		ereport(ERROR, (errcode_for_file_access(),
						errmsg("result \"%s\" does not exist", resultIdString)));
 	}

-	tupstore = SetupTuplestore(fcinfo, &tupleDescriptor);
+	Tuplestorestate *tupstore = SetupTuplestore(fcinfo, &tupleDescriptor);

 	ReadFileIntoTupleStore(resultFileName, copyFormatLabel, tupleDescriptor, tupstore);
@@ -142,8 +142,6 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList)
 	{
 		Task *task = (Task *) lfirst(taskCell);

-		PlannedStmt *localPlan = NULL;
-		int cursorOptions = 0;
 		const char *shardQueryString = task->queryString;
 		Query *shardQuery = ParseQueryString(shardQueryString, parameterTypes, numParams);

@@ -153,7 +151,7 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList)
		 * go through the distributed executor, which we do not want since the
		 * query is already known to be local.
		 */
-		cursorOptions = 0;
+		int cursorOptions = 0;

		/*
		 * Altough the shardQuery is local to this node, we prefer planner()
@@ -163,7 +161,7 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList)
		 * implemented. So, let planner to call distributed_planner() which
		 * eventually calls standard_planner().
		 */
-		localPlan = planner(shardQuery, cursorOptions, paramListInfo);
+		PlannedStmt *localPlan = planner(shardQuery, cursorOptions, paramListInfo);

 		LogLocalCommand(shardQueryString);

@@ -241,7 +239,6 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList,
 	}
 	else
 	{
-		Task *localTask = NULL;
 		Task *remoteTask = NULL;

		/*
@@ -252,7 +249,7 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList,
		 */
 		task->partiallyLocalOrRemote = true;

-		localTask = copyObject(task);
+		Task *localTask = copyObject(task);

 		localTask->taskPlacementList = localTaskPlacementList;
 		*localTaskList = lappend(*localTaskList, localTask);
@@ -318,7 +315,6 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que
 	DestReceiver *tupleStoreDestReceiever = CreateDestReceiver(DestTuplestore);
 	ScanDirection scanDirection = ForwardScanDirection;
 	QueryEnvironment *queryEnv = create_queryEnv();
-	QueryDesc *queryDesc = NULL;
 	int eflags = 0;
 	uint64 totalRowsProcessed = 0;

@@ -331,10 +327,10 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que
									 CurrentMemoryContext, false);

 	/* Create a QueryDesc for the query */
-	queryDesc = CreateQueryDesc(taskPlan, queryString,
-								GetActiveSnapshot(), InvalidSnapshot,
-								tupleStoreDestReceiever, paramListInfo,
-								queryEnv, 0);
+	QueryDesc *queryDesc = CreateQueryDesc(taskPlan, queryString,
+										   GetActiveSnapshot(), InvalidSnapshot,
+										   tupleStoreDestReceiever, paramListInfo,
+										   queryEnv, 0);

 	ExecutorStart(queryDesc, eflags);
 	ExecutorRun(queryDesc, scanDirection, 0L, true);
@@ -365,8 +361,6 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que
 bool
 ShouldExecuteTasksLocally(List *taskList)
 {
-	bool singleTask = false;
-
 	if (!EnableLocalExecution)
 	{
 		return false;
@@ -394,7 +388,7 @@ ShouldExecuteTasksLocally(List *taskList)
 		return true;
 	}

-	singleTask = (list_length(taskList) == 1);
+	bool singleTask = (list_length(taskList) == 1);
 	if (singleTask && TaskAccessesLocalNode((Task *) linitial(taskList)))
 	{
		/*
@@ -55,10 +55,9 @@ static int32
 AllocateConnectionId(void)
 {
 	int32 connectionId = INVALID_CONNECTION_ID;
-	int32 connIndex = 0;

 	/* allocate connectionId from connection pool */
-	for (connIndex = 0; connIndex < MAX_CONNECTION_COUNT; connIndex++)
+	for (int32 connIndex = 0; connIndex < MAX_CONNECTION_COUNT; connIndex++)
 	{
 		MultiConnection *connection = ClientConnectionArray[connIndex];
 		if (connection == NULL)
@@ -84,8 +83,6 @@ int32
 MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDatabase,
				   const char *userName)
 {
-	MultiConnection *connection = NULL;
-	ConnStatusType connStatusType = CONNECTION_OK;
 	int32 connectionId = AllocateConnectionId();
 	int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */

@@ -103,10 +100,11 @@ MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDataba
 	}

 	/* establish synchronous connection to worker node */
-	connection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
-											   userName, nodeDatabase);
+	MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, nodeName,
+																nodePort,
+																userName, nodeDatabase);

-	connStatusType = PQstatus(connection->pgConn);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);

 	if (connStatusType == CONNECTION_OK)
 	{
@@ -132,8 +130,6 @@ int32
 MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeDatabase,
						const char *userName)
 {
-	MultiConnection *connection = NULL;
-	ConnStatusType connStatusType = CONNECTION_OK;
 	int32 connectionId = AllocateConnectionId();
 	int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */

@@ -151,9 +147,10 @@ MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeD
 	}

 	/* prepare asynchronous request for worker node connection */
-	connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
-												 userName, nodeDatabase);
-	connStatusType = PQstatus(connection->pgConn);
+	MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
+																  nodeName, nodePort,
+																  userName, nodeDatabase);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);

	/*
	 * If prepared, we save the connection, and set its initial polling status
@@ -181,15 +178,13 @@ MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeD
 ConnectStatus
 MultiClientConnectPoll(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
-	PostgresPollingStatusType pollingStatus = PGRES_POLLING_OK;
 	ConnectStatus connectStatus = CLIENT_INVALID_CONNECT;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	pollingStatus = ClientPollingStatusArray[connectionId];
+	PostgresPollingStatusType pollingStatus = ClientPollingStatusArray[connectionId];
 	if (pollingStatus == PGRES_POLLING_OK)
 	{
 		connectStatus = CLIENT_CONNECTION_READY;
@@ -235,11 +230,10 @@ MultiClientConnectPoll(int32 connectionId)
 void
 MultiClientDisconnect(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
 	const int InvalidPollingStatus = -1;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

 	CloseConnection(connection);
@@ -256,15 +250,13 @@ MultiClientDisconnect(int32 connectionId)
 bool
 MultiClientConnectionUp(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
-	ConnStatusType connStatusType = CONNECTION_OK;
 	bool connectionUp = true;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	connStatusType = PQstatus(connection->pgConn);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);
 	if (connStatusType == CONNECTION_BAD)
 	{
 		connectionUp = false;
@@ -278,15 +270,13 @@ MultiClientConnectionUp(int32 connectionId)
 bool
 MultiClientSendQuery(int32 connectionId, const char *query)
 {
-	MultiConnection *connection = NULL;
 	bool success = true;
-	int querySent = 0;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	querySent = SendRemoteCommand(connection, query);
+	int querySent = SendRemoteCommand(connection, query);
 	if (querySent == 0)
 	{
 		char *errorMessage = pchomp(PQerrorMessage(connection->pgConn));
@@ -313,14 +303,11 @@ MultiClientSendQuery(int32 connectionId, const char *query)
 bool
 MultiClientCancel(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
-	bool canceled = true;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	canceled = SendCancelationRequest(connection);
+	bool canceled = SendCancelationRequest(connection);

 	return canceled;
 }
@@ -330,16 +317,13 @@ MultiClientCancel(int32 connectionId)
 ResultStatus
 MultiClientResultStatus(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
-	int consumed = 0;
-	ConnStatusType connStatusType = CONNECTION_OK;
 	ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	connStatusType = PQstatus(connection->pgConn);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);
 	if (connStatusType == CONNECTION_BAD)
 	{
 		ereport(WARNING, (errmsg("could not maintain connection to worker node")));
@@ -347,7 +331,7 @@ MultiClientResultStatus(int32 connectionId)
 	}

 	/* consume input to allow status change */
-	consumed = PQconsumeInput(connection->pgConn);
+	int consumed = PQconsumeInput(connection->pgConn);
 	if (consumed != 0)
 	{
 		int connectionBusy = PQisBusy(connection->pgConn);
@@ -383,15 +367,11 @@ BatchQueryStatus
 MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount,
					   int *columnCount)
 {
-	MultiConnection *connection = NULL;
-	PGresult *result = NULL;
-	ConnStatusType connStatusType = CONNECTION_OK;
-	ExecStatusType resultStatus = PGRES_COMMAND_OK;
 	BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY;
 	bool raiseInterrupts = true;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

 	/* set default result */
@@ -399,20 +379,20 @@ MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount,
 	(*rowCount) = -1;
 	(*columnCount) = -1;

-	connStatusType = PQstatus(connection->pgConn);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);
 	if (connStatusType == CONNECTION_BAD)
 	{
 		ereport(WARNING, (errmsg("could not maintain connection to worker node")));
 		return CLIENT_BATCH_QUERY_FAILED;
 	}

-	result = GetRemoteCommandResult(connection, raiseInterrupts);
+	PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
 	if (result == NULL)
 	{
 		return CLIENT_BATCH_QUERY_DONE;
 	}

-	resultStatus = PQresultStatus(result);
+	ExecStatusType resultStatus = PQresultStatus(result);
 	if (resultStatus == PGRES_TUPLES_OK)
 	{
 		(*queryResult) = (void **) result;
@@ -457,20 +437,16 @@ MultiClientClearResult(void *queryResult)
 QueryStatus
 MultiClientQueryStatus(int32 connectionId)
 {
-	MultiConnection *connection = NULL;
-	PGresult *result = NULL;
 	int tupleCount PG_USED_FOR_ASSERTS_ONLY = 0;
 	bool copyResults = false;
-	ConnStatusType connStatusType = CONNECTION_OK;
-	ExecStatusType resultStatus = PGRES_COMMAND_OK;
 	QueryStatus queryStatus = CLIENT_INVALID_QUERY;
 	bool raiseInterrupts = true;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

-	connStatusType = PQstatus(connection->pgConn);
+	ConnStatusType connStatusType = PQstatus(connection->pgConn);
 	if (connStatusType == CONNECTION_BAD)
 	{
 		ereport(WARNING, (errmsg("could not maintain connection to worker node")));
@@ -482,8 +458,8 @@ MultiClientQueryStatus(int32 connectionId)
	 * isn't ready yet (the caller didn't wait for the connection to be ready),
	 * we will block on this call.
	 */
-	result = GetRemoteCommandResult(connection, raiseInterrupts);
-	resultStatus = PQresultStatus(result);
+	PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
+	ExecStatusType resultStatus = PQresultStatus(result);

 	if (resultStatus == PGRES_COMMAND_OK)
 	{
@@ -536,22 +512,19 @@ MultiClientQueryStatus(int32 connectionId)
 CopyStatus
 MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnBytesReceived)
 {
-	MultiConnection *connection = NULL;
 	char *receiveBuffer = NULL;
-	int consumed = 0;
-	int receiveLength = 0;
 	const int asynchronous = 1;
 	CopyStatus copyStatus = CLIENT_INVALID_COPY;

 	Assert(connectionId != INVALID_CONNECTION_ID);
-	connection = ClientConnectionArray[connectionId];
+	MultiConnection *connection = ClientConnectionArray[connectionId];
 	Assert(connection != NULL);

	/*
	 * Consume input to handle the case where previous copy operation might have
	 * received zero bytes.
	 */
-	consumed = PQconsumeInput(connection->pgConn);
+	int consumed = PQconsumeInput(connection->pgConn);
 	if (consumed == 0)
 	{
 		ereport(WARNING, (errmsg("could not read data from worker node")));
@@ -559,11 +532,10 @@ MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnByte
 	}

 	/* receive copy data message in an asynchronous manner */
-	receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous);
+	int receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous);
 	while (receiveLength > 0)
 	{
 		/* received copy data; append these data to file */
-		int appended = -1;
 		errno = 0;

 		if (returnBytesReceived)
@@ -571,7 +543,7 @@ MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnByte
 			*returnBytesReceived += receiveLength;
 		}

-		appended = write(fileDescriptor, receiveBuffer, receiveLength);
+		int appended = write(fileDescriptor, receiveBuffer, receiveLength);
 		if (appended != receiveLength)
 		{
 			/* if write didn't set errno, assume problem is no disk space */
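Note that not every block-top declaration is folded. Judging from the hunks above, the script only merges a declaration into a later assignment at the same indentation depth, so a default value that is only reassigned inside a deeper branch (for example `bool stopOnFailure = false;` or `char *receiveBuffer = NULL;`) is left where it is, since moving it would change behavior. A small illustration of the distinction (hypothetical C, not from this commit):

#include <stdbool.h>
#include <stdio.h>

static bool
ShouldStopOnFailure(int partitionMethod)
{
	/* This declaration stays: false is the value actually returned when
	 * the branch below is not taken, so it is not a useless initializer. */
	bool stopOnFailure = false;

	if (partitionMethod == 0)
	{
		stopOnFailure = true;
	}

	return stopOnFailure;
}

int
main(void)
{
	printf("%d %d\n", ShouldStopOnFailure(0), ShouldStopOnFailure(1));
	return 0;
}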
@@ -196,9 +196,6 @@ TupleTableSlot *
 ReturnTupleFromTuplestore(CitusScanState *scanState)
 {
 	Tuplestorestate *tupleStore = scanState->tuplestorestate;
-	TupleTableSlot *resultSlot = NULL;
-	EState *executorState = NULL;
-	ScanDirection scanDirection = NoMovementScanDirection;
 	bool forwardScanDirection = true;

 	if (tupleStore == NULL)
@@ -206,8 +203,8 @@ ReturnTupleFromTuplestore(CitusScanState *scanState)
 		return NULL;
 	}

-	executorState = ScanStateGetExecutorState(scanState);
-	scanDirection = executorState->es_direction;
+	EState *executorState = ScanStateGetExecutorState(scanState);
+	ScanDirection scanDirection = executorState->es_direction;
 	Assert(ScanDirectionIsValid(scanDirection));

 	if (ScanDirectionIsBackward(scanDirection))
@@ -215,7 +212,7 @@ ReturnTupleFromTuplestore(CitusScanState *scanState)
 		forwardScanDirection = false;
 	}

-	resultSlot = scanState->customScanState.ss.ps.ps_ResultTupleSlot;
+	TupleTableSlot *resultSlot = scanState->customScanState.ss.ps.ps_ResultTupleSlot;
 	tuplestore_gettupleslot(tupleStore, forwardScanDirection, false, resultSlot);

 	return resultSlot;
@@ -234,13 +231,12 @@ void
 LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob)
 {
 	List *workerTaskList = workerJob->taskList;
-	TupleDesc tupleDescriptor = NULL;
 	ListCell *workerTaskCell = NULL;
 	bool randomAccess = true;
 	bool interTransactions = false;
 	char *copyFormat = "text";

-	tupleDescriptor = ScanStateGetTupleDescriptor(citusScanState);
+	TupleDesc tupleDescriptor = ScanStateGetTupleDescriptor(citusScanState);

 	Assert(citusScanState->tuplestorestate == NULL);
 	citusScanState->tuplestorestate =
@@ -254,11 +250,9 @@ LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob)
 	foreach(workerTaskCell, workerTaskList)
 	{
 		Task *workerTask = (Task *) lfirst(workerTaskCell);
-		StringInfo jobDirectoryName = NULL;
-		StringInfo taskFilename = NULL;

-		jobDirectoryName = MasterJobDirectoryName(workerTask->jobId);
-		taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId);
+		StringInfo jobDirectoryName = MasterJobDirectoryName(workerTask->jobId);
+		StringInfo taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId);

 		ReadFileIntoTupleStore(taskFilename->data, copyFormat, tupleDescriptor,
							   citusScanState->tuplestorestate);
@@ -277,8 +271,6 @@ void
 ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescriptor,
					   Tuplestorestate *tupstore)
 {
-	CopyState copyState = NULL;
-
	/*
	 * Trick BeginCopyFrom into using our tuple descriptor by pretending it belongs
	 * to a relation.
@@ -293,26 +285,23 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
 	Datum *columnValues = palloc0(columnCount * sizeof(Datum));
 	bool *columnNulls = palloc0(columnCount * sizeof(bool));

-	DefElem *copyOption = NULL;
 	List *copyOptions = NIL;

 	int location = -1; /* "unknown" token location */
-	copyOption = makeDefElem("format", (Node *) makeString(copyFormat), location);
+	DefElem *copyOption = makeDefElem("format", (Node *) makeString(copyFormat),
+									  location);
 	copyOptions = lappend(copyOptions, copyOption);

-	copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL,
-							  NULL, copyOptions);
+	CopyState copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL,
+										NULL, copyOptions);

 	while (true)
 	{
-		MemoryContext oldContext = NULL;
-		bool nextRowFound = false;

 		ResetPerTupleExprContext(executorState);
-		oldContext = MemoryContextSwitchTo(executorTupleContext);
+		MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);

-		nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
-										  columnValues, columnNulls);
+		bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+											   columnValues, columnNulls);
 		if (!nextRowFound)
 		{
 			MemoryContextSwitchTo(oldContext);
@@ -355,7 +344,6 @@ SortTupleStore(CitusScanState *scanState)
 	ListCell *targetCell = NULL;
 	int sortKeyIndex = 0;

-	Tuplesortstate *tuplesortstate = NULL;

	/*
	 * Iterate on the returning target list and generate the necessary information
@@ -380,7 +368,7 @@ SortTupleStore(CitusScanState *scanState)
 		sortKeyIndex++;
 	}

-	tuplesortstate =
+	Tuplesortstate *tuplesortstate =
 		tuplesort_begin_heap(tupleDescriptor, numberOfSortKeys, sortColIdx, sortOperators,
							 collations, nullsFirst, work_mem, NULL, false);

@@ -467,7 +455,6 @@ ExecuteQueryStringIntoDestReceiver(const char *queryString, ParamListInfo params
 Query *
 ParseQueryString(const char *queryString, Oid *paramOids, int numParams)
 {
-	Query *query = NULL;
 	RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
 	List *queryTreeList =
 		pg_analyze_and_rewrite(rawStmt, queryString, paramOids, numParams, NULL);
@@ -477,7 +464,7 @@ ParseQueryString(const char *queryString, Oid *paramOids, int numParams)
 		ereport(ERROR, (errmsg("can only execute a single query")));
 	}

-	query = (Query *) linitial(queryTreeList);
+	Query *query = (Query *) linitial(queryTreeList);

 	return query;
 }
@@ -490,13 +477,10 @@ ParseQueryString(const char *queryString, Oid *paramOids, int numParams)
 void
 ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *dest)
 {
-	PlannedStmt *queryPlan = NULL;
-	int cursorOptions = 0;
-
-	cursorOptions = CURSOR_OPT_PARALLEL_OK;
+	int cursorOptions = CURSOR_OPT_PARALLEL_OK;

 	/* plan the subquery, this may be another distributed query */
-	queryPlan = pg_plan_query(query, cursorOptions, params);
+	PlannedStmt *queryPlan = pg_plan_query(query, cursorOptions, params);

 	ExecutePlanIntoDestReceiver(queryPlan, params, dest);
 }
@@ -510,12 +494,11 @@ void
 ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params,
							DestReceiver *dest)
 {
-	Portal portal = NULL;
 	int eflags = 0;
 	long count = FETCH_ALL;

 	/* create a new portal for executing the query */
-	portal = CreateNewPortal();
+	Portal portal = CreateNewPortal();

 	/* don't display the portal in pg_cursors, it is for internal use only */
 	portal->visible = false;
@@ -170,7 +170,6 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus)
 {
 	/* each task placement (assignment) corresponds to one worker node */
 	uint32 nodeCount = list_length(task->taskPlacementList);
-	uint32 nodeIndex = 0;

 	TaskExecution *taskExecution = CitusMakeNode(TaskExecution);

@@ -185,7 +184,7 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus)
 	taskExecution->connectionIdArray = palloc0(nodeCount * sizeof(int32));
 	taskExecution->fileDescriptorArray = palloc0(nodeCount * sizeof(int32));

-	for (nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
+	for (uint32 nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
 	{
 		taskExecution->taskStatusArray[nodeIndex] = initialTaskExecStatus;
 		taskExecution->transmitStatusArray[nodeIndex] = EXEC_TRANSMIT_UNASSIGNED;
@@ -205,8 +204,7 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus)
 void
 CleanupTaskExecution(TaskExecution *taskExecution)
 {
-	uint32 nodeIndex = 0;
-	for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++)
+	for (uint32 nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++)
 	{
 		int32 connectionId = taskExecution->connectionIdArray[nodeIndex];
 		int32 fileDescriptor = taskExecution->fileDescriptorArray[nodeIndex];
@@ -284,14 +282,12 @@ AdjustStateForFailure(TaskExecution *taskExecution)
 bool
 CheckIfSizeLimitIsExceeded(DistributedExecutionStats *executionStats)
 {
-	uint64 maxIntermediateResultInBytes = 0;
-
 	if (!SubPlanLevel || MaxIntermediateResult < 0)
 	{
 		return false;
 	}

-	maxIntermediateResultInBytes = MaxIntermediateResult * 1024L;
+	uint64 maxIntermediateResultInBytes = MaxIntermediateResult * 1024L;
 	if (executionStats->totalIntermediateResultSize < maxIntermediateResultInBytes)
 	{
 		return false;
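Loop counters get a second form of the same rewrite: a counter declared at block scope and used only by a `for` header is moved into the header itself, which relies on C99 declaration-in-for support (visible above in the nodeIndex, commandIndex, and parameterIndex hunks). A self-contained sketch of that transformation (hypothetical example, not from this diff):

#include <stdio.h>

int
main(void)
{
	int values[] = { 1, 2, 3, 4 };
	int total = 0;

	/* before the rewrite this was:
	 *     int valueIndex = 0;
	 *     for (valueIndex = 0; valueIndex < 4; valueIndex++)
	 * the script moves the declaration into the loop header: */
	for (int valueIndex = 0; valueIndex < 4; valueIndex++)
	{
		total += values[valueIndex];
	}

	printf("%d\n", total);
	return 0;
}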
(File diff suppressed because it is too large.)
@@ -128,16 +128,16 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList,
 	foreach(relationShardCell, relationShardList)
 	{
 		RelationShard *relationShard = (RelationShard *) lfirst(relationShardCell);
-		ShardPlacement *placement = NULL;
-		ShardPlacementAccess *placementAccess = NULL;

-		placement = FindShardPlacementOnGroup(groupId, relationShard->shardId);
+		ShardPlacement *placement = FindShardPlacementOnGroup(groupId,
+															  relationShard->shardId);
 		if (placement == NULL)
 		{
 			continue;
 		}

-		placementAccess = CreatePlacementAccess(placement, accessType);
+		ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement,
+																	  accessType);
 		placementAccessList = lappend(placementAccessList, placementAccess);
 	}

@@ -152,9 +152,8 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList,
 ShardPlacementAccess *
 CreatePlacementAccess(ShardPlacement *placement, ShardPlacementAccessType accessType)
 {
-	ShardPlacementAccess *placementAccess = NULL;
-
-	placementAccess = (ShardPlacementAccess *) palloc0(sizeof(ShardPlacementAccess));
+	ShardPlacementAccess *placementAccess = (ShardPlacementAccess *) palloc0(
+		sizeof(ShardPlacementAccess));
 	placementAccess->placement = placement;
 	placementAccess->accessType = accessType;
@@ -36,7 +36,6 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
 	uint64 planId = distributedPlan->planId;
 	List *subPlanList = distributedPlan->subPlanList;
 	ListCell *subPlanCell = NULL;
-	HTAB *intermediateResultsHash = NULL;

 	if (subPlanList == NIL)
 	{
@@ -44,7 +43,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
 		return;
 	}

-	intermediateResultsHash = MakeIntermediateResultHTAB();
+	HTAB *intermediateResultsHash = MakeIntermediateResultHTAB();
 	RecordSubplanExecutionsOnNodes(intermediateResultsHash, distributedPlan);

@@ -61,9 +60,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
 		DistributedSubPlan *subPlan = (DistributedSubPlan *) lfirst(subPlanCell);
 		PlannedStmt *plannedStmt = subPlan->plan;
 		uint32 subPlanId = subPlan->subPlanId;
-		DestReceiver *copyDest = NULL;
 		ParamListInfo params = NULL;
-		EState *estate = NULL;
 		bool writeLocalFile = false;
 		char *resultId = GenerateResultId(planId, subPlanId);
 		List *workerNodeList =
@@ -94,9 +91,10 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
 		}

 		SubPlanLevel++;
-		estate = CreateExecutorState();
-		copyDest = CreateRemoteFileDestReceiver(resultId, estate, workerNodeList,
-												writeLocalFile);
+		EState *estate = CreateExecutorState();
+		DestReceiver *copyDest = CreateRemoteFileDestReceiver(resultId, estate,
+															  workerNodeList,
+															  writeLocalFile);

 		ExecutePlanIntoDestReceiver(plannedStmt, params, copyDest);
@@ -49,9 +49,6 @@ Datum
 citus_create_restore_point(PG_FUNCTION_ARGS)
 {
 	text *restoreNameText = PG_GETARG_TEXT_P(0);
-	char *restoreNameString = NULL;
-	XLogRecPtr localRestorePoint = InvalidXLogRecPtr;
-	List *connectionList = NIL;

 	CheckCitusVersion(ERROR);
 	EnsureSuperUser();
@@ -74,7 +71,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
						   "start.")));
 	}

-	restoreNameString = text_to_cstring(restoreNameText);
+	char *restoreNameString = text_to_cstring(restoreNameText);
 	if (strlen(restoreNameString) >= MAXFNAMELEN)
 	{
 		ereport(ERROR,
@@ -87,7 +84,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
	 * establish connections to all nodes before taking any locks
	 * ShareLock prevents new nodes being added, rendering connectionList incomplete
	 */
-	connectionList = OpenConnectionsToAllWorkerNodes(ShareLock);
+	List *connectionList = OpenConnectionsToAllWorkerNodes(ShareLock);

	/*
	 * Send a BEGIN to bust through pgbouncer. We won't actually commit since
@@ -100,7 +97,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
 	BlockDistributedTransactions();

 	/* do local restore point first to bail out early if something goes wrong */
-	localRestorePoint = XLogRestorePoint(restoreNameString);
+	XLogRecPtr localRestorePoint = XLogRestorePoint(restoreNameString);

 	/* run pg_create_restore_point on all nodes */
 	CreateRemoteRestorePoints(restoreNameString, connectionList);
@@ -117,19 +114,18 @@ static List *
 OpenConnectionsToAllWorkerNodes(LOCKMODE lockMode)
 {
 	List *connectionList = NIL;
-	List *workerNodeList = NIL;
 	ListCell *workerNodeCell = NULL;
 	int connectionFlags = FORCE_NEW_CONNECTION;

-	workerNodeList = ActivePrimaryWorkerNodeList(lockMode);
+	List *workerNodeList = ActivePrimaryWorkerNodeList(lockMode);

 	foreach(workerNodeCell, workerNodeList)
 	{
 		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
-		MultiConnection *connection = NULL;
-
-		connection = StartNodeConnection(connectionFlags, workerNode->workerName,
-										 workerNode->workerPort);
+		MultiConnection *connection = StartNodeConnection(connectionFlags,
+														  workerNode->workerName,
+														  workerNode->workerPort);
 		MarkRemoteTransactionCritical(connection);

 		connectionList = lappend(connectionList, connection);
@ -72,18 +72,10 @@ Datum
|
|||
master_run_on_worker(PG_FUNCTION_ARGS)
|
||||
{
|
||||
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
||||
MemoryContext per_query_ctx = NULL;
|
||||
MemoryContext oldcontext = NULL;
|
||||
TupleDesc tupleDescriptor = NULL;
|
||||
Tuplestorestate *tupleStore = NULL;
|
||||
bool parallelExecution = false;
|
||||
StringInfo *nodeNameArray = NULL;
|
||||
int *nodePortArray = NULL;
|
||||
StringInfo *commandStringArray = NULL;
|
||||
bool *statusArray = NULL;
|
||||
StringInfo *resultArray = NULL;
|
||||
int commandIndex = 0;
|
||||
int commandCount = 0;
|
||||
|
||||
CheckCitusVersion(ERROR);
|
||||
|
||||
|
@ -96,14 +88,14 @@ master_run_on_worker(PG_FUNCTION_ARGS)
|
|||
"allowed in this context")));
|
||||
}
|
||||
|
||||
commandCount = ParseCommandParameters(fcinfo, &nodeNameArray, &nodePortArray,
|
||||
&commandStringArray, ¶llelExecution);
|
||||
int commandCount = ParseCommandParameters(fcinfo, &nodeNameArray, &nodePortArray,
|
||||
&commandStringArray, ¶llelExecution);
|
||||
|
||||
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
|
||||
oldcontext = MemoryContextSwitchTo(per_query_ctx);
|
||||
MemoryContext per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
|
||||
MemoryContext oldcontext = MemoryContextSwitchTo(per_query_ctx);
|
||||
|
||||
/* get the requested return tuple description */
|
||||
tupleDescriptor = CreateTupleDescCopy(rsinfo->expectedDesc);
|
||||
TupleDesc tupleDescriptor = CreateTupleDescCopy(rsinfo->expectedDesc);
|
||||
|
||||
/*
|
||||
* Check to make sure we have correct tuple descriptor
|
||||
|
@ -121,9 +113,9 @@ master_run_on_worker(PG_FUNCTION_ARGS)
|
|||
}
|
||||
|
||||
/* prepare storage for status and result values */
|
||||
statusArray = palloc0(commandCount * sizeof(bool));
|
||||
resultArray = palloc0(commandCount * sizeof(StringInfo));
|
||||
for (commandIndex = 0; commandIndex < commandCount; commandIndex++)
|
||||
bool *statusArray = palloc0(commandCount * sizeof(bool));
|
||||
StringInfo *resultArray = palloc0(commandCount * sizeof(StringInfo));
|
||||
for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
|
||||
{
|
||||
resultArray[commandIndex] = makeStringInfo();
|
||||
}
|
||||
|
@ -142,9 +134,10 @@ master_run_on_worker(PG_FUNCTION_ARGS)
|
|||
|
||||
/* let the caller know we're sending back a tuplestore */
|
||||
rsinfo->returnMode = SFRM_Materialize;
|
||||
tupleStore = CreateTupleStore(tupleDescriptor,
|
||||
nodeNameArray, nodePortArray, statusArray,
|
||||
resultArray, commandCount);
|
||||
Tuplestorestate *tupleStore = CreateTupleStore(tupleDescriptor,
|
||||
nodeNameArray, nodePortArray,
|
||||
statusArray,
|
||||
resultArray, commandCount);
|
||||
rsinfo->setResult = tupleStore;
|
||||
rsinfo->setDesc = tupleDescriptor;
|
||||
|
||||
|
@ -170,10 +163,6 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray,
|
|||
Datum *nodeNameDatumArray = DeconstructArrayObject(nodeNameArrayObject);
|
||||
Datum *nodePortDatumArray = DeconstructArrayObject(nodePortArrayObject);
|
||||
Datum *commandStringDatumArray = DeconstructArrayObject(commandStringArrayObject);
|
||||
int index = 0;
|
||||
StringInfo *nodeNames = NULL;
|
||||
int *nodePorts = NULL;
|
||||
StringInfo *commandStrings = NULL;
|
||||
|
||||
if (nodeNameCount != nodePortCount || nodeNameCount != commandStringCount)
|
||||
{
|
||||
|
@ -182,11 +171,11 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray,
|
|||
errmsg("expected same number of node name, port, and query string")));
|
||||
}
|
||||
|
||||
nodeNames = palloc0(nodeNameCount * sizeof(StringInfo));
|
||||
nodePorts = palloc0(nodeNameCount * sizeof(int));
|
||||
commandStrings = palloc0(nodeNameCount * sizeof(StringInfo));
|
||||
StringInfo *nodeNames = palloc0(nodeNameCount * sizeof(StringInfo));
|
||||
int *nodePorts = palloc0(nodeNameCount * sizeof(int));
|
||||
StringInfo *commandStrings = palloc0(nodeNameCount * sizeof(StringInfo));
|
||||
|
||||
for (index = 0; index < nodeNameCount; index++)
|
||||
for (int index = 0; index < nodeNameCount; index++)
|
||||
{
|
||||
text *nodeNameText = DatumGetTextP(nodeNameDatumArray[index]);
|
||||
char *nodeName = text_to_cstring(nodeNameText);
|
||||
|
@@ -224,13 +213,12 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
 										 bool *statusArray, StringInfo *resultStringArray,
 										 int commmandCount)
 {
-	int commandIndex = 0;
 	MultiConnection **connectionArray =
 		palloc0(commmandCount * sizeof(MultiConnection *));
 	int finishedCount = 0;
 
 	/* start connections asynchronously */
-	for (commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
 	{
 		char *nodeName = nodeNameArray[commandIndex]->data;
 		int nodePort = nodePortArray[commandIndex];
@@ -240,7 +228,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
 	}
 
 	/* establish connections */
-	for (commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
 	{
 		MultiConnection *connection = connectionArray[commandIndex];
 		StringInfo queryResultString = resultStringArray[commandIndex];
@@ -264,9 +252,8 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
 	}
 
 	/* send queries at once */
-	for (commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
 	{
-		int querySent = 0;
 		MultiConnection *connection = connectionArray[commandIndex];
 		char *queryString = commandStringArray[commandIndex]->data;
 		StringInfo queryResultString = resultStringArray[commandIndex];
@@ -280,7 +267,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
 			continue;
 		}
 
-		querySent = SendRemoteCommand(connection, queryString);
+		int querySent = SendRemoteCommand(connection, queryString);
 		if (querySent == 0)
 		{
 			StoreErrorMessage(connection, queryResultString);
@@ -294,20 +281,19 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
 	/* check for query results */
 	while (finishedCount < commmandCount)
 	{
-		for (commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+		for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
 		{
 			MultiConnection *connection = connectionArray[commandIndex];
 			StringInfo queryResultString = resultStringArray[commandIndex];
 			bool success = false;
-			bool queryFinished = false;
 
 			if (connection == NULL)
 			{
 				continue;
 			}
 
-			queryFinished = GetConnectionStatusAndResult(connection, &success,
-														 queryResultString);
+			bool queryFinished = GetConnectionStatusAndResult(connection, &success,
+															  queryResultString);
 
 			if (queryFinished)
 			{
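
Two details in the hunks above are worth noting. Loop counters move into the `for` initializer, which requires C99 or later and narrows the counter to the loop body. And a variable that first receives its value through a pointer argument, like `success` passed as `&success`, keeps its up-front declaration: there is no direct assignment for the script to merge. A small runnable sketch of both cases, with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	/* hypothetical helper that returns its result via an out-parameter */
	static bool
	TryParseFlag(const char *text, bool *flag)
	{
		*flag = (text[0] == 'y');
		return true;
	}

	int
	main(void)
	{
		const char *inputs[] = { "yes", "no" };

		/* after conversion: the counter is scoped to the loop */
		for (int inputIndex = 0; inputIndex < 2; inputIndex++)
		{
			/* stays declared up front: first written through &flag */
			bool flag = false;
			bool parsed = TryParseFlag(inputs[inputIndex], &flag);

			printf("%d: parsed=%d flag=%d\n", inputIndex, parsed, flag);
		}
		return 0;
	}
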
@@ -343,9 +329,6 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus,
 {
 	bool finished = true;
 	ConnStatusType connectionStatus = PQstatus(connection->pgConn);
-	int consumeInput = 0;
-	PGresult *queryResult = NULL;
-	bool success = false;
 
 	*resultStatus = false;
 	resetStringInfo(queryResultString);
@@ -356,7 +339,7 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus,
 		return finished;
 	}
 
-	consumeInput = PQconsumeInput(connection->pgConn);
+	int consumeInput = PQconsumeInput(connection->pgConn);
 	if (consumeInput == 0)
 	{
 		appendStringInfo(queryResultString, "query result unavailable");
@@ -371,8 +354,8 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus,
 	}
 
 	/* query result is available at this point */
-	queryResult = PQgetResult(connection->pgConn);
-	success = EvaluateQueryResult(connection, queryResult, queryResultString);
+	PGresult *queryResult = PQgetResult(connection->pgConn);
+	bool success = EvaluateQueryResult(connection, queryResult, queryResultString);
 	PQclear(queryResult);
 
 	*resultStatus = success;
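
Note that `bool finished = true;` is untouched above: its initializer is already the real value, so there is no declare-then-assign pair to merge. The merged calls, like `int consumeInput = PQconsumeInput(...)`, also keep the check-the-result-immediately idiom intact. A runnable plain-C sketch of that idiom (hypothetical file path, nothing Citus-specific assumed):

	#include <stdio.h>

	int
	main(void)
	{
		/* declaration at first use, checked right away */
		FILE *stream = fopen("/etc/hostname", "r");
		if (stream == NULL)
		{
			return 1;
		}

		int firstChar = fgetc(stream);
		if (firstChar == EOF)
		{
			fclose(stream);
			return 1;
		}

		int closeStatus = fclose(stream);
		return (closeStatus == 0) ? 0 : 1;
	}
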
@@ -449,12 +432,10 @@ StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString)
 	char *errorMessage = PQerrorMessage(connection->pgConn);
 	if (errorMessage != NULL)
 	{
-		char *firstNewlineIndex = NULL;
-
 		/* copy the error message to a writable memory */
 		errorMessage = pnstrdup(errorMessage, strlen(errorMessage));
 
-		firstNewlineIndex = strchr(errorMessage, '\n');
+		char *firstNewlineIndex = strchr(errorMessage, '\n');
 
 		/* trim the error message at the line break */
 		if (firstNewlineIndex != NULL)
@@ -484,17 +465,15 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray,
 							   StringInfo *commandStringArray, bool *statusArray,
 							   StringInfo *resultStringArray, int commmandCount)
 {
-	int commandIndex = 0;
-	for (commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
 	{
 		char *nodeName = nodeNameArray[commandIndex]->data;
 		int32 nodePort = nodePortArray[commandIndex];
-		bool success = false;
 		char *queryString = commandStringArray[commandIndex]->data;
 		StringInfo queryResultString = resultStringArray[commandIndex];
 
-		success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString,
-											  queryResultString);
+		bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString,
+												   queryResultString);
 
 		statusArray[commandIndex] = success;
 
@@ -516,8 +495,6 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString,
 	int connectionFlags = FORCE_NEW_CONNECTION;
 	MultiConnection *connection =
 		GetNodeConnection(connectionFlags, nodeName, nodePort);
-	bool success = false;
-	PGresult *queryResult = NULL;
 	bool raiseInterrupts = true;
 
 	if (PQstatus(connection->pgConn) != CONNECTION_OK)
@@ -528,8 +505,8 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString,
 	}
 
 	SendRemoteCommand(connection, queryString);
-	queryResult = GetRemoteCommandResult(connection, raiseInterrupts);
-	success = EvaluateQueryResult(connection, queryResult, queryResultString);
+	PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts);
+	bool success = EvaluateQueryResult(connection, queryResult, queryResultString);
 
 	PQclear(queryResult);
 
@@ -547,13 +524,11 @@ CreateTupleStore(TupleDesc tupleDescriptor,
 				 StringInfo *resultArray, int commandCount)
 {
 	Tuplestorestate *tupleStore = tuplestore_begin_heap(true, false, work_mem);
-	int commandIndex = 0;
 	bool nulls[4] = { false, false, false, false };
 
-	for (commandIndex = 0; commandIndex < commandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
 	{
 		Datum values[4];
-		HeapTuple tuple = NULL;
 		StringInfo nodeNameString = nodeNameArray[commandIndex];
 		StringInfo resultString = resultArray[commandIndex];
 		text *nodeNameText = cstring_to_text_with_len(nodeNameString->data,
@@ -566,7 +541,7 @@ CreateTupleStore(TupleDesc tupleDescriptor,
 		values[2] = BoolGetDatum(statusArray[commandIndex]);
 		values[3] = PointerGetDatum(resultText);
 
-		tuple = heap_form_tuple(tupleDescriptor, values, nulls);
+		HeapTuple tuple = heap_form_tuple(tupleDescriptor, values, nulls);
 		tuplestore_puttuple(tupleStore, tuple);
 
 		heap_freetuple(tuple);

@@ -106,13 +106,6 @@ void
 CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 								 int32 replicationFactor, bool useExclusiveConnections)
 {
-	char shardStorageType = 0;
-	List *workerNodeList = NIL;
-	int32 workerNodeCount = 0;
-	uint32 placementAttemptCount = 0;
-	uint64 hashTokenIncrement = 0;
-	List *existingShardList = NIL;
-	int64 shardIndex = 0;
 	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId);
 	bool colocatedShard = false;
 	List *insertedShardPlacements = NIL;
@@ -132,7 +125,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 	LockRelationOid(distributedTableId, ExclusiveLock);
 
 	/* validate that shards haven't already been created for this table */
-	existingShardList = LoadShardList(distributedTableId);
+	List *existingShardList = LoadShardList(distributedTableId);
 	if (existingShardList != NIL)
 	{
 		char *tableName = get_rel_name(distributedTableId);
@@ -171,16 +164,16 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 	}
 
 	/* calculate the split of the hash space */
-	hashTokenIncrement = HASH_TOKEN_COUNT / shardCount;
+	uint64 hashTokenIncrement = HASH_TOKEN_COUNT / shardCount;
 
 	/* don't allow concurrent node list changes that require an exclusive lock */
 	LockRelationOid(DistNodeRelationId(), RowShareLock);
 
 	/* load and sort the worker node list for deterministic placement */
-	workerNodeList = DistributedTablePlacementNodeList(NoLock);
+	List *workerNodeList = DistributedTablePlacementNodeList(NoLock);
 	workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
 
-	workerNodeCount = list_length(workerNodeList);
+	int32 workerNodeCount = list_length(workerNodeList);
 	if (replicationFactor > workerNodeCount)
 	{
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -191,26 +184,23 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 	}
 
 	/* if we have enough nodes, add an extra placement attempt for backup */
-	placementAttemptCount = (uint32) replicationFactor;
+	uint32 placementAttemptCount = (uint32) replicationFactor;
 	if (workerNodeCount > replicationFactor)
 	{
 		placementAttemptCount++;
 	}
 
 	/* set shard storage type according to relation type */
-	shardStorageType = ShardStorageType(distributedTableId);
+	char shardStorageType = ShardStorageType(distributedTableId);
 
-	for (shardIndex = 0; shardIndex < shardCount; shardIndex++)
+	for (int64 shardIndex = 0; shardIndex < shardCount; shardIndex++)
 	{
 		uint32 roundRobinNodeIndex = shardIndex % workerNodeCount;
 
 		/* initialize the hash token space for this shard */
-		text *minHashTokenText = NULL;
-		text *maxHashTokenText = NULL;
 		int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
 		int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);
 		uint64 shardId = GetNextShardId();
-		List *currentInsertedShardPlacements = NIL;
 
 		/* if we are at the last shard, make sure the max token value is INT_MAX */
 		if (shardIndex == (shardCount - 1))
@@ -219,8 +209,8 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 		}
 
 		/* insert the shard metadata row along with its min/max values */
-		minHashTokenText = IntegerToText(shardMinHashToken);
-		maxHashTokenText = IntegerToText(shardMaxHashToken);
+		text *minHashTokenText = IntegerToText(shardMinHashToken);
+		text *maxHashTokenText = IntegerToText(shardMaxHashToken);
 
 		/*
 		 * Grabbing the shard metadata lock isn't technically necessary since
@@ -233,11 +223,12 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 		InsertShardRow(distributedTableId, shardId, shardStorageType,
 					   minHashTokenText, maxHashTokenText);
 
-		currentInsertedShardPlacements = InsertShardPlacementRows(distributedTableId,
-																  shardId,
-																  workerNodeList,
-																  roundRobinNodeIndex,
-																  replicationFactor);
+		List *currentInsertedShardPlacements = InsertShardPlacementRows(
+			distributedTableId,
+			shardId,
+			workerNodeList,
+			roundRobinNodeIndex,
+			replicationFactor);
 		insertedShardPlacements = list_concat(insertedShardPlacements,
 											  currentInsertedShardPlacements);
 	}
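
A side effect visible in the `InsertShardPlacementRows` hunk above: prepending the type to an already long assignment pushes the line past the style limit, so the arguments get re-wrapped underneath the call. A compilable sketch of the same reflow with hypothetical names (the exact wrapping rules here are the formatter's, not spelled out in this commit):

	/* hypothetical long-named helper used only for illustration */
	static int
	InsertHypotheticalPlacementRows(int tableId, long shardId, int nodeCount,
									int startIndex, int replicationFactor)
	{
		return tableId + (int) shardId + nodeCount + startIndex +
			   replicationFactor;
	}

	static int
	Before(void)
	{
		int insertedPlacementCount = 0;

		/* assignment alone still fits with paren-aligned arguments */
		insertedPlacementCount = InsertHypotheticalPlacementRows(1, 2L, 3,
																 4, 5);
		return insertedPlacementCount;
	}

	static int
	After(void)
	{
		/* the prepended type lengthens the line; arguments drop down */
		int insertedPlacementCount = InsertHypotheticalPlacementRows(
			1, 2L, 3, 4, 5);
		return insertedPlacementCount;
	}

	int
	main(void)
	{
		return Before() - After(); /* both paths compute the same value */
	}
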
@@ -255,9 +246,6 @@ void
 CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
 					  useExclusiveConnections)
 {
-	char targetShardStorageType = 0;
-	List *existingShardList = NIL;
-	List *sourceShardIntervalList = NIL;
 	ListCell *sourceShardCell = NULL;
 	bool colocatedShard = true;
 	List *insertedShardPlacements = NIL;
@@ -281,11 +269,11 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
 	LockRelationOid(sourceRelationId, AccessShareLock);
 
 	/* prevent placement changes of the source relation until we colocate with them */
-	sourceShardIntervalList = LoadShardIntervalList(sourceRelationId);
+	List *sourceShardIntervalList = LoadShardIntervalList(sourceRelationId);
 	LockShardListMetadata(sourceShardIntervalList, ShareLock);
 
 	/* validate that shards haven't already been created for this table */
-	existingShardList = LoadShardList(targetRelationId);
+	List *existingShardList = LoadShardList(targetRelationId);
 	if (existingShardList != NIL)
 	{
 		char *targetRelationName = get_rel_name(targetRelationId);
@@ -294,7 +282,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
 						targetRelationName)));
 	}
 
-	targetShardStorageType = ShardStorageType(targetRelationId);
+	char targetShardStorageType = ShardStorageType(targetRelationId);
 
 	foreach(sourceShardCell, sourceShardIntervalList)
 	{
@@ -319,17 +307,18 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
 			int32 groupId = sourcePlacement->groupId;
 			const RelayFileState shardState = FILE_FINALIZED;
 			const uint64 shardSize = 0;
-			uint64 shardPlacementId = 0;
-			ShardPlacement *shardPlacement = NULL;
 
 			/*
 			 * Optimistically add shard placement row the pg_dist_shard_placement, in case
 			 * of any error it will be roll-backed.
 			 */
-			shardPlacementId = InsertShardPlacementRow(newShardId, INVALID_PLACEMENT_ID,
-													   shardState, shardSize, groupId);
+			uint64 shardPlacementId = InsertShardPlacementRow(newShardId,
+															  INVALID_PLACEMENT_ID,
+															  shardState, shardSize,
+															  groupId);
 
-			shardPlacement = LoadShardPlacement(newShardId, shardPlacementId);
+			ShardPlacement *shardPlacement = LoadShardPlacement(newShardId,
+																shardPlacementId);
 			insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement);
 		}
 	}
@@ -347,17 +336,11 @@ void
 CreateReferenceTableShard(Oid distributedTableId)
 {
-	char shardStorageType = 0;
-	List *nodeList = NIL;
-	List *existingShardList = NIL;
-	uint64 shardId = INVALID_SHARD_ID;
 	int workerStartIndex = 0;
-	int replicationFactor = 0;
 	text *shardMinValue = NULL;
 	text *shardMaxValue = NULL;
 	bool useExclusiveConnection = false;
 	bool colocatedShard = false;
-	List *insertedShardPlacements = NIL;
 
 	/*
 	 * In contrast to append/range partitioned tables it makes more sense to
@@ -371,10 +354,10 @@ CreateReferenceTableShard(Oid distributedTableId)
 	LockRelationOid(distributedTableId, ExclusiveLock);
 
 	/* set shard storage type according to relation type */
-	shardStorageType = ShardStorageType(distributedTableId);
+	char shardStorageType = ShardStorageType(distributedTableId);
 
 	/* validate that shards haven't already been created for this table */
-	existingShardList = LoadShardList(distributedTableId);
+	List *existingShardList = LoadShardList(distributedTableId);
 	if (existingShardList != NIL)
 	{
 		char *tableName = get_rel_name(distributedTableId);
@@ -387,13 +370,13 @@ CreateReferenceTableShard(Oid distributedTableId)
 	 * load and sort the worker node list for deterministic placements
 	 * create_reference_table has already acquired pg_dist_node lock
 	 */
-	nodeList = ReferenceTablePlacementNodeList(ShareLock);
+	List *nodeList = ReferenceTablePlacementNodeList(ShareLock);
 	nodeList = SortList(nodeList, CompareWorkerNodes);
 
-	replicationFactor = ReferenceTableReplicationFactor();
+	int replicationFactor = ReferenceTableReplicationFactor();
 
 	/* get the next shard id */
-	shardId = GetNextShardId();
+	uint64 shardId = GetNextShardId();
 
 	/*
 	 * Grabbing the shard metadata lock isn't technically necessary since
@@ -406,9 +389,9 @@ CreateReferenceTableShard(Oid distributedTableId)
 	InsertShardRow(distributedTableId, shardId, shardStorageType, shardMinValue,
 				   shardMaxValue);
 
-	insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId,
-													   nodeList, workerStartIndex,
-													   replicationFactor);
+	List *insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId,
+															 nodeList, workerStartIndex,
+															 replicationFactor);
 
 	CreateShardsOnWorkers(distributedTableId, insertedShardPlacements,
 						  useExclusiveConnection, colocatedShard);
@@ -436,11 +419,10 @@ CheckHashPartitionedTable(Oid distributedTableId)
 text *
 IntegerToText(int32 value)
 {
-	text *valueText = NULL;
 	StringInfo valueString = makeStringInfo();
 	appendStringInfo(valueString, "%d", value);
 
-	valueText = cstring_to_text(valueString->data);
+	text *valueText = cstring_to_text(valueString->data);
 
 	return valueText;
 }

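
Not every up-front declaration disappears. In `CreateReferenceTableShard` above, `shardMinValue` and `shardMaxValue` keep their `NULL` initializers because they are never reassigned: a reference table's single shard has no min/max hash range, and the NULLs flow into `InsertShardRow` as-is. A runnable sketch of the distinction, with hypothetical names:

	#include <stdio.h>

	static void
	StoreRange(const char *minValue, const char *maxValue)
	{
		printf("min=%s max=%s\n",
			   minValue ? minValue : "(null)",
			   maxValue ? maxValue : "(null)");
	}

	int
	main(void)
	{
		/* these stay up front: NULL is their real, final value */
		const char *minValue = NULL;
		const char *maxValue = NULL;

		StoreRange(minValue, maxValue);
		return 0;
	}
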
@@ -103,23 +103,10 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 {
 	text *queryText = PG_GETARG_TEXT_P(0);
 	char *queryString = text_to_cstring(queryText);
-	char *relationName = NULL;
-	char *schemaName = NULL;
-	Oid relationId = InvalidOid;
-	List *shardIntervalList = NIL;
-	List *deletableShardIntervalList = NIL;
-	List *queryTreeList = NIL;
-	Query *deleteQuery = NULL;
-	Node *whereClause = NULL;
-	Node *deleteCriteria = NULL;
-	Node *queryTreeNode = NULL;
-	DeleteStmt *deleteStatement = NULL;
-	int droppedShardCount = 0;
-	LOCKMODE lockMode = 0;
-	char partitionMethod = 0;
 	bool failOK = false;
 	RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
-	queryTreeNode = rawStmt->stmt;
+	Node *queryTreeNode = rawStmt->stmt;
 
 	EnsureCoordinator();
 	CheckCitusVersion(ERROR);
@@ -130,19 +117,19 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 						ApplyLogRedaction(queryString))));
 	}
 
-	deleteStatement = (DeleteStmt *) queryTreeNode;
+	DeleteStmt *deleteStatement = (DeleteStmt *) queryTreeNode;
 
-	schemaName = deleteStatement->relation->schemaname;
-	relationName = deleteStatement->relation->relname;
+	char *schemaName = deleteStatement->relation->schemaname;
+	char *relationName = deleteStatement->relation->relname;
 
 	/*
 	 * We take an exclusive lock while dropping shards to prevent concurrent
 	 * writes. We don't want to block SELECTs, which means queries might fail
 	 * if they access a shard that has just been dropped.
 	 */
-	lockMode = ExclusiveLock;
+	LOCKMODE lockMode = ExclusiveLock;
 
-	relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK);
+	Oid relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK);
 
 	/* schema-prefix if it is not specified already */
 	if (schemaName == NULL)
@@ -154,15 +141,15 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 	CheckDistributedTable(relationId);
 	EnsureTablePermissions(relationId, ACL_DELETE);
 
-	queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
-	deleteQuery = (Query *) linitial(queryTreeList);
+	List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
+	Query *deleteQuery = (Query *) linitial(queryTreeList);
 	CheckTableCount(deleteQuery);
 
 	/* get where clause and flatten it */
-	whereClause = (Node *) deleteQuery->jointree->quals;
-	deleteCriteria = eval_const_expressions(NULL, whereClause);
+	Node *whereClause = (Node *) deleteQuery->jointree->quals;
+	Node *deleteCriteria = eval_const_expressions(NULL, whereClause);
 
-	partitionMethod = PartitionMethod(relationId);
+	char partitionMethod = PartitionMethod(relationId);
 	if (partitionMethod == DISTRIBUTE_BY_HASH)
 	{
 		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -184,7 +171,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 	CheckDeleteCriteria(deleteCriteria);
 	CheckPartitionColumn(relationId, deleteCriteria);
 
-	shardIntervalList = LoadShardIntervalList(relationId);
+	List *shardIntervalList = LoadShardIntervalList(relationId);
 
 	/* drop all shards if where clause is not present */
 	if (deleteCriteria == NULL)
@@ -199,8 +186,8 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 													  deleteCriteria);
 	}
 
-	droppedShardCount = DropShards(relationId, schemaName, relationName,
-								   deletableShardIntervalList);
+	int droppedShardCount = DropShards(relationId, schemaName, relationName,
+									   deletableShardIntervalList);
 
 	PG_RETURN_INT32(droppedShardCount);
 }
@@ -218,8 +205,6 @@ master_drop_all_shards(PG_FUNCTION_ARGS)
 	text *schemaNameText = PG_GETARG_TEXT_P(1);
 	text *relationNameText = PG_GETARG_TEXT_P(2);
 
-	List *shardIntervalList = NIL;
-	int droppedShardCount = 0;
-
 	char *schemaName = text_to_cstring(schemaNameText);
 	char *relationName = text_to_cstring(relationNameText);
@@ -246,9 +231,9 @@ master_drop_all_shards(PG_FUNCTION_ARGS)
 	 */
 	LockRelationOid(relationId, AccessExclusiveLock);
 
-	shardIntervalList = LoadShardIntervalList(relationId);
-	droppedShardCount = DropShards(relationId, schemaName, relationName,
-								   shardIntervalList);
+	List *shardIntervalList = LoadShardIntervalList(relationId);
+	int droppedShardCount = DropShards(relationId, schemaName, relationName,
+									   shardIntervalList);
 
 	PG_RETURN_INT32(droppedShardCount);
 }
@@ -265,7 +250,6 @@ Datum
 master_drop_sequences(PG_FUNCTION_ARGS)
 {
 	ArrayType *sequenceNamesArray = PG_GETARG_ARRAYTYPE_P(0);
-	ArrayIterator sequenceIterator = NULL;
 	Datum sequenceNameDatum = 0;
 	bool isNull = false;
 	StringInfo dropSeqCommand = makeStringInfo();
@@ -291,20 +275,17 @@ master_drop_sequences(PG_FUNCTION_ARGS)
 	}
 
 	/* iterate over sequence names to build single command to DROP them all */
-	sequenceIterator = array_create_iterator(sequenceNamesArray, 0, NULL);
+	ArrayIterator sequenceIterator = array_create_iterator(sequenceNamesArray, 0, NULL);
 	while (array_iterate(sequenceIterator, &sequenceNameDatum, &isNull))
 	{
-		text *sequenceNameText = NULL;
-		Oid sequenceOid = InvalidOid;
-
 		if (isNull)
 		{
 			ereport(ERROR, (errmsg("unexpected NULL sequence name"),
 							errcode(ERRCODE_INVALID_PARAMETER_VALUE)));
 		}
 
-		sequenceNameText = DatumGetTextP(sequenceNameDatum);
-		sequenceOid = ResolveRelationId(sequenceNameText, true);
+		text *sequenceNameText = DatumGetTextP(sequenceNameDatum);
+		Oid sequenceOid = ResolveRelationId(sequenceNameText, true);
 		if (OidIsValid(sequenceOid))
 		{
 			/*
@@ -379,7 +360,6 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 		   List *deletableShardIntervalList)
 {
 	ListCell *shardIntervalCell = NULL;
-	int droppedShardCount = 0;
 
 	BeginOrContinueCoordinatedTransaction();
 
@@ -391,20 +371,18 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 
 	foreach(shardIntervalCell, deletableShardIntervalList)
 	{
-		List *shardPlacementList = NIL;
 		ListCell *shardPlacementCell = NULL;
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
-		char *quotedShardName = NULL;
 		char *shardRelationName = pstrdup(relationName);
 
 		Assert(shardInterval->relationId == relationId);
 
 		/* Build shard relation name. */
 		AppendShardIdToName(&shardRelationName, shardId);
-		quotedShardName = quote_qualified_identifier(schemaName, shardRelationName);
+		char *quotedShardName = quote_qualified_identifier(schemaName, shardRelationName);
 
-		shardPlacementList = ShardPlacementList(shardId);
+		List *shardPlacementList = ShardPlacementList(shardId);
 		foreach(shardPlacementCell, shardPlacementList)
 		{
 			ShardPlacement *shardPlacement =
@@ -412,7 +390,6 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 			char *workerName = shardPlacement->nodeName;
 			uint32 workerPort = shardPlacement->nodePort;
 			StringInfo workerDropQuery = makeStringInfo();
-			MultiConnection *connection = NULL;
 			uint32 connectionFlags = FOR_DDL;
 
 			char storageType = shardInterval->storageType;
@@ -441,8 +418,9 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 				continue;
 			}
 
-			connection = GetPlacementConnection(connectionFlags, shardPlacement,
-												NULL);
+			MultiConnection *connection = GetPlacementConnection(connectionFlags,
+																 shardPlacement,
+																 NULL);
 
 			RemoteTransactionBeginIfNecessary(connection);
 
@@ -471,7 +449,7 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 		DeleteShardRow(shardId);
 	}
 
-	droppedShardCount = list_length(deletableShardIntervalList);
+	int droppedShardCount = list_length(deletableShardIntervalList);
 
 	return droppedShardCount;
 }
@@ -573,7 +551,6 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
 							 Node *deleteCriteria)
 {
 	List *dropShardIntervalList = NIL;
-	List *deleteCriteriaList = NIL;
 	ListCell *shardIntervalCell = NULL;
 
 	/* build the base expression for constraint */
@@ -582,7 +559,7 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
 	Node *baseConstraint = BuildBaseConstraint(partitionColumn);
 
 	Assert(deleteCriteria != NULL);
-	deleteCriteriaList = list_make1(deleteCriteria);
+	List *deleteCriteriaList = list_make1(deleteCriteria);
 
 	/* walk over shard list and check if shards can be dropped */
 	foreach(shardIntervalCell, shardIntervalList)
@@ -591,27 +568,23 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
 		if (shardInterval->minValueExists && shardInterval->maxValueExists)
 		{
 			List *restrictInfoList = NIL;
-			bool dropShard = false;
-			BoolExpr *andExpr = NULL;
-			Expr *lessThanExpr = NULL;
-			Expr *greaterThanExpr = NULL;
-			RestrictInfo *lessThanRestrictInfo = NULL;
-			RestrictInfo *greaterThanRestrictInfo = NULL;
 
 			/* set the min/max values in the base constraint */
 			UpdateConstraint(baseConstraint, shardInterval);
 
-			andExpr = (BoolExpr *) baseConstraint;
-			lessThanExpr = (Expr *) linitial(andExpr->args);
-			greaterThanExpr = (Expr *) lsecond(andExpr->args);
+			BoolExpr *andExpr = (BoolExpr *) baseConstraint;
+			Expr *lessThanExpr = (Expr *) linitial(andExpr->args);
+			Expr *greaterThanExpr = (Expr *) lsecond(andExpr->args);
 
-			lessThanRestrictInfo = make_simple_restrictinfo(lessThanExpr);
-			greaterThanRestrictInfo = make_simple_restrictinfo(greaterThanExpr);
+			RestrictInfo *lessThanRestrictInfo = make_simple_restrictinfo(lessThanExpr);
+			RestrictInfo *greaterThanRestrictInfo = make_simple_restrictinfo(
+				greaterThanExpr);
 
 			restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo);
 			restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo);
 
-			dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false);
+			bool dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList,
+												  false);
			if (dropShard)
 			{
 				dropShardIntervalList = lappend(dropShardIntervalList, shardInterval);

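
List accumulators such as `restrictInfoList` and `dropShardIntervalList` above also keep their `NIL` declarations: the empty list is a meaningful starting value that each `lappend` builds on, so there is no single assignment to fold the declaration into. The same shape in plain, runnable C (hypothetical names, a hand-rolled linked list standing in for PostgreSQL's List):

	#include <stdio.h>

	struct IntNode
	{
		int value;
		struct IntNode *next;
	};

	int
	main(void)
	{
		/* the accumulator's initial value (empty list) is real,
		 * so the declaration stays where it is */
		struct IntNode *head = NULL;

		struct IntNode nodes[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
		for (int nodeIndex = 0; nodeIndex < 3; nodeIndex++)
		{
			nodes[nodeIndex].next = head; /* prepend, like repeated lappend */
			head = &nodes[nodeIndex];
		}

		for (struct IntNode *node = head; node != NULL; node = node->next)
		{
			printf("%d\n", node->value);
		}
		return 0;
	}
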
@@ -91,7 +91,6 @@ Datum
 citus_total_relation_size(PG_FUNCTION_ARGS)
 {
 	Oid relationId = PG_GETARG_OID(0);
-	uint64 totalRelationSize = 0;
 	char *tableSizeFunction = PG_TOTAL_RELATION_SIZE_FUNCTION;
 
 	CheckCitusVersion(ERROR);
@@ -101,7 +100,7 @@ citus_total_relation_size(PG_FUNCTION_ARGS)
 		tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION;
 	}
 
-	totalRelationSize = DistributedTableSize(relationId, tableSizeFunction);
+	uint64 totalRelationSize = DistributedTableSize(relationId, tableSizeFunction);
 
 	PG_RETURN_INT64(totalRelationSize);
 }
@@ -115,7 +114,6 @@ Datum
 citus_table_size(PG_FUNCTION_ARGS)
 {
 	Oid relationId = PG_GETARG_OID(0);
-	uint64 tableSize = 0;
 	char *tableSizeFunction = PG_TABLE_SIZE_FUNCTION;
 
 	CheckCitusVersion(ERROR);
@@ -125,7 +123,7 @@ citus_table_size(PG_FUNCTION_ARGS)
 		tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION;
 	}
 
-	tableSize = DistributedTableSize(relationId, tableSizeFunction);
+	uint64 tableSize = DistributedTableSize(relationId, tableSizeFunction);
 
 	PG_RETURN_INT64(tableSize);
 }
@@ -139,7 +137,6 @@ Datum
 citus_relation_size(PG_FUNCTION_ARGS)
 {
 	Oid relationId = PG_GETARG_OID(0);
-	uint64 relationSize = 0;
 	char *tableSizeFunction = PG_RELATION_SIZE_FUNCTION;
 
 	CheckCitusVersion(ERROR);
@@ -149,7 +146,7 @@ citus_relation_size(PG_FUNCTION_ARGS)
 		tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION;
 	}
 
-	relationSize = DistributedTableSize(relationId, tableSizeFunction);
+	uint64 relationSize = DistributedTableSize(relationId, tableSizeFunction);
 
 	PG_RETURN_INT64(relationSize);
 }
@@ -163,8 +160,6 @@ static uint64
 DistributedTableSize(Oid relationId, char *sizeQuery)
 {
-	Relation relation = NULL;
-	List *workerNodeList = NULL;
 	ListCell *workerNodeCell = NULL;
 	uint64 totalRelationSize = 0;
 
@@ -175,7 +170,7 @@ DistributedTableSize(Oid relationId, char *sizeQuery)
 						 " blocks which contain multi-shard data modifications")));
 	}
 
-	relation = try_relation_open(relationId, AccessShareLock);
+	Relation relation = try_relation_open(relationId, AccessShareLock);
 
 	if (relation == NULL)
 	{
@@ -185,7 +180,7 @@ DistributedTableSize(Oid relationId, char *sizeQuery)
 
 	ErrorIfNotSuitableToGetSize(relationId);
 
-	workerNodeList = ActiveReadableNodeList();
+	List *workerNodeList = ActiveReadableNodeList();
 
 	foreach(workerNodeCell, workerNodeList)
 	{
@@ -209,27 +204,22 @@ static uint64
 DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, char *sizeQuery)
 {
-	StringInfo tableSizeQuery = NULL;
-	StringInfo tableSizeStringInfo = NULL;
 	char *workerNodeName = workerNode->workerName;
 	uint32 workerNodePort = workerNode->workerPort;
-	char *tableSizeString;
-	uint64 tableSize = 0;
-	MultiConnection *connection = NULL;
 	uint32 connectionFlag = 0;
 	PGresult *result = NULL;
-	int queryResult = 0;
-	List *sizeList = NIL;
 	bool raiseErrors = true;
 
 	List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId);
 
-	tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(relationId,
-														   shardIntervalsOnNode,
-														   sizeQuery);
+	StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(relationId,
+																	  shardIntervalsOnNode,
+																	  sizeQuery);
 
-	connection = GetNodeConnection(connectionFlag, workerNodeName, workerNodePort);
-	queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, &result);
+	MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName,
+													workerNodePort);
+	int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
+												   &result);
 
 	if (queryResult != 0)
 	{
@@ -237,10 +227,10 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, char *sizeQ
 						errmsg("cannot get the size because of a connection error")));
 	}
 
-	sizeList = ReadFirstColumnAsText(result);
-	tableSizeStringInfo = (StringInfo) linitial(sizeList);
-	tableSizeString = tableSizeStringInfo->data;
-	tableSize = atol(tableSizeString);
+	List *sizeList = ReadFirstColumnAsText(result);
+	StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList);
+	char *tableSizeString = tableSizeStringInfo->data;
+	uint64 tableSize = atol(tableSizeString);
 
 	PQclear(result);
 	ClearResults(connection, raiseErrors);
@@ -260,18 +250,17 @@ GroupShardPlacementsForTableOnGroup(Oid relationId, int32 groupId)
 	DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId);
 	List *resultList = NIL;
 
-	int shardIndex = 0;
 	int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength;
 
-	for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++)
+	for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++)
 	{
 		GroupShardPlacement *placementArray =
 			distTableCacheEntry->arrayOfPlacementArrays[shardIndex];
 		int numberOfPlacements =
 			distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex];
-		int placementIndex = 0;
 
-		for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++)
+		for (int placementIndex = 0; placementIndex < numberOfPlacements;
+			 placementIndex++)
 		{
 			GroupShardPlacement *placement = &placementArray[placementIndex];
 
@@ -298,24 +287,22 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
 {
 	DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId);
 	List *shardIntervalList = NIL;
-	int shardIndex = 0;
 	int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength;
 
-	for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++)
+	for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++)
 	{
 		GroupShardPlacement *placementArray =
 			distTableCacheEntry->arrayOfPlacementArrays[shardIndex];
 		int numberOfPlacements =
 			distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex];
-		int placementIndex = 0;
 
-		for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++)
+		for (int placementIndex = 0; placementIndex < numberOfPlacements;
+			 placementIndex++)
 		{
 			GroupShardPlacement *placement = &placementArray[placementIndex];
 			uint64 shardId = placement->shardId;
-			bool metadataLock = false;
 
-			metadataLock = TryLockShardDistributionMetadata(shardId, ShareLock);
+			bool metadataLock = TryLockShardDistributionMetadata(shardId, ShareLock);
 
 			/* if the lock is not acquired warn the user */
 			if (metadataLock == false)
@@ -364,12 +351,10 @@ GenerateSizeQueryOnMultiplePlacements(Oid distributedRelationId, List *shardInte
 		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
 		uint64 shardId = shardInterval->shardId;
 		char *shardName = get_rel_name(distributedRelationId);
-		char *shardQualifiedName = NULL;
-		char *quotedShardName = NULL;
 		AppendShardIdToName(&shardName, shardId);
 
-		shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
-		quotedShardName = quote_literal_cstr(shardQualifiedName);
+		char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
+		char *quotedShardName = quote_literal_cstr(shardQualifiedName);
 
 		appendStringInfo(selectQuery, sizeQuery, quotedShardName);
 		appendStringInfo(selectQuery, " + ");
@@ -509,12 +494,11 @@ LoadShardIntervalList(Oid relationId)
 {
 	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
 	List *shardList = NIL;
-	int i = 0;
 
-	for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++)
+	for (int i = 0; i < cacheEntry->shardIntervalArrayLength; i++)
 	{
-		ShardInterval *newShardInterval = NULL;
-		newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval));
+		ShardInterval *newShardInterval = (ShardInterval *) palloc0(
+			sizeof(ShardInterval));
 
 		CopyShardInterval(cacheEntry->sortedShardIntervalArray[i], newShardInterval);
 
@@ -557,9 +541,8 @@ LoadShardList(Oid relationId)
 {
 	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
 	List *shardList = NIL;
-	int i = 0;
 
-	for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++)
+	for (int i = 0; i < cacheEntry->shardIntervalArrayLength; i++)
 	{
 		ShardInterval *currentShardInterval = cacheEntry->sortedShardIntervalArray[i];
 		uint64 *shardIdPointer = AllocateUint64(currentShardInterval->shardId);
@@ -673,10 +656,7 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements)
 	const int scanKeyCount = (onlyConsiderActivePlacements ? 2 : 1);
 	const bool indexOK = false;
 
-	bool hasFinalizedPlacements = false;
-
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[2];
 
 	Relation pgPlacement = heap_open(DistPlacementRelationId(),
@@ -690,12 +670,13 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements)
 					BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(FILE_FINALIZED));
 	}
 
-	scanDescriptor = systable_beginscan(pgPlacement,
-										DistPlacementGroupidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgPlacement,
+													DistPlacementGroupidIndexId(),
+													indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
-	hasFinalizedPlacements = HeapTupleIsValid(heapTuple);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
+	bool hasFinalizedPlacements = HeapTupleIsValid(heapTuple);
 
 	systable_endscan(scanDescriptor);
 	heap_close(pgPlacement, NoLock);
@@ -772,23 +753,21 @@ BuildShardPlacementList(ShardInterval *shardInterval)
 {
 	int64 shardId = shardInterval->shardId;
 	List *shardPlacementList = NIL;
-	Relation pgPlacement = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	bool indexOK = true;
-	HeapTuple heapTuple = NULL;
 
-	pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
+	Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
 
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_shardid,
 				BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId));
 
-	scanDescriptor = systable_beginscan(pgPlacement,
-										DistPlacementShardidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgPlacement,
+													DistPlacementShardidIndexId(),
+													indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		TupleDesc tupleDescriptor = RelationGetDescr(pgPlacement);
@@ -817,23 +796,21 @@ List *
 AllShardPlacementsOnNodeGroup(int32 groupId)
 {
 	List *shardPlacementList = NIL;
-	Relation pgPlacement = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	bool indexOK = true;
-	HeapTuple heapTuple = NULL;
 
-	pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
+	Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
 
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_groupid,
 				BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId));
 
-	scanDescriptor = systable_beginscan(pgPlacement,
-										DistPlacementGroupidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgPlacement,
+													DistPlacementGroupidIndexId(),
+													indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		TupleDesc tupleDescriptor = RelationGetDescr(pgPlacement);
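
Both scan functions above converge on the same post-conversion shape: the catalog is opened first, and each cursor-like value (`scanDescriptor`, the first `heapTuple`) is declared at the call that produces it, then the scan is walked and closed. A runnable plain-C analogue of that open/scan/close shape, with directory iteration standing in for a catalog scan (nothing PostgreSQL-specific assumed):

	#include <dirent.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* open the resource first, like heap_open */
		DIR *directory = opendir("/tmp");
		if (directory == NULL)
		{
			return 1;
		}

		/* declare the cursor at the call that yields it,
		 * like systable_getnext */
		struct dirent *entry = readdir(directory);
		while (entry != NULL)
		{
			printf("%s\n", entry->d_name);
			entry = readdir(directory);
		}

		closedir(directory); /* like systable_endscan + heap_close */
		return 0;
	}
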
@@ -861,7 +838,6 @@ AllShardPlacementsOnNodeGroup(int32 groupId)
 static GroupShardPlacement *
 TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 {
-	GroupShardPlacement *shardPlacement = NULL;
 	bool isNullArray[Natts_pg_dist_placement];
 	Datum datumArray[Natts_pg_dist_placement];
 
@@ -877,7 +853,7 @@ TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 	 */
 	heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray);
 
-	shardPlacement = CitusMakeNode(GroupShardPlacement);
+	GroupShardPlacement *shardPlacement = CitusMakeNode(GroupShardPlacement);
 	shardPlacement->placementId = DatumGetInt64(
 		datumArray[Anum_pg_dist_placement_placementid - 1]);
 	shardPlacement->shardId = DatumGetInt64(
@@ -902,9 +878,6 @@ void
 InsertShardRow(Oid relationId, uint64 shardId, char storageType,
 			   text *shardMinValue, text *shardMaxValue)
 {
-	Relation pgDistShard = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_shard];
 	bool isNulls[Natts_pg_dist_shard];
 
@@ -932,10 +905,10 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType,
 	}
 
 	/* open shard relation and insert new tuple */
-	pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
+	Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
 
-	tupleDescriptor = RelationGetDescr(pgDistShard);
-	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistShard);
+	HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
 
 	CatalogTupleInsert(pgDistShard, heapTuple);
 
@@ -958,9 +931,6 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId,
 						char shardState, uint64 shardLength,
 						int32 groupId)
 {
-	Relation pgDistPlacement = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_placement];
 	bool isNulls[Natts_pg_dist_placement];
 
@@ -979,10 +949,10 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId,
 	values[Anum_pg_dist_placement_groupid - 1] = Int32GetDatum(groupId);
 
 	/* open shard placement relation and insert new tuple */
-	pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
+	Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
 
-	tupleDescriptor = RelationGetDescr(pgDistPlacement);
-	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
+	HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
 
 	CatalogTupleInsert(pgDistPlacement, heapTuple);
 
@@ -1003,15 +973,13 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
 						  Var *distributionColumn, uint32 colocationId,
 						  char replicationModel)
 {
-	Relation pgDistPartition = NULL;
 	char *distributionColumnString = NULL;
 
-	HeapTuple newTuple = NULL;
 	Datum newValues[Natts_pg_dist_partition];
 	bool newNulls[Natts_pg_dist_partition];
 
 	/* open system catalog and insert new tuple */
-	pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
+	Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
 
 	/* form new tuple for pg_dist_partition */
 	memset(newValues, 0, sizeof(newValues));
@@ -1038,7 +1006,8 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
 		newNulls[Anum_pg_dist_partition_partkey - 1] = true;
 	}
 
-	newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues, newNulls);
+	HeapTuple newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues,
+										 newNulls);
 
 	/* finally insert tuple, build index entries & register cache invalidation */
 	CatalogTupleInsert(pgDistPartition, newTuple);
@@ -1092,21 +1061,19 @@ RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distribut
 void
 DeletePartitionRow(Oid distributedRelationId)
 {
-	Relation pgDistPartition = NULL;
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 
-	pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
+	Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
 
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid,
 				BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId));
 
-	scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false, NULL,
-										scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false,
+													NULL,
+													scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for partition %d",
@@ -1134,33 +1101,28 @@ DeletePartitionRow(Oid distributedRelationId)
 void
 DeleteShardRow(uint64 shardId)
 {
-	Relation pgDistShard = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	bool indexOK = true;
-	HeapTuple heapTuple = NULL;
-	Form_pg_dist_shard pgDistShardForm = NULL;
-	Oid distributedRelationId = InvalidOid;
 
-	pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
+	Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
 
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid,
 				BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId));
 
-	scanDescriptor = systable_beginscan(pgDistShard,
-										DistShardShardidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistShard,
+													DistShardShardidIndexId(), indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for shard "
 							   UINT64_FORMAT, shardId)));
 	}
 
-	pgDistShardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
-	distributedRelationId = pgDistShardForm->logicalrelid;
+	Form_pg_dist_shard pgDistShardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
+	Oid distributedRelationId = pgDistShardForm->logicalrelid;
 
 	simple_heap_delete(pgDistShard, &heapTuple->t_self);
 
@@ -1181,35 +1143,31 @@ DeleteShardRow(uint64 shardId)
 void
 DeleteShardPlacementRow(uint64 placementId)
 {
-	Relation pgDistPlacement = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	const int scanKeyCount = 1;
 	ScanKeyData scanKey[1];
 	bool indexOK = true;
-	HeapTuple heapTuple = NULL;
-	TupleDesc tupleDescriptor = NULL;
 	bool isNull = false;
-	uint64 shardId = 0;
 
-	pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(pgDistPlacement);
+	Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
 
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid,
 				BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId));
 
-	scanDescriptor = systable_beginscan(pgDistPlacement,
-										DistPlacementPlacementidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistPlacement,
+													DistPlacementPlacementidIndexId(),
+													indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (heapTuple == NULL)
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for shard placement "
 							   INT64_FORMAT, placementId)));
 	}
 
-	shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid,
-						   tupleDescriptor, &isNull);
+	uint64 shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid,
+								  tupleDescriptor, &isNull);
 	if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement ||
 		HeapTupleHasNulls(heapTuple))
 	{
@@ -1233,29 +1191,25 @@ DeleteShardPlacementRow(uint64 placementId)
 void
 UpdateShardPlacementState(uint64 placementId, char shardState)
 {
-	Relation pgDistPlacement = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 1;
 	bool indexOK = true;
-	HeapTuple heapTuple = NULL;
-	TupleDesc tupleDescriptor = NULL;
 	Datum values[Natts_pg_dist_placement];
 	bool isnull[Natts_pg_dist_placement];
 	bool replace[Natts_pg_dist_placement];
-	uint64 shardId = INVALID_SHARD_ID;
 	bool colIsNull = false;
 
-	pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(pgDistPlacement);
+	Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid,
 				BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId));
 
-	scanDescriptor = systable_beginscan(pgDistPlacement,
-										DistPlacementPlacementidIndexId(), indexOK,
-										NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistPlacement,
+													DistPlacementPlacementidIndexId(),
+													indexOK,
+													NULL, scanKeyCount, scanKey);
 
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for shard placement "
@@ -1273,9 +1227,9 @@ UpdateShardPlacementState(uint64 placementId, char shardState)
 
 	CatalogTupleUpdate(pgDistPlacement, &heapTuple->t_self, heapTuple);
 
-	shardId = DatumGetInt64(heap_getattr(heapTuple,
-										 Anum_pg_dist_placement_shardid,
-										 tupleDescriptor, &colIsNull));
+	uint64 shardId = DatumGetInt64(heap_getattr(heapTuple,
+												Anum_pg_dist_placement_shardid,
+												tupleDescriptor, &colIsNull));
 	Assert(!colIsNull);
 	CitusInvalidateRelcacheByShardId(shardId);
 
@@ -1293,9 +1247,7 @@ UpdateShardPlacementState(uint64 placementId, char shardState)
 void
 EnsureTablePermissions(Oid relationId, AclMode mode)
 {
-	AclResult aclresult;
-
-	aclresult = pg_class_aclcheck(relationId, GetUserId(), mode);
+	AclResult aclresult = pg_class_aclcheck(relationId, GetUserId(), mode);
 
 	if (aclresult != ACLCHECK_OK)
 	{
@@ -1385,17 +1337,14 @@ EnsureSuperUser(void)
 char *
 TableOwner(Oid relationId)
 {
-	Oid userId = InvalidOid;
-	HeapTuple tuple;
-
-	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
+	HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
 	if (!HeapTupleIsValid(tuple))
 	{
 		ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE),
 						errmsg("relation with OID %u does not exist", relationId)));
 	}
 
-	userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
+	Oid userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
 
 	ReleaseSysCache(tuple);
 
@@ -94,26 +94,20 @@ master_get_table_metadata(PG_FUNCTION_ARGS)
 	text *relationName = PG_GETARG_TEXT_P(0);
 	Oid relationId = ResolveRelationId(relationName, false);
 
-	DistTableCacheEntry *partitionEntry = NULL;
-	char *partitionKeyString = NULL;
-	TypeFuncClass resultTypeClass = 0;
 	Datum partitionKeyExpr = 0;
 	Datum partitionKey = 0;
-	Datum metadataDatum = 0;
-	HeapTuple metadataTuple = NULL;
 	TupleDesc metadataDescriptor = NULL;
-	uint64 shardMaxSizeInBytes = 0;
-	char shardStorageType = 0;
 	Datum values[TABLE_METADATA_FIELDS];
 	bool isNulls[TABLE_METADATA_FIELDS];
 
 	CheckCitusVersion(ERROR);
 
 	/* find partition tuple for partitioned relation */
-	partitionEntry = DistributedTableCacheEntry(relationId);
+	DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId);
 
 	/* create tuple descriptor for return value */
-	resultTypeClass = get_call_result_type(fcinfo, NULL, &metadataDescriptor);
+	TypeFuncClass resultTypeClass = get_call_result_type(fcinfo, NULL,
+														 &metadataDescriptor);
 	if (resultTypeClass != TYPEFUNC_COMPOSITE)
 	{
 		ereport(ERROR, (errmsg("return type must be a row type")));
@@ -123,7 +117,7 @@ master_get_table_metadata(PG_FUNCTION_ARGS)
 	memset(values, 0, sizeof(values));
 	memset(isNulls, false, sizeof(isNulls));
 
-	partitionKeyString = partitionEntry->partitionKeyString;
+	char *partitionKeyString = partitionEntry->partitionKeyString;
 
 	/* reference tables do not have partition key */
 	if (partitionKeyString == NULL)
@@ -140,10 +134,10 @@ master_get_table_metadata(PG_FUNCTION_ARGS)
 										   ObjectIdGetDatum(relationId));
 	}
 
-	shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
+	uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
 
 	/* get storage type */
-	shardStorageType = ShardStorageType(relationId);
+	char shardStorageType = ShardStorageType(relationId);
 
 	values[0] = ObjectIdGetDatum(relationId);
 	values[1] = shardStorageType;
@@ -153,8 +147,8 @@ master_get_table_metadata(PG_FUNCTION_ARGS)
 	values[5] = Int64GetDatum(shardMaxSizeInBytes);
 	values[6] = Int32GetDatum(ShardPlacementPolicy);
 
-	metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls);
-	metadataDatum = HeapTupleGetDatum(metadataTuple);
+	HeapTuple metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls);
+	Datum metadataDatum = HeapTupleGetDatum(metadataTuple);
 
 	PG_RETURN_DATUM(metadataDatum);
 }
@@ -212,17 +206,16 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 		Oid relationId = ResolveRelationId(relationName, false);
 		bool includeSequenceDefaults = true;
 
-		MemoryContext oldContext = NULL;
-		List *tableDDLEventList = NIL;
-
 		/* create a function context for cross-call persistence */
 		functionContext = SRF_FIRSTCALL_INIT();
 
 		/* switch to memory context appropriate for multiple function calls */
-		oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx);
+		MemoryContext oldContext = MemoryContextSwitchTo(
+			functionContext->multi_call_memory_ctx);
 
 		/* allocate DDL statements, and then save position in DDL statements */
-		tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults);
+		List *tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults);
 		tableDDLEventCell = list_head(tableDDLEventList);
 
 		functionContext->user_fctx = tableDDLEventCell;
@@ -266,14 +259,11 @@ Datum
 master_get_new_shardid(PG_FUNCTION_ARGS)
 {
-	uint64 shardId = 0;
-	Datum shardIdDatum = 0;
-
 	EnsureCoordinator();
 	CheckCitusVersion(ERROR);
 
-	shardId = GetNextShardId();
-	shardIdDatum = Int64GetDatum(shardId);
+	uint64 shardId = GetNextShardId();
+	Datum shardIdDatum = Int64GetDatum(shardId);
 
 	PG_RETURN_DATUM(shardIdDatum);
 }
@@ -290,12 +280,8 @@ uint64
 GetNextShardId()
 {
-	text *sequenceName = NULL;
-	Oid sequenceId = InvalidOid;
-	Datum sequenceIdDatum = 0;
 	Oid savedUserId = InvalidOid;
 	int savedSecurityContext = 0;
-	Datum shardIdDatum = 0;
 	uint64 shardId = 0;
 
 	/*
@@ -313,15 +299,15 @@ GetNextShardId()
 		return shardId;
 	}
 
-	sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME);
-	sequenceId = ResolveRelationId(sequenceName, false);
-	sequenceIdDatum = ObjectIdGetDatum(sequenceId);
+	text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME);
+	Oid sequenceId = ResolveRelationId(sequenceName, false);
+	Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
 
 	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
 	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);
 
 	/* generate new and unique shardId from sequence */
-	shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
+	Datum shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
 
 	SetUserIdAndSecContext(savedUserId, savedSecurityContext);
 
@ -343,14 +329,11 @@ GetNextShardId()
|
|||
Datum
|
||||
master_get_new_placementid(PG_FUNCTION_ARGS)
|
||||
{
|
||||
uint64 placementId = 0;
|
||||
Datum placementIdDatum = 0;
|
||||
|
||||
EnsureCoordinator();
|
||||
CheckCitusVersion(ERROR);
|
||||
|
||||
placementId = GetNextPlacementId();
|
||||
placementIdDatum = Int64GetDatum(placementId);
|
||||
uint64 placementId = GetNextPlacementId();
|
||||
Datum placementIdDatum = Int64GetDatum(placementId);
|
||||
|
||||
PG_RETURN_DATUM(placementIdDatum);
|
||||
}
|
||||
|
@ -369,12 +352,8 @@ master_get_new_placementid(PG_FUNCTION_ARGS)
|
|||
uint64
|
||||
GetNextPlacementId(void)
|
||||
{
|
||||
text *sequenceName = NULL;
|
||||
Oid sequenceId = InvalidOid;
|
||||
Datum sequenceIdDatum = 0;
|
||||
Oid savedUserId = InvalidOid;
|
||||
int savedSecurityContext = 0;
|
||||
Datum placementIdDatum = 0;
|
||||
uint64 placementId = 0;
|
||||
|
||||
/*
|
||||
|
@ -392,15 +371,15 @@ GetNextPlacementId(void)
|
|||
return placementId;
|
||||
}
|
||||
|
||||
sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME);
|
||||
sequenceId = ResolveRelationId(sequenceName, false);
|
||||
sequenceIdDatum = ObjectIdGetDatum(sequenceId);
|
||||
text *sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME);
|
||||
Oid sequenceId = ResolveRelationId(sequenceName, false);
|
||||
Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
|
||||
|
||||
GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
|
||||
SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);
|
||||
|
||||
/* generate new and unique placement id from sequence */
|
||||
placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
|
||||
Datum placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
|
||||
|
||||
SetUserIdAndSecContext(savedUserId, savedSecurityContext);
|
||||
|
||||
|
@ -465,17 +444,16 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS)
|
|||
|
||||
if (SRF_IS_FIRSTCALL())
|
||||
{
|
||||
MemoryContext oldContext = NULL;
|
||||
List *workerNodeList = NIL;
|
||||
TupleDesc tupleDescriptor = NULL;
|
||||
|
||||
/* create a function context for cross-call persistence */
|
||||
functionContext = SRF_FIRSTCALL_INIT();
|
||||
|
||||
/* switch to memory context appropriate for multiple function calls */
|
||||
oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx);
|
||||
MemoryContext oldContext = MemoryContextSwitchTo(
|
||||
functionContext->multi_call_memory_ctx);
|
||||
|
||||
workerNodeList = ActiveReadableWorkerNodeList();
|
||||
List *workerNodeList = ActiveReadableWorkerNodeList();
|
||||
workerNodeCount = (uint32) list_length(workerNodeList);
|
||||
|
||||
functionContext->user_fctx = workerNodeList;
|
||||
|
@ -525,14 +503,10 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS)
|
|||
Oid
|
||||
ResolveRelationId(text *relationName, bool missingOk)
|
||||
{
|
||||
List *relationNameList = NIL;
|
||||
RangeVar *relation = NULL;
|
||||
Oid relationId = InvalidOid;
|
||||
|
||||
/* resolve relationId from passed in schema and relation name */
|
||||
relationNameList = textToQualifiedNameList(relationName);
|
||||
relation = makeRangeVarFromNameList(relationNameList);
|
||||
relationId = RangeVarGetRelid(relation, NoLock, missingOk);
|
||||
List *relationNameList = textToQualifiedNameList(relationName);
|
||||
RangeVar *relation = makeRangeVarFromNameList(relationNameList);
|
||||
Oid relationId = RangeVarGetRelid(relation, NoLock, missingOk);
|
||||
|
||||
return relationId;
|
||||
}
|
||||
|
@ -551,22 +525,18 @@ List *
|
|||
GetTableDDLEvents(Oid relationId, bool includeSequenceDefaults)
|
||||
{
|
||||
List *tableDDLEventList = NIL;
|
||||
List *tableCreationCommandList = NIL;
|
||||
List *indexAndConstraintCommandList = NIL;
|
||||
List *replicaIdentityEvents = NIL;
|
||||
List *policyCommands = NIL;
|
||||
|
||||
tableCreationCommandList = GetTableCreationCommands(relationId,
|
||||
includeSequenceDefaults);
|
||||
List *tableCreationCommandList = GetTableCreationCommands(relationId,
|
||||
includeSequenceDefaults);
|
||||
tableDDLEventList = list_concat(tableDDLEventList, tableCreationCommandList);
|
||||
|
||||
indexAndConstraintCommandList = GetTableIndexAndConstraintCommands(relationId);
|
||||
List *indexAndConstraintCommandList = GetTableIndexAndConstraintCommands(relationId);
|
||||
tableDDLEventList = list_concat(tableDDLEventList, indexAndConstraintCommandList);
|
||||
|
||||
replicaIdentityEvents = GetTableReplicaIdentityCommand(relationId);
|
||||
List *replicaIdentityEvents = GetTableReplicaIdentityCommand(relationId);
|
||||
tableDDLEventList = list_concat(tableDDLEventList, replicaIdentityEvents);
|
||||
|
||||
policyCommands = CreatePolicyCommands(relationId);
|
||||
List *policyCommands = CreatePolicyCommands(relationId);
|
||||
tableDDLEventList = list_concat(tableDDLEventList, policyCommands);
|
||||
|
||||
return tableDDLEventList;
|
||||
|
@ -581,7 +551,6 @@ static List *
|
|||
GetTableReplicaIdentityCommand(Oid relationId)
|
||||
{
|
||||
List *replicaIdentityCreateCommandList = NIL;
|
||||
char *replicaIdentityCreateCommand = NULL;
|
||||
|
||||
/*
|
||||
* We skip non-relations because postgres does not support
|
||||
|
@ -593,7 +562,7 @@ GetTableReplicaIdentityCommand(Oid relationId)
|
|||
return NIL;
|
||||
}
|
||||
|
||||
replicaIdentityCreateCommand = pg_get_replica_identity_command(relationId);
|
||||
char *replicaIdentityCreateCommand = pg_get_replica_identity_command(relationId);
|
||||
|
||||
if (replicaIdentityCreateCommand)
|
||||
{
|
||||
|
@ -614,10 +583,6 @@ List *
|
|||
GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
|
||||
{
|
||||
List *tableDDLEventList = NIL;
|
||||
char tableType = 0;
|
||||
char *tableSchemaDef = NULL;
|
||||
char *tableColumnOptionsDef = NULL;
|
||||
char *tableOwnerDef = NULL;
|
||||
|
||||
/*
|
||||
* Set search_path to NIL so that all objects outside of pg_catalog will be
|
||||
|
@ -630,7 +595,7 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
|
|||
PushOverrideSearchPath(overridePath);
|
||||
|
||||
/* if foreign table, fetch extension and server definitions */
|
||||
tableType = get_rel_relkind(relationId);
|
||||
char tableType = get_rel_relkind(relationId);
|
||||
if (tableType == RELKIND_FOREIGN_TABLE)
|
||||
{
|
||||
char *extensionDef = pg_get_extensiondef_string(relationId);
|
||||
|
@ -644,8 +609,9 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
|
|||
}
|
||||
|
||||
/* fetch table schema and column option definitions */
|
||||
tableSchemaDef = pg_get_tableschemadef_string(relationId, includeSequenceDefaults);
|
||||
tableColumnOptionsDef = pg_get_tablecolumnoptionsdef_string(relationId);
|
||||
char *tableSchemaDef = pg_get_tableschemadef_string(relationId,
|
||||
includeSequenceDefaults);
|
||||
char *tableColumnOptionsDef = pg_get_tablecolumnoptionsdef_string(relationId);
|
||||
|
||||
tableDDLEventList = lappend(tableDDLEventList, tableSchemaDef);
|
||||
if (tableColumnOptionsDef != NULL)
|
||||
|
@ -653,7 +619,7 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
|
|||
tableDDLEventList = lappend(tableDDLEventList, tableColumnOptionsDef);
|
||||
}
|
||||
|
||||
tableOwnerDef = TableOwnerResetCommand(relationId);
|
||||
char *tableOwnerDef = TableOwnerResetCommand(relationId);
|
||||
if (tableOwnerDef != NULL)
|
||||
{
|
||||
tableDDLEventList = lappend(tableDDLEventList, tableOwnerDef);
|
||||
|
@ -674,11 +640,8 @@ List *
|
|||
GetTableIndexAndConstraintCommands(Oid relationId)
|
||||
{
|
||||
List *indexDDLEventList = NIL;
|
||||
Relation pgIndex = NULL;
|
||||
SysScanDesc scanDescriptor = NULL;
|
||||
ScanKeyData scanKey[1];
|
||||
int scanKeyCount = 1;
|
||||
HeapTuple heapTuple = NULL;
|
||||
|
||||
/*
|
||||
* Set search_path to NIL so that all objects outside of pg_catalog will be
|
||||
|
@ -691,16 +654,16 @@ GetTableIndexAndConstraintCommands(Oid relationId)
|
|||
PushOverrideSearchPath(overridePath);
|
||||
|
||||
/* open system catalog and scan all indexes that belong to this table */
|
||||
pgIndex = heap_open(IndexRelationId, AccessShareLock);
|
||||
Relation pgIndex = heap_open(IndexRelationId, AccessShareLock);
|
||||
|
||||
ScanKeyInit(&scanKey[0], Anum_pg_index_indrelid,
|
||||
BTEqualStrategyNumber, F_OIDEQ, relationId);
|
||||
|
||||
scanDescriptor = systable_beginscan(pgIndex,
|
||||
IndexIndrelidIndexId, true, /* indexOK */
|
||||
NULL, scanKeyCount, scanKey);
|
||||
SysScanDesc scanDescriptor = systable_beginscan(pgIndex,
|
||||
IndexIndrelidIndexId, true, /* indexOK */
|
||||
NULL, scanKeyCount, scanKey);
|
||||
|
||||
heapTuple = systable_getnext(scanDescriptor);
|
||||
HeapTuple heapTuple = systable_getnext(scanDescriptor);
|
||||
while (HeapTupleIsValid(heapTuple))
|
||||
{
|
||||
Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(heapTuple);
|
||||
|
@ -824,8 +787,6 @@ WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor)
|
|||
{
|
||||
Datum values[WORKER_NODE_FIELDS];
|
||||
bool isNulls[WORKER_NODE_FIELDS];
|
||||
HeapTuple workerNodeTuple = NULL;
|
||||
Datum workerNodeDatum = 0;
|
||||
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(isNulls, false, sizeof(isNulls));
|
||||
|
@ -833,8 +794,8 @@ WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor)
|
|||
values[0] = CStringGetTextDatum(workerNode->workerName);
|
||||
values[1] = Int64GetDatum((int64) workerNode->workerPort);
|
||||
|
||||
workerNodeTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
|
||||
workerNodeDatum = HeapTupleGetDatum(workerNodeTuple);
|
||||
HeapTuple workerNodeTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
|
||||
Datum workerNodeDatum = HeapTupleGetDatum(workerNodeTuple);
|
||||
|
||||
return workerNodeDatum;
|
||||
}
|
||||
|
|
|
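Every hunk in the file above follows the same mechanical rewrite that the CI script automates: a variable that was declared at the top of the function and assigned exactly once further down is collapsed into a single declaration at its first assignment, as C99 allows. A minimal self-contained sketch of the before/after shape, using hypothetical names that are not from the Citus sources:

#include <stdio.h>

/* Before: the declaration is far from its single assignment. */
static int
shard_fill_percent_before(long shardSize, long shardMaxSize)
{
	int fillPercent = 0;

	/* ... unrelated work could appear here ... */

	fillPercent = (int) ((shardSize * 100) / shardMaxSize);
	return fillPercent;
}

/* After: declaration and assignment are merged at the first use. */
static int
shard_fill_percent_after(long shardSize, long shardMaxSize)
{
	/* ... unrelated work could appear here ... */

	int fillPercent = (int) ((shardSize * 100) / shardMaxSize);
	return fillPercent;
}

int
main(void)
{
	/* both variants compute the same value; only the declaration moved */
	printf("%d %d\n",
		   shard_fill_percent_before(512, 1024),
		   shard_fill_percent_after(512, 1024));
	return 0;
}

The payoff is scoping: the variable cannot be read before it holds a meaningful value, and the dummy initializers (NULL, 0, InvalidOid) that existed only to satisfy the old style disappear.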
@@ -139,9 +139,6 @@ BlockWritesToShardList(List *shardList)
{
	ListCell *shardCell = NULL;

	bool shouldSyncMetadata = false;
	ShardInterval *firstShardInterval = NULL;
	Oid firstDistributedTableId = InvalidOid;

	foreach(shardCell, shardList)
	{

@@ -167,10 +164,10 @@ BlockWritesToShardList(List *shardList)
	 * Since the function assumes that the input shards are colocated,
	 * calculating shouldSyncMetadata for a single table is sufficient.
	 */
	firstShardInterval = (ShardInterval *) linitial(shardList);
	firstDistributedTableId = firstShardInterval->relationId;
	ShardInterval *firstShardInterval = (ShardInterval *) linitial(shardList);
	Oid firstDistributedTableId = firstShardInterval->relationId;

	shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId);
	bool shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId);
	if (shouldSyncMetadata)
	{
		LockShardListMetadataOnWorkers(ExclusiveLock, shardList);

@@ -225,13 +222,7 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort,
	char relationKind = get_rel_relkind(distributedTableId);
	char *tableOwner = TableOwner(shardInterval->relationId);
	bool missingOk = false;
	bool includeData = false;
	bool partitionedTable = false;

	List *ddlCommandList = NIL;
	List *foreignConstraintCommandList = NIL;
	List *placementList = NIL;
	ShardPlacement *placement = NULL;

	/* prevent table from being dropped */
	LockRelationOid(distributedTableId, AccessShareLock);

@@ -287,13 +278,14 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort,
	 * If the shard belongs to a partitioned table, we need to load the data after
	 * creating the partitions and the partitioning hierarcy.
	 */
	partitionedTable = PartitionedTableNoLock(distributedTableId);
	includeData = !partitionedTable;
	bool partitionedTable = PartitionedTableNoLock(distributedTableId);
	bool includeData = !partitionedTable;

	/* we generate necessary commands to recreate the shard in target node */
	ddlCommandList =
	List *ddlCommandList =
		CopyShardCommandList(shardInterval, sourceNodeName, sourceNodePort, includeData);
	foreignConstraintCommandList = CopyShardForeignConstraintCommandList(shardInterval);
	List *foreignConstraintCommandList = CopyShardForeignConstraintCommandList(
		shardInterval);
	ddlCommandList = list_concat(ddlCommandList, foreignConstraintCommandList);

	/*

@@ -305,12 +297,10 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort,
	 */
	if (partitionedTable)
	{
		List *partitionCommandList = NIL;

		char *shardName = ConstructQualifiedShardName(shardInterval);
		StringInfo copyShardDataCommand = makeStringInfo();

		partitionCommandList =
		List *partitionCommandList =
			CopyPartitionShardsCommandList(shardInterval, sourceNodeName, sourceNodePort);
		ddlCommandList = list_concat(ddlCommandList, partitionCommandList);

@@ -328,9 +318,10 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort,
									ddlCommandList);

	/* after successful repair, we update shard state as healthy*/
	placementList = ShardPlacementList(shardId);
	placement = SearchShardPlacementInList(placementList, targetNodeName, targetNodePort,
										   missingOk);
	List *placementList = ShardPlacementList(shardId);
	ShardPlacement *placement = SearchShardPlacementInList(placementList, targetNodeName,
															targetNodePort,
															missingOk);
	UpdateShardPlacementState(placement->placementId, FILE_FINALIZED);
}

@@ -347,13 +338,12 @@ CopyPartitionShardsCommandList(ShardInterval *shardInterval, char *sourceNodeNam
							   int32 sourceNodePort)
{
	Oid distributedTableId = shardInterval->relationId;
	List *partitionList = NIL;
	ListCell *partitionOidCell = NULL;
	List *ddlCommandList = NIL;

	Assert(PartitionedTableNoLock(distributedTableId));

	partitionList = PartitionList(distributedTableId);
	List *partitionList = PartitionList(distributedTableId);
	foreach(partitionOidCell, partitionList)
	{
		Oid partitionOid = lfirst_oid(partitionOidCell);

@@ -361,15 +351,13 @@ CopyPartitionShardsCommandList(ShardInterval *shardInterval, char *sourceNodeNam
			ColocatedShardIdInRelation(partitionOid, shardInterval->shardIndex);
		ShardInterval *partitionShardInterval = LoadShardInterval(partitionShardId);
		bool includeData = false;
		List *copyCommandList = NIL;
		char *attachPartitionCommand = NULL;

		copyCommandList =
		List *copyCommandList =
			CopyShardCommandList(partitionShardInterval, sourceNodeName, sourceNodePort,
								 includeData);
		ddlCommandList = list_concat(ddlCommandList, copyCommandList);

		attachPartitionCommand =
		char *attachPartitionCommand =
			GenerateAttachShardPartitionCommand(partitionShardInterval);
		ddlCommandList = lappend(ddlCommandList, attachPartitionCommand);
	}

@@ -387,21 +375,23 @@ EnsureShardCanBeRepaired(int64 shardId, char *sourceNodeName, int32 sourceNodePo
						 char *targetNodeName, int32 targetNodePort)
{
	List *shardPlacementList = ShardPlacementList(shardId);
	ShardPlacement *sourcePlacement = NULL;
	ShardPlacement *targetPlacement = NULL;
	bool missingSourceOk = false;
	bool missingTargetOk = false;

	sourcePlacement = SearchShardPlacementInList(shardPlacementList, sourceNodeName,
												 sourceNodePort, missingSourceOk);
	ShardPlacement *sourcePlacement = SearchShardPlacementInList(shardPlacementList,
																 sourceNodeName,
																 sourceNodePort,
																 missingSourceOk);
	if (sourcePlacement->shardState != FILE_FINALIZED)
	{
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("source placement must be in finalized state")));
	}

	targetPlacement = SearchShardPlacementInList(shardPlacementList, targetNodeName,
												 targetNodePort, missingTargetOk);
	ShardPlacement *targetPlacement = SearchShardPlacementInList(shardPlacementList,
																 targetNodeName,
																 targetNodePort,
																 missingTargetOk);
	if (targetPlacement->shardState != FILE_INACTIVE)
	{
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),

@@ -462,13 +452,11 @@ CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName,
{
	int64 shardId = shardInterval->shardId;
	char *shardName = ConstructQualifiedShardName(shardInterval);
	List *tableRecreationCommandList = NIL;
	List *indexCommandList = NIL;
	List *copyShardToNodeCommandsList = NIL;
	StringInfo copyShardDataCommand = makeStringInfo();
	Oid relationId = shardInterval->relationId;

	tableRecreationCommandList = RecreateTableDDLCommandList(relationId);
	List *tableRecreationCommandList = RecreateTableDDLCommandList(relationId);
	tableRecreationCommandList =
		WorkerApplyShardDDLCommandList(tableRecreationCommandList, shardId);

@@ -491,7 +479,7 @@ CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName,
										 copyShardDataCommand->data);
	}

	indexCommandList = GetTableIndexAndConstraintCommands(relationId);
	List *indexCommandList = GetTableIndexAndConstraintCommands(relationId);
	indexCommandList = WorkerApplyShardDDLCommandList(indexCommandList, shardId);

	copyShardToNodeCommandsList = list_concat(copyShardToNodeCommandsList,

@@ -555,17 +543,13 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval,
		char *command = (char *) lfirst(commandCell);
		char *escapedCommand = quote_literal_cstr(command);

		Oid referencedRelationId = InvalidOid;
		Oid referencedSchemaId = InvalidOid;
		char *referencedSchemaName = NULL;
		char *escapedReferencedSchemaName = NULL;
		uint64 referencedShardId = INVALID_SHARD_ID;
		bool colocatedForeignKey = false;

		StringInfo applyForeignConstraintCommand = makeStringInfo();

		/* we need to parse the foreign constraint command to get referencing table id */
		referencedRelationId = ForeignConstraintGetReferencedTableId(command);
		Oid referencedRelationId = ForeignConstraintGetReferencedTableId(command);
		if (referencedRelationId == InvalidOid)
		{
			ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),

@@ -573,9 +557,9 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval,
							errdetail("Referenced relation cannot be found.")));
		}

		referencedSchemaId = get_rel_namespace(referencedRelationId);
		referencedSchemaName = get_namespace_name(referencedSchemaId);
		escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);
		Oid referencedSchemaId = get_rel_namespace(referencedRelationId);
		char *referencedSchemaName = get_namespace_name(referencedSchemaId);
		char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);

		if (PartitionMethod(referencedRelationId) == DISTRIBUTE_BY_NONE)
		{

@@ -635,9 +619,8 @@ ConstructQualifiedShardName(ShardInterval *shardInterval)
	Oid schemaId = get_rel_namespace(shardInterval->relationId);
	char *schemaName = get_namespace_name(schemaId);
	char *tableName = get_rel_name(shardInterval->relationId);
	char *shardName = NULL;

	shardName = pstrdup(tableName);
	char *shardName = pstrdup(tableName);
	AppendShardIdToName(&shardName, shardInterval->shardId);
	shardName = quote_qualified_identifier(schemaName, shardName);

@@ -660,9 +643,6 @@ RecreateTableDDLCommandList(Oid relationId)
						relationName);

	StringInfo dropCommand = makeStringInfo();
	List *createCommandList = NIL;
	List *dropCommandList = NIL;
	List *recreateCommandList = NIL;
	char relationKind = get_rel_relkind(relationId);
	bool includeSequenceDefaults = false;

@@ -684,9 +664,10 @@ RecreateTableDDLCommandList(Oid relationId)
						"table")));
	}

	dropCommandList = list_make1(dropCommand->data);
	createCommandList = GetTableCreationCommands(relationId, includeSequenceDefaults);
	recreateCommandList = list_concat(dropCommandList, createCommandList);
	List *dropCommandList = list_make1(dropCommand->data);
	List *createCommandList = GetTableCreationCommands(relationId,
													   includeSequenceDefaults);
	List *recreateCommandList = list_concat(dropCommandList, createCommandList);

	return recreateCommandList;
}

@@ -62,16 +62,13 @@ Datum
worker_hash(PG_FUNCTION_ARGS)
{
	Datum valueDatum = PG_GETARG_DATUM(0);
	Datum hashedValueDatum = 0;
	TypeCacheEntry *typeEntry = NULL;
	FmgrInfo *hashFunction = NULL;
	Oid valueDataType = InvalidOid;

	CheckCitusVersion(ERROR);

	/* figure out hash function from the data type */
	valueDataType = get_fn_expr_argtype(fcinfo->flinfo, 0);
	typeEntry = lookup_type_cache(valueDataType, TYPECACHE_HASH_PROC_FINFO);
	Oid valueDataType = get_fn_expr_argtype(fcinfo->flinfo, 0);
	TypeCacheEntry *typeEntry = lookup_type_cache(valueDataType,
												  TYPECACHE_HASH_PROC_FINFO);

	if (typeEntry->hash_proc_finfo.fn_oid == InvalidOid)
	{

@@ -80,11 +77,12 @@ worker_hash(PG_FUNCTION_ARGS)
						errhint("Cast input to a data type with a hash function.")));
	}

	hashFunction = palloc0(sizeof(FmgrInfo));
	FmgrInfo *hashFunction = palloc0(sizeof(FmgrInfo));
	fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext);

	/* calculate hash value */
	hashedValueDatum = FunctionCall1Coll(hashFunction, PG_GET_COLLATION(), valueDatum);
	Datum hashedValueDatum = FunctionCall1Coll(hashFunction, PG_GET_COLLATION(),
											   valueDatum);

	PG_RETURN_INT32(hashedValueDatum);
}

@@ -80,21 +80,17 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
{
	text *relationNameText = PG_GETARG_TEXT_P(0);
	char *relationName = text_to_cstring(relationNameText);
	uint64 shardId = INVALID_SHARD_ID;
	uint32 attemptableNodeCount = 0;
	ObjectAddress tableAddress = { 0 };

	uint32 candidateNodeIndex = 0;
	List *candidateNodeList = NIL;
	List *workerNodeList = NIL;
	text *nullMinValue = NULL;
	text *nullMaxValue = NULL;
	char partitionMethod = 0;
	char storageType = SHARD_STORAGE_TABLE;

	Oid relationId = ResolveRelationId(relationNameText, false);
	char relationKind = get_rel_relkind(relationId);
	char replicationModel = REPLICATION_MODEL_INVALID;

	CheckCitusVersion(ERROR);

@@ -136,7 +132,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
		}
	}

	partitionMethod = PartitionMethod(relationId);
	char partitionMethod = PartitionMethod(relationId);
	if (partitionMethod == DISTRIBUTE_BY_HASH)
	{
		ereport(ERROR, (errmsg("relation \"%s\" is a hash partitioned table",

@@ -152,15 +148,15 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
							   "on reference tables")));
	}

	replicationModel = TableReplicationModel(relationId);
	char replicationModel = TableReplicationModel(relationId);

	EnsureReplicationSettings(relationId, replicationModel);

	/* generate new and unique shardId from sequence */
	shardId = GetNextShardId();
	uint64 shardId = GetNextShardId();

	/* if enough live groups, add an extra candidate node as backup */
	workerNodeList = DistributedTablePlacementNodeList(NoLock);
	List *workerNodeList = DistributedTablePlacementNodeList(NoLock);

	if (list_length(workerNodeList) > ShardReplicationFactor)
	{

@@ -232,33 +228,20 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
	char *sourceTableName = text_to_cstring(sourceTableNameText);
	char *sourceNodeName = text_to_cstring(sourceNodeNameText);

	Oid shardSchemaOid = 0;
	char *shardSchemaName = NULL;
	char *shardTableName = NULL;
	char *shardQualifiedName = NULL;
	List *shardPlacementList = NIL;
	ListCell *shardPlacementCell = NULL;
	uint64 newShardSize = 0;
	uint64 shardMaxSizeInBytes = 0;
	float4 shardFillLevel = 0.0;
	char partitionMethod = 0;

	ShardInterval *shardInterval = NULL;
	Oid relationId = InvalidOid;
	bool cstoreTable = false;

	char storageType = 0;

	CheckCitusVersion(ERROR);

	shardInterval = LoadShardInterval(shardId);
	relationId = shardInterval->relationId;
	ShardInterval *shardInterval = LoadShardInterval(shardId);
	Oid relationId = shardInterval->relationId;

	/* don't allow the table to be dropped */
	LockRelationOid(relationId, AccessShareLock);

	cstoreTable = CStoreTable(relationId);
	storageType = shardInterval->storageType;
	bool cstoreTable = CStoreTable(relationId);
	char storageType = shardInterval->storageType;

	EnsureTablePermissions(relationId, ACL_INSERT);

@@ -268,7 +251,7 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
						errdetail("The underlying shard is not a regular table")));
	}

	partitionMethod = PartitionMethod(relationId);
	char partitionMethod = PartitionMethod(relationId);
	if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_NONE)
	{
		ereport(ERROR, (errmsg("cannot append to shardId " UINT64_FORMAT, shardId),

@@ -283,16 +266,17 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
	LockShardResource(shardId, ExclusiveLock);

	/* get schame name of the target shard */
	shardSchemaOid = get_rel_namespace(relationId);
	shardSchemaName = get_namespace_name(shardSchemaOid);
	Oid shardSchemaOid = get_rel_namespace(relationId);
	char *shardSchemaName = get_namespace_name(shardSchemaOid);

	/* Build shard table name. */
	shardTableName = get_rel_name(relationId);
	char *shardTableName = get_rel_name(relationId);
	AppendShardIdToName(&shardTableName, shardId);

	shardQualifiedName = quote_qualified_identifier(shardSchemaName, shardTableName);
	char *shardQualifiedName = quote_qualified_identifier(shardSchemaName,
														  shardTableName);

	shardPlacementList = FinalizedShardPlacementList(shardId);
	List *shardPlacementList = FinalizedShardPlacementList(shardId);
	if (shardPlacementList == NIL)
	{
		ereport(ERROR, (errmsg("could not find any shard placements for shardId "

@@ -309,7 +293,6 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
		MultiConnection *connection = GetPlacementConnection(FOR_DML, shardPlacement,
															 NULL);
		PGresult *queryResult = NULL;
		int executeResult = 0;

		StringInfo workerAppendQuery = makeStringInfo();
		appendStringInfo(workerAppendQuery, WORKER_APPEND_TABLE_TO_SHARD,

@@ -319,8 +302,9 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)

		RemoteTransactionBeginIfNecessary(connection);

		executeResult = ExecuteOptionalRemoteCommand(connection, workerAppendQuery->data,
													 &queryResult);
		int executeResult = ExecuteOptionalRemoteCommand(connection,
														 workerAppendQuery->data,
														 &queryResult);
		PQclear(queryResult);
		ForgetResults(connection);

@@ -333,10 +317,10 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
	MarkFailedShardPlacements();

	/* update shard statistics and get new shard size */
	newShardSize = UpdateShardStatistics(shardId);
	uint64 newShardSize = UpdateShardStatistics(shardId);

	/* calculate ratio of current shard size compared to shard max size */
	shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
	uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
	shardFillLevel = ((float4) newShardSize / (float4) shardMaxSizeInBytes);

	PG_RETURN_FLOAT4(shardFillLevel);

@@ -351,11 +335,10 @@ Datum
master_update_shard_statistics(PG_FUNCTION_ARGS)
{
	int64 shardId = PG_GETARG_INT64(0);
	uint64 shardSize = 0;

	CheckCitusVersion(ERROR);

	shardSize = UpdateShardStatistics(shardId);
	uint64 shardSize = UpdateShardStatistics(shardId);

	PG_RETURN_INT64(shardSize);
}

@@ -393,7 +376,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
	int attemptCount = replicationFactor;
	int workerNodeCount = list_length(workerNodeList);
	int placementsCreated = 0;
	int attemptNumber = 0;
	List *foreignConstraintCommandList = GetTableForeignConstraintCommands(relationId);
	bool includeSequenceDefaults = false;
	List *ddlCommandList = GetTableDDLEvents(relationId, includeSequenceDefaults);

@@ -406,7 +388,7 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
		attemptCount++;
	}

	for (attemptNumber = 0; attemptNumber < attemptCount; attemptNumber++)
	for (int attemptNumber = 0; attemptNumber < attemptCount; attemptNumber++)
	{
		int workerNodeIndex = attemptNumber % workerNodeCount;
		WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex);

@@ -419,7 +401,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
		MultiConnection *connection =
			GetNodeUserDatabaseConnection(connectionFlag, nodeName, nodePort,
										  relationOwner, NULL);
		List *commandList = NIL;

		if (PQstatus(connection->pgConn) != CONNECTION_OK)
		{

@@ -429,9 +410,9 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
			continue;
		}

		commandList = WorkerCreateShardCommandList(relationId, shardIndex, shardId,
												   ddlCommandList,
												   foreignConstraintCommandList);
		List *commandList = WorkerCreateShardCommandList(relationId, shardIndex, shardId,
														 ddlCommandList,
														 foreignConstraintCommandList);

		ExecuteCriticalRemoteCommandList(connection, commandList);

@@ -463,23 +444,21 @@ InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList,
						 int workerStartIndex, int replicationFactor)
{
	int workerNodeCount = list_length(workerNodeList);
	int attemptNumber = 0;
	int placementsInserted = 0;
	List *insertedShardPlacements = NIL;

	for (attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++)
	for (int attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++)
	{
		int workerNodeIndex = (workerStartIndex + attemptNumber) % workerNodeCount;
		WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex);
		uint32 nodeGroupId = workerNode->groupId;
		const RelayFileState shardState = FILE_FINALIZED;
		const uint64 shardSize = 0;
		uint64 shardPlacementId = 0;
		ShardPlacement *shardPlacement = NULL;

		shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID,
												   shardState, shardSize, nodeGroupId);
		shardPlacement = LoadShardPlacement(shardId, shardPlacementId);
		uint64 shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID,
														  shardState, shardSize,
														  nodeGroupId);
		ShardPlacement *shardPlacement = LoadShardPlacement(shardId, shardPlacementId);
		insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement);

		placementsInserted++;

@@ -519,8 +498,6 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
		uint64 shardId = shardPlacement->shardId;
		ShardInterval *shardInterval = LoadShardInterval(shardId);
		int shardIndex = -1;
		List *commandList = NIL;
		Task *task = NULL;
		List *relationShardList = RelationShardListForShardCreate(shardInterval);

		if (colocatedShard)

@@ -528,11 +505,12 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
			shardIndex = ShardIndex(shardInterval);
		}

		commandList = WorkerCreateShardCommandList(distributedRelationId, shardIndex,
												   shardId, ddlCommandList,
												   foreignConstraintCommandList);
		List *commandList = WorkerCreateShardCommandList(distributedRelationId,
														 shardIndex,
														 shardId, ddlCommandList,
														 foreignConstraintCommandList);

		task = CitusMakeNode(Task);
		Task *task = CitusMakeNode(Task);
		task->jobId = INVALID_JOB_ID;
		task->taskId = taskId++;
		task->taskType = DDL_TASK;

@@ -580,26 +558,23 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
static List *
RelationShardListForShardCreate(ShardInterval *shardInterval)
{
	List *relationShardList = NIL;
	RelationShard *relationShard = NULL;
	Oid relationId = shardInterval->relationId;
	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
	List *referencedRelationList = cacheEntry->referencedRelationsViaForeignKey;
	List *referencingRelationList = cacheEntry->referencingRelationsViaForeignKey;
	List *allForeignKeyRelations = NIL;
	int shardIndex = -1;
	ListCell *fkeyRelationIdCell = NULL;

	/* list_concat_*() modifies the first arg, so make a copy first */
	allForeignKeyRelations = list_copy(referencedRelationList);
	List *allForeignKeyRelations = list_copy(referencedRelationList);
	allForeignKeyRelations = list_concat_unique_oid(allForeignKeyRelations,
													referencingRelationList);

	/* record the placement access of the shard itself */
	relationShard = CitusMakeNode(RelationShard);
	RelationShard *relationShard = CitusMakeNode(RelationShard);
	relationShard->relationId = relationId;
	relationShard->shardId = shardInterval->shardId;
	relationShardList = list_make1(relationShard);
	List *relationShardList = list_make1(relationShard);

	if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH &&
		cacheEntry->colocationId != INVALID_COLOCATION_ID)

@@ -612,7 +587,6 @@ RelationShardListForShardCreate(ShardInterval *shardInterval)
	foreach(fkeyRelationIdCell, allForeignKeyRelations)
	{
		Oid fkeyRelationid = lfirst_oid(fkeyRelationIdCell);
		RelationShard *fkeyRelationShard = NULL;
		uint64 fkeyShardId = INVALID_SHARD_ID;

		if (!IsDistributedTable(fkeyRelationid))

@@ -645,7 +619,7 @@ RelationShardListForShardCreate(ShardInterval *shardInterval)
			continue;
		}

		fkeyRelationShard = CitusMakeNode(RelationShard);
		RelationShard *fkeyRelationShard = CitusMakeNode(RelationShard);
		fkeyRelationShard->relationId = fkeyRelationid;
		fkeyRelationShard->shardId = fkeyShardId;

@@ -714,16 +688,12 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
		char *command = (char *) lfirst(foreignConstraintCommandCell);
		char *escapedCommand = quote_literal_cstr(command);

		Oid referencedRelationId = InvalidOid;
		Oid referencedSchemaId = InvalidOid;
		char *referencedSchemaName = NULL;
		char *escapedReferencedSchemaName = NULL;
		uint64 referencedShardId = INVALID_SHARD_ID;

		StringInfo applyForeignConstraintCommand = makeStringInfo();

		/* we need to parse the foreign constraint command to get referencing table id */
		referencedRelationId = ForeignConstraintGetReferencedTableId(command);
		Oid referencedRelationId = ForeignConstraintGetReferencedTableId(command);
		if (referencedRelationId == InvalidOid)
		{
			ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),

@@ -731,9 +701,9 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
							errdetail("Referenced relation cannot be found.")));
		}

		referencedSchemaId = get_rel_namespace(referencedRelationId);
		referencedSchemaName = get_namespace_name(referencedSchemaId);
		escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);
		Oid referencedSchemaId = get_rel_namespace(referencedRelationId);
		char *referencedSchemaName = get_namespace_name(referencedSchemaId);
		char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);

		/*
		 * In case of self referencing shards, relation itself might not be distributed

@@ -792,8 +762,6 @@ UpdateShardStatistics(int64 shardId)
	Oid relationId = shardInterval->relationId;
	char storageType = shardInterval->storageType;
	char partitionType = PartitionMethod(relationId);
	char *shardQualifiedName = NULL;
	List *shardPlacementList = NIL;
	ListCell *shardPlacementCell = NULL;
	bool statsOK = false;
	uint64 shardSize = 0;

@@ -807,9 +775,9 @@ UpdateShardStatistics(int64 shardId)

	AppendShardIdToName(&shardName, shardId);

	shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
	char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);

	shardPlacementList = FinalizedShardPlacementList(shardId);
	List *shardPlacementList = FinalizedShardPlacementList(shardId);

	/* get shard's statistics from a shard placement */
	foreach(shardPlacementCell, shardPlacementList)

@@ -881,28 +849,19 @@ static bool
WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
				 uint64 *shardSize, text **shardMinValue, text **shardMaxValue)
{
	char *quotedShardName = NULL;
	bool cstoreTable = false;
	StringInfo tableSizeQuery = makeStringInfo();

	const uint32 unusedTableId = 1;
	char partitionType = PartitionMethod(relationId);
	Var *partitionColumn = NULL;
	char *partitionColumnName = NULL;
	StringInfo partitionValueQuery = makeStringInfo();

	PGresult *queryResult = NULL;
	const int minValueIndex = 0;
	const int maxValueIndex = 1;

	uint64 tableSize = 0;
	char *tableSizeString = NULL;
	char *tableSizeStringEnd = NULL;
	bool minValueIsNull = false;
	bool maxValueIsNull = false;

	int connectionFlags = 0;
	int executeCommand = 0;

	MultiConnection *connection = GetPlacementConnection(connectionFlags, placement,
														 NULL);

@@ -911,9 +870,9 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
	*shardMinValue = NULL;
	*shardMaxValue = NULL;

	quotedShardName = quote_literal_cstr(shardName);
	char *quotedShardName = quote_literal_cstr(shardName);

	cstoreTable = CStoreTable(relationId);
	bool cstoreTable = CStoreTable(relationId);
	if (cstoreTable)
	{
		appendStringInfo(tableSizeQuery, SHARD_CSTORE_TABLE_SIZE_QUERY, quotedShardName);

@@ -923,14 +882,14 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
		appendStringInfo(tableSizeQuery, SHARD_TABLE_SIZE_QUERY, quotedShardName);
	}

	executeCommand = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
												  &queryResult);
	int executeCommand = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
													  &queryResult);
	if (executeCommand != 0)
	{
		return false;
	}

	tableSizeString = PQgetvalue(queryResult, 0, 0);
	char *tableSizeString = PQgetvalue(queryResult, 0, 0);
	if (tableSizeString == NULL)
	{
		PQclear(queryResult);

@@ -939,7 +898,7 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
	}

	errno = 0;
	tableSize = pg_strtouint64(tableSizeString, &tableSizeStringEnd, 0);
	uint64 tableSize = pg_strtouint64(tableSizeString, &tableSizeStringEnd, 0);
	if (errno != 0 || (*tableSizeStringEnd) != '\0')
	{
		PQclear(queryResult);

@@ -959,8 +918,8 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
	}

	/* fill in the partition column name and shard name in the query. */
	partitionColumn = PartitionColumn(relationId, unusedTableId);
	partitionColumnName = get_attname(relationId, partitionColumn->varattno, false);
	Var *partitionColumn = PartitionColumn(relationId, unusedTableId);
	char *partitionColumnName = get_attname(relationId, partitionColumn->varattno, false);
	appendStringInfo(partitionValueQuery, SHARD_RANGE_QUERY,
					 partitionColumnName, partitionColumnName, shardName);

@@ -971,8 +930,8 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
		return false;
	}

	minValueIsNull = PQgetisnull(queryResult, 0, minValueIndex);
	maxValueIsNull = PQgetisnull(queryResult, 0, maxValueIndex);
	bool minValueIsNull = PQgetisnull(queryResult, 0, minValueIndex);
	bool maxValueIsNull = PQgetisnull(queryResult, 0, maxValueIndex);

	if (!minValueIsNull && !maxValueIsNull)
	{
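The hunks in CreateAppendDistributedShardPlacements and InsertShardPlacementRows above show the companion rewrite for loop counters: the counter's up-front declaration is deleted and the declaration moves into the for-statement itself, scoping it to the loop. A small self-contained sketch of that sub-pattern, again with hypothetical names rather than Citus code:

#include <stdio.h>

/* Before: counter declared at function scope, C89 style. */
static int
count_attempts_before(int replicationFactor)
{
	int attemptNumber = 0;
	int placementsCreated = 0;

	for (attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++)
	{
		placementsCreated++;
	}

	return placementsCreated;
}

/* After: counter declared in the loop header, invisible outside it. */
static int
count_attempts_after(int replicationFactor)
{
	int placementsCreated = 0;

	for (int attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++)
	{
		placementsCreated++;
	}

	return placementsCreated;
}

int
main(void)
{
	printf("%d %d\n", count_attempts_before(3), count_attempts_after(3));
	return 0;
}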
@@ -41,21 +41,16 @@ PG_FUNCTION_INFO_V1(citus_truncate_trigger);
Datum
citus_truncate_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *triggerData = NULL;
	Relation truncatedRelation = NULL;
	Oid relationId = InvalidOid;
	char partitionMethod = 0;

	if (!CALLED_AS_TRIGGER(fcinfo))
	{
		ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
						errmsg("must be called as trigger")));
	}

	triggerData = (TriggerData *) fcinfo->context;
	truncatedRelation = triggerData->tg_relation;
	relationId = RelationGetRelid(truncatedRelation);
	partitionMethod = PartitionMethod(relationId);
	TriggerData *triggerData = (TriggerData *) fcinfo->context;
	Relation truncatedRelation = triggerData->tg_relation;
	Oid relationId = RelationGetRelid(truncatedRelation);
	char partitionMethod = PartitionMethod(relationId);

	if (!EnableDDLPropagation)
	{

@@ -110,7 +105,6 @@ TruncateTaskList(Oid relationId)
		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
		uint64 shardId = shardInterval->shardId;
		StringInfo shardQueryString = makeStringInfo();
		Task *task = NULL;
		char *shardName = pstrdup(relationName);

		AppendShardIdToName(&shardName, shardId);

@@ -118,7 +112,7 @@ TruncateTaskList(Oid relationId)
		appendStringInfo(shardQueryString, "TRUNCATE TABLE %s CASCADE",
						 quote_qualified_identifier(schemaName, shardName));

		task = CitusMakeNode(Task);
		Task *task = CitusMakeNode(Task);
		task->jobId = INVALID_JOB_ID;
		task->taskId = taskId++;
		task->taskType = DDL_TASK;

@@ -67,7 +67,6 @@ WorkerGetRandomCandidateNode(List *currentNodeList)
	WorkerNode *workerNode = NULL;
	bool wantSameRack = false;
	uint32 tryCount = WORKER_RACK_TRIES;
	uint32 tryIndex = 0;

	uint32 currentNodeCount = list_length(currentNodeList);
	List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList);

@@ -104,17 +103,15 @@ WorkerGetRandomCandidateNode(List *currentNodeList)
	 * If after a predefined number of tries, we still cannot find such a node,
	 * we simply give up and return the last worker node we found.
	 */
	for (tryIndex = 0; tryIndex < tryCount; tryIndex++)
	for (uint32 tryIndex = 0; tryIndex < tryCount; tryIndex++)
	{
		WorkerNode *firstNode = (WorkerNode *) linitial(currentNodeList);
		char *firstRack = firstNode->workerRack;
		char *workerRack = NULL;
		bool sameRack = false;

		workerNode = FindRandomNodeFromList(candidateWorkerNodeList);
		workerRack = workerNode->workerRack;
		char *workerRack = workerNode->workerRack;

		sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0);
		bool sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0);
		if ((sameRack && wantSameRack) || (!sameRack && !wantSameRack))
		{
			break;

@@ -171,7 +168,6 @@ WorkerGetLocalFirstCandidateNode(List *currentNodeList)
	if (currentNodeCount == 0)
	{
		StringInfo clientHostStringInfo = makeStringInfo();
		char *clientHost = NULL;
		char *errorMessage = ClientHostAddress(clientHostStringInfo);

		if (errorMessage != NULL)

@@ -184,7 +180,7 @@ WorkerGetLocalFirstCandidateNode(List *currentNodeList)
		}

		/* if hostname is localhost.localdomain, change it to localhost */
		clientHost = clientHostStringInfo->data;
		char *clientHost = clientHostStringInfo->data;
		if (strncmp(clientHost, "localhost.localdomain", WORKER_LENGTH) == 0)
		{
			clientHost = pstrdup("localhost");

@@ -343,7 +339,6 @@ FilterActiveNodeListFunc(LOCKMODE lockMode, bool (*checkFunction)(WorkerNode *))
{
	List *workerNodeList = NIL;
	WorkerNode *workerNode = NULL;
	HTAB *workerNodeHash = NULL;
	HASH_SEQ_STATUS status;

	Assert(checkFunction != NULL);

@@ -353,7 +348,7 @@ FilterActiveNodeListFunc(LOCKMODE lockMode, bool (*checkFunction)(WorkerNode *))
		LockRelationOid(DistNodeRelationId(), lockMode);
	}

	workerNodeHash = GetWorkerNodeHash();
	HTAB *workerNodeHash = GetWorkerNodeHash();
	hash_seq_init(&status, workerNodeHash);

	while ((workerNode = hash_seq_search(&status)) != NULL)

@@ -568,10 +563,9 @@ CompareWorkerNodes(const void *leftElement, const void *rightElement)
{
	const void *leftWorker = *((const void **) leftElement);
	const void *rightWorker = *((const void **) rightElement);
	int compare = 0;
	Size ignoredKeySize = 0;

	compare = WorkerNodeCompare(leftWorker, rightWorker, ignoredKeySize);
	int compare = WorkerNodeCompare(leftWorker, rightWorker, ignoredKeySize);

	return compare;
}

@@ -588,16 +582,15 @@ WorkerNodeCompare(const void *lhsKey, const void *rhsKey, Size keySize)
	const WorkerNode *workerLhs = (const WorkerNode *) lhsKey;
	const WorkerNode *workerRhs = (const WorkerNode *) rhsKey;

	int nameCompare = 0;
	int portCompare = 0;

	nameCompare = strncmp(workerLhs->workerName, workerRhs->workerName, WORKER_LENGTH);
	int nameCompare = strncmp(workerLhs->workerName, workerRhs->workerName,
							  WORKER_LENGTH);
	if (nameCompare != 0)
	{
		return nameCompare;
	}

	portCompare = workerLhs->workerPort - workerRhs->workerPort;
	int portCompare = workerLhs->workerPort - workerRhs->workerPort;
	return portCompare;
}

@@ -170,9 +170,7 @@ recurse_pg_depend(const ObjectAddress *target,
				  void (*apply)(ObjectAddressCollector *collector, Form_pg_depend row),
				  ObjectAddressCollector *collector)
{
	Relation depRel = NULL;
	ScanKeyData key[2];
	SysScanDesc depScan = NULL;
	HeapTuple depTup = NULL;
	List *pgDependEntries = NIL;
	ListCell *pgDependCell = NULL;

@@ -188,14 +186,15 @@ recurse_pg_depend(const ObjectAddress *target,
	/*
	 * iterate the actual pg_depend catalog
	 */
	depRel = heap_open(DependRelationId, AccessShareLock);
	Relation depRel = heap_open(DependRelationId, AccessShareLock);

	/* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */
	ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(target->classId));
	ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(target->objectId));
	depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, key);
	SysScanDesc depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2,
											 key);

	while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
	{

@@ -215,9 +214,7 @@ recurse_pg_depend(const ObjectAddress *target,
		 */
		if (expand != NULL)
		{
			List *expandedEntries = NIL;

			expandedEntries = expand(collector, target);
			List *expandedEntries = expand(collector, target);
			pgDependEntries = list_concat(pgDependEntries, expandedEntries);
		}

@@ -262,14 +259,13 @@ recurse_pg_depend(const ObjectAddress *target,
static void
InitObjectAddressCollector(ObjectAddressCollector *collector)
{
	int hashFlags = 0;
	HASHCTL info;

	memset(&info, 0, sizeof(info));
	info.keysize = sizeof(ObjectAddress);
	info.entrysize = sizeof(ObjectAddress);
	info.hcxt = CurrentMemoryContext;
	hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
	int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);

	collector->dependencySet = hash_create("dependency set", 128, &info, hashFlags);
	collector->dependencyList = NULL;

@@ -301,12 +297,12 @@ TargetObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *targ
static void
MarkObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *target)
{
	ObjectAddress *address = NULL;
	bool found = false;

	/* add to set */
	address = (ObjectAddress *) hash_search(collector->visitedObjects, target,
											HASH_ENTER, &found);
	ObjectAddress *address = (ObjectAddress *) hash_search(collector->visitedObjects,
														   target,
														   HASH_ENTER, &found);

	if (!found)
	{

@@ -322,12 +318,12 @@ MarkObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *target
static void
CollectObjectAddress(ObjectAddressCollector *collector, const ObjectAddress *collect)
{
	ObjectAddress *address = NULL;
	bool found = false;

	/* add to set */
	address = (ObjectAddress *) hash_search(collector->dependencySet, collect,
											HASH_ENTER, &found);
	ObjectAddress *address = (ObjectAddress *) hash_search(collector->dependencySet,
														   collect,
														   HASH_ENTER, &found);

	if (!found)
	{

@@ -475,20 +471,19 @@ bool
IsObjectAddressOwnedByExtension(const ObjectAddress *target,
								ObjectAddress *extensionAddress)
{
	Relation depRel = NULL;
	ScanKeyData key[2];
	SysScanDesc depScan = NULL;
	HeapTuple depTup = NULL;
	bool result = false;

	depRel = heap_open(DependRelationId, AccessShareLock);
	Relation depRel = heap_open(DependRelationId, AccessShareLock);

	/* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */
	ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(target->classId));
	ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(target->objectId));
	depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, key);
	SysScanDesc depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2,
											 key);

	while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
	{
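Equally telling is what the hunks above leave alone: declarations whose address is taken before any plain assignment (savedUserId and savedSecurityContext handed to GetUserIdAndSecContext(), the ScanKeyData key[2] arrays), and variables assigned inside a loop condition such as depTup in while (HeapTupleIsValid(depTup = systable_getnext(depScan))), cannot be moved, so their up-front declarations survive. A short sketch of that distinction, with hypothetical names that are not from the Citus sources:

#include <stdio.h>

static void
fetch_user_context(int *userId, int *securityContext)
{
	*userId = 42;
	*securityContext = 1;
}

static int
next_value(int *cursor)
{
	return (*cursor)++;
}

int
main(void)
{
	int userId = 0;          /* stays up front: address taken below */
	int securityContext = 0; /* stays up front: address taken below */
	int value = 0;           /* stays up front: assigned in the loop condition */

	fetch_user_context(&userId, &securityContext);

	int cursor = 0; /* convertible: declared at its first assignment */
	while ((value = next_value(&cursor)) < 3)
	{
		printf("%d %d %d\n", userId, securityContext, value);
	}

	return 0;
}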
@@ -139,13 +139,12 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
		ObjectIdGetDatum(distAddress->objectId),
		Int32GetDatum(distAddress->objectSubId)
	};
	int spiStatus = 0;

	char *insertQuery = "INSERT INTO citus.pg_dist_object (classid, objid, objsubid) "
						"VALUES ($1, $2, $3) ON CONFLICT DO NOTHING";

	spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes,
										  paramValues);
	int spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes,
											  paramValues);
	if (spiStatus < 0)
	{
		ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object")));

@@ -160,14 +159,12 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
bool
CitusExtensionObject(const ObjectAddress *objectAddress)
{
	char *extensionName = false;

	if (objectAddress->classId != ExtensionRelationId)
	{
		return false;
	}

	extensionName = get_extension_name(objectAddress->objectId);
	char *extensionName = get_extension_name(objectAddress->objectId);
	if (extensionName != NULL &&
		strncasecmp(extensionName, "citus", NAMEDATALEN) == 0)
	{

@@ -188,13 +185,10 @@ static int
ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
						  Datum *paramValues)
{
	int spiConnected = 0;
	Oid savedUserId = InvalidOid;
	int savedSecurityContext = 0;
	int spiStatus = 0;
	int spiFinished = 0;

	spiConnected = SPI_connect();
	int spiConnected = SPI_connect();
	if (spiConnected != SPI_OK_CONNECT)
	{
		ereport(ERROR, (errmsg("could not connect to SPI manager")));

@@ -204,12 +198,12 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

	spiStatus = SPI_execute_with_args(query, paramCount, paramTypes, paramValues,
									  NULL, false, 0);
	int spiStatus = SPI_execute_with_args(query, paramCount, paramTypes, paramValues,
										  NULL, false, 0);

	SetUserIdAndSecContext(savedUserId, savedSecurityContext);

	spiFinished = SPI_finish();
	int spiFinished = SPI_finish();
	if (spiFinished != SPI_OK_FINISH)
	{
		ereport(ERROR, (errmsg("could not disconnect from SPI manager")));

@@ -237,13 +231,12 @@ UnmarkObjectDistributed(const ObjectAddress *address)
		ObjectIdGetDatum(address->objectId),
		Int32GetDatum(address->objectSubId)
	};
	int spiStatus = 0;

	char *deleteQuery = "DELETE FROM citus.pg_dist_object WHERE classid = $1 AND "
						"objid = $2 AND objsubid = $3";

	spiStatus = ExecuteCommandAsSuperuser(deleteQuery, paramCount, paramTypes,
										  paramValues);
	int spiStatus = ExecuteCommandAsSuperuser(deleteQuery, paramCount, paramTypes,
											  paramValues);
	if (spiStatus < 0)
	{
		ereport(ERROR, (errmsg("failed to delete object from citus.pg_dist_object")));

@@ -258,13 +251,10 @@ UnmarkObjectDistributed(const ObjectAddress *address)
bool
IsObjectDistributed(const ObjectAddress *address)
{
	Relation pgDistObjectRel = NULL;
	ScanKeyData key[3];
	SysScanDesc pgDistObjectScan = NULL;
	HeapTuple pgDistObjectTup = NULL;
	bool result = false;

	pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
	Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);

	/* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */
	ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ,

@@ -273,10 +263,11 @@ IsObjectDistributed(const ObjectAddress *address)
				ObjectIdGetDatum(address->objectId));
	ScanKeyInit(&key[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(address->objectSubId));
	pgDistObjectScan = systable_beginscan(pgDistObjectRel, DistObjectPrimaryKeyIndexId(),
										  true, NULL, 3, key);
	SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel,
													  DistObjectPrimaryKeyIndexId(),
													  true, NULL, 3, key);

	pgDistObjectTup = systable_getnext(pgDistObjectScan);
	HeapTuple pgDistObjectTup = systable_getnext(pgDistObjectScan);
	if (HeapTupleIsValid(pgDistObjectTup))
	{
		result = true;

@@ -299,14 +290,13 @@ ClusterHasDistributedFunctionWithDistArgument(void)
{
	bool foundDistributedFunction = false;

	SysScanDesc pgDistObjectScan = NULL;
	HeapTuple pgDistObjectTup = NULL;

	Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);

	TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel);

	pgDistObjectScan =
	SysScanDesc pgDistObjectScan =
		systable_beginscan(pgDistObjectRel, InvalidOid, false, NULL, 0, NULL);
	while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan)))
	{

@@ -315,8 +305,7 @@ ClusterHasDistributedFunctionWithDistArgument(void)

		if (pg_dist_object->classid == ProcedureRelationId)
		{
			bool distArgumentIsNull = false;
			distArgumentIsNull =
			bool distArgumentIsNull =
				heap_attisnull(pgDistObjectTup,
							   Anum_pg_dist_object_distribution_argument_index,
							   tupleDescriptor);

@@ -345,14 +334,13 @@ ClusterHasDistributedFunctionWithDistArgument(void)
List *
GetDistributedObjectAddressList(void)
{
	Relation pgDistObjectRel = NULL;
	SysScanDesc pgDistObjectScan = NULL;
	HeapTuple pgDistObjectTup = NULL;
	List *objectAddressList = NIL;

	pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
	pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false, NULL, 0,
										  NULL);
	Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
	SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false,
|
||||
NULL, 0,
|
||||
NULL);
|
||||
while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan)))
|
||||
{
|
||||
Form_pg_dist_object pg_dist_object =
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
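Every hunk in this commit follows the same mechanical rewrite: a local variable that was declared at the top of a C function with a placeholder initializer and assigned exactly once further down is collapsed into a single declaration at its first assignment (legal since C99). A minimal, self-contained sketch of the before/after shape, using a hypothetical helper function rather than anything from this diff:

#include <stdio.h>

/* hypothetical stand-in for a single-assignment call such as SPI_connect() */
static int
ComputeValue(void)
{
	return 42;
}

int
main(void)
{
	/*
	 * Before the conversion the code read:
	 *
	 *     int value = 0;
	 *     ...
	 *     value = ComputeValue();
	 *
	 * After the conversion the declaration and the assignment are merged:
	 */
	int value = ComputeValue();

	printf("%d\n", value);
	return 0;
}

Variables that are assigned on more than one path (such as `result` or `ownerId` in the hunks around this note) keep their up-front declarations, which is why some `= NULL` and `= false` initializers survive the cleanup.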
@@ -91,7 +91,6 @@ start_metadata_sync_to_node(PG_FUNCTION_ARGS)
 void
 StartMetadatSyncToNode(char *nodeNameString, int32 nodePort)
 {
-	WorkerNode *workerNode = NULL;
 	char *escapedNodeName = quote_literal_cstr(nodeNameString);

 	/* fail if metadata synchronization doesn't succeed */

@@ -106,7 +105,7 @@ StartMetadatSyncToNode(char *nodeNameString, int32 nodePort)

 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);

-	workerNode = FindWorkerNode(nodeNameString, nodePort);
+	WorkerNode *workerNode = FindWorkerNode(nodeNameString, nodePort);
 	if (workerNode == NULL)
 	{
 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),

@@ -159,7 +158,6 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS)
 	text *nodeName = PG_GETARG_TEXT_P(0);
 	int32 nodePort = PG_GETARG_INT32(1);
 	char *nodeNameString = text_to_cstring(nodeName);
-	WorkerNode *workerNode = NULL;

 	EnsureCoordinator();
 	EnsureSuperUser();

@@ -167,7 +165,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS)

 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);

-	workerNode = FindWorkerNode(nodeNameString, nodePort);
+	WorkerNode *workerNode = FindWorkerNode(nodeNameString, nodePort);
 	if (workerNode == NULL)
 	{
 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),

@@ -297,13 +295,13 @@ bool
 SendOptionalCommandListToWorkerInTransaction(char *nodeName, int32 nodePort,
 		char *nodeUser, List *commandList)
 {
-	MultiConnection *workerConnection = NULL;
 	ListCell *commandCell = NULL;
 	int connectionFlags = FORCE_NEW_CONNECTION;
 	bool failed = false;

-	workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
-			nodeUser, NULL);
+	MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags,
+			nodeName, nodePort,
+			nodeUser, NULL);

 	RemoteTransactionBegin(workerConnection);

@@ -356,14 +354,13 @@ MetadataCreateCommands(void)
 	bool includeNodesFromOtherClusters = true;
 	List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters);
 	ListCell *distributedTableCell = NULL;
-	char *nodeListInsertCommand = NULL;
 	bool includeSequenceDefaults = true;

 	/* make sure we have deterministic output for our tests */
 	workerNodeList = SortList(workerNodeList, CompareWorkerNodes);

 	/* generate insert command for pg_dist_node table */
-	nodeListInsertCommand = NodeListInsertCommand(workerNodeList);
+	char *nodeListInsertCommand = NodeListInsertCommand(workerNodeList);
 	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
 			nodeListInsertCommand);

@@ -441,26 +438,22 @@ MetadataCreateCommands(void)
 	{
 		DistTableCacheEntry *cacheEntry =
 			(DistTableCacheEntry *) lfirst(distributedTableCell);
-		List *shardIntervalList = NIL;
-		List *shardCreateCommandList = NIL;
-		char *metadataCommand = NULL;
-		char *truncateTriggerCreateCommand = NULL;
 		Oid clusteredTableId = cacheEntry->relationId;

 		/* add the table metadata command first*/
-		metadataCommand = DistributionCreateCommand(cacheEntry);
+		char *metadataCommand = DistributionCreateCommand(cacheEntry);
 		metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
 				metadataCommand);

 		/* add the truncate trigger command after the table became distributed */
-		truncateTriggerCreateCommand =
+		char *truncateTriggerCreateCommand =
 			TruncateTriggerCreateCommand(cacheEntry->relationId);
 		metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
 				truncateTriggerCreateCommand);

 		/* add the pg_dist_shard{,placement} entries */
-		shardIntervalList = LoadShardIntervalList(clusteredTableId);
-		shardCreateCommandList = ShardListInsertCommand(shardIntervalList);
+		List *shardIntervalList = LoadShardIntervalList(clusteredTableId);
+		List *shardCreateCommandList = ShardListInsertCommand(shardIntervalList);

 		metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
 				shardCreateCommandList);

@@ -481,44 +474,36 @@ GetDistributedTableDDLEvents(Oid relationId)
 {
 	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);

-	List *shardIntervalList = NIL;
 	List *commandList = NIL;
-	List *foreignConstraintCommands = NIL;
-	List *shardMetadataInsertCommandList = NIL;
-	List *sequenceDDLCommands = NIL;
-	List *tableDDLCommands = NIL;
-	char *tableOwnerResetCommand = NULL;
-	char *metadataCommand = NULL;
-	char *truncateTriggerCreateCommand = NULL;
 	bool includeSequenceDefaults = true;

 	/* commands to create sequences */
-	sequenceDDLCommands = SequenceDDLCommandsForTable(relationId);
+	List *sequenceDDLCommands = SequenceDDLCommandsForTable(relationId);
 	commandList = list_concat(commandList, sequenceDDLCommands);

 	/* commands to create the table */
-	tableDDLCommands = GetTableDDLEvents(relationId, includeSequenceDefaults);
+	List *tableDDLCommands = GetTableDDLEvents(relationId, includeSequenceDefaults);
 	commandList = list_concat(commandList, tableDDLCommands);

 	/* command to reset the table owner */
-	tableOwnerResetCommand = TableOwnerResetCommand(relationId);
+	char *tableOwnerResetCommand = TableOwnerResetCommand(relationId);
 	commandList = lappend(commandList, tableOwnerResetCommand);

 	/* command to insert pg_dist_partition entry */
-	metadataCommand = DistributionCreateCommand(cacheEntry);
+	char *metadataCommand = DistributionCreateCommand(cacheEntry);
 	commandList = lappend(commandList, metadataCommand);

 	/* commands to create the truncate trigger of the table */
-	truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId);
+	char *truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId);
 	commandList = lappend(commandList, truncateTriggerCreateCommand);

 	/* commands to insert pg_dist_shard & pg_dist_placement entries */
-	shardIntervalList = LoadShardIntervalList(relationId);
-	shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList);
+	List *shardIntervalList = LoadShardIntervalList(relationId);
+	List *shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList);
 	commandList = list_concat(commandList, shardMetadataInsertCommandList);

 	/* commands to create foreign key constraints */
-	foreignConstraintCommands = GetTableForeignConstraintCommands(relationId);
+	List *foreignConstraintCommands = GetTableForeignConstraintCommands(relationId);
 	commandList = list_concat(commandList, foreignConstraintCommands);

 	/* commands to create partitioning hierarchy */

@@ -686,10 +671,9 @@ DistributionCreateCommand(DistTableCacheEntry *cacheEntry)
 char *
 DistributionDeleteCommand(char *schemaName, char *tableName)
 {
-	char *distributedRelationName = NULL;
 	StringInfo deleteDistributionCommand = makeStringInfo();

-	distributedRelationName = quote_qualified_identifier(schemaName, tableName);
+	char *distributedRelationName = quote_qualified_identifier(schemaName, tableName);

 	appendStringInfo(deleteDistributionCommand,
 			"SELECT worker_drop_distributed_table(%s)",

@@ -850,11 +834,9 @@ ShardDeleteCommandList(ShardInterval *shardInterval)
 {
 	uint64 shardId = shardInterval->shardId;
 	List *commandList = NIL;
-	StringInfo deletePlacementCommand = NULL;
-	StringInfo deleteShardCommand = NULL;

 	/* create command to delete shard placements */
-	deletePlacementCommand = makeStringInfo();
+	StringInfo deletePlacementCommand = makeStringInfo();
 	appendStringInfo(deletePlacementCommand,
 			"DELETE FROM pg_dist_placement WHERE shardid = " UINT64_FORMAT,
 			shardId);

@@ -862,7 +844,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval)
 	commandList = lappend(commandList, deletePlacementCommand->data);

 	/* create command to delete shard */
-	deleteShardCommand = makeStringInfo();
+	StringInfo deleteShardCommand = makeStringInfo();
 	appendStringInfo(deleteShardCommand,
 			"DELETE FROM pg_dist_shard WHERE shardid = " UINT64_FORMAT, shardId);

@@ -1013,27 +995,23 @@ UpdateDistNodeBoolAttr(char *nodeName, int32 nodePort, int attrNum, bool value)
 {
 	const bool indexOK = false;

-	Relation pgDistNode = NULL;
-	TupleDesc tupleDescriptor = NULL;
 	ScanKeyData scanKey[2];
-	SysScanDesc scanDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_node];
 	bool isnull[Natts_pg_dist_node];
 	bool replace[Natts_pg_dist_node];

-	pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(pgDistNode);
+	Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);

 	ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename,
 			BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName));
 	ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport,
 			BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort));

-	scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK,
-			NULL, 2, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK,
+			NULL, 2, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"",

@@ -1113,18 +1091,15 @@ char *
 CreateSchemaDDLCommand(Oid schemaId)
 {
 	char *schemaName = get_namespace_name(schemaId);
-	StringInfo schemaNameDef = NULL;
-	const char *ownerName = NULL;
-	const char *quotedSchemaName = NULL;

 	if (strncmp(schemaName, "public", NAMEDATALEN) == 0)
 	{
 		return NULL;
 	}

-	schemaNameDef = makeStringInfo();
-	quotedSchemaName = quote_identifier(schemaName);
-	ownerName = quote_identifier(SchemaOwnerName(schemaId));
+	StringInfo schemaNameDef = makeStringInfo();
+	const char *quotedSchemaName = quote_identifier(schemaName);
+	const char *ownerName = quote_identifier(SchemaOwnerName(schemaId));
 	appendStringInfo(schemaNameDef, CREATE_SCHEMA_COMMAND, quotedSchemaName, ownerName);

 	return schemaNameDef->data;

@@ -1155,11 +1130,9 @@ TruncateTriggerCreateCommand(Oid relationId)
 static char *
 SchemaOwnerName(Oid objectId)
 {
-	HeapTuple tuple = NULL;
 	Oid ownerId = InvalidOid;
-	char *ownerName = NULL;

-	tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objectId));
+	HeapTuple tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objectId));
 	if (HeapTupleIsValid(tuple))
 	{
 		ownerId = ((Form_pg_namespace) GETSTRUCT(tuple))->nspowner;

@@ -1169,7 +1142,7 @@ SchemaOwnerName(Oid objectId)
 		ownerId = GetUserId();
 	}

-	ownerName = GetUserNameFromId(ownerId, false);
+	char *ownerName = GetUserNameFromId(ownerId, false);

 	ReleaseSysCache(tuple);

@@ -1248,7 +1221,6 @@ DetachPartitionCommandList(void)
 	{
 		DistTableCacheEntry *cacheEntry =
 			(DistTableCacheEntry *) lfirst(distributedTableCell);
-		List *partitionList = NIL;
 		ListCell *partitionCell = NULL;

 		if (!PartitionedTable(cacheEntry->relationId))

@@ -1256,7 +1228,7 @@ DetachPartitionCommandList(void)
 			continue;
 		}

-		partitionList = PartitionList(cacheEntry->relationId);
+		List *partitionList = PartitionList(cacheEntry->relationId);
 		foreach(partitionCell, partitionList)
 		{
 			Oid partitionRelationId = lfirst_oid(partitionCell);

@@ -1295,7 +1267,6 @@ DetachPartitionCommandList(void)
 MetadataSyncResult
 SyncMetadataToNodes(void)
 {
-	List *workerList = NIL;
 	ListCell *workerCell = NULL;
 	MetadataSyncResult result = METADATA_SYNC_SUCCESS;


@@ -1314,7 +1285,7 @@ SyncMetadataToNodes(void)
 		return METADATA_SYNC_FAILED_LOCK;
 	}

-	workerList = ActivePrimaryWorkerNodeList(NoLock);
+	List *workerList = ActivePrimaryWorkerNodeList(NoLock);

 	foreach(workerCell, workerList)
 	{
@@ -128,7 +128,6 @@ master_add_node(PG_FUNCTION_ARGS)
 	text *nodeName = PG_GETARG_TEXT_P(0);
 	int32 nodePort = PG_GETARG_INT32(1);
 	char *nodeNameString = text_to_cstring(nodeName);
-	int nodeId = 0;

 	NodeMetadata nodeMetadata = DefaultNodeMetadata();
 	bool nodeAlreadyExists = false;

@@ -153,8 +152,8 @@ master_add_node(PG_FUNCTION_ARGS)
 		nodeMetadata.nodeRole = PG_GETARG_OID(3);
 	}

-	nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
-			&nodeAlreadyExists);
+	int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
+			&nodeAlreadyExists);

 	/*
 	 * After adding new node, if the node did not already exist, we will activate

@@ -185,15 +184,14 @@ master_add_inactive_node(PG_FUNCTION_ARGS)

 	NodeMetadata nodeMetadata = DefaultNodeMetadata();
 	bool nodeAlreadyExists = false;
-	int nodeId = 0;
 	nodeMetadata.groupId = PG_GETARG_INT32(2);
 	nodeMetadata.nodeRole = PG_GETARG_OID(3);
 	nodeMetadata.nodeCluster = NameStr(*nodeClusterName);

 	CheckCitusVersion(ERROR);

-	nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
-			&nodeAlreadyExists);
+	int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
+			&nodeAlreadyExists);

 	PG_RETURN_INT32(nodeId);
 }

@@ -217,7 +215,6 @@ master_add_secondary_node(PG_FUNCTION_ARGS)
 	Name nodeClusterName = PG_GETARG_NAME(4);
 	NodeMetadata nodeMetadata = DefaultNodeMetadata();
 	bool nodeAlreadyExists = false;
-	int nodeId = 0;

 	nodeMetadata.groupId = GroupForNode(primaryNameString, primaryPort);
 	nodeMetadata.nodeCluster = NameStr(*nodeClusterName);

@@ -226,8 +223,8 @@ master_add_secondary_node(PG_FUNCTION_ARGS)

 	CheckCitusVersion(ERROR);

-	nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
-			&nodeAlreadyExists);
+	int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata,
+			&nodeAlreadyExists);

 	PG_RETURN_INT32(nodeId);
 }

@@ -307,11 +304,9 @@ master_disable_node(PG_FUNCTION_ARGS)
 	}
 	PG_CATCH();
 	{
-		ErrorData *edata = NULL;
-
 		/* CopyErrorData() requires (CurrentMemoryContext != ErrorContext) */
 		MemoryContextSwitchTo(savedContext);
-		edata = CopyErrorData();
+		ErrorData *edata = CopyErrorData();

 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 				errmsg("Disabling %s:%d failed", workerNode->workerName,

@@ -397,14 +392,12 @@ SetUpDistributedTableDependencies(WorkerNode *newWorkerNode)
 static void
 PropagateRolesToNewNode(WorkerNode *newWorkerNode)
 {
-	List *ddlCommands = NIL;
-
 	if (!EnableAlterRolePropagation)
 	{
 		return;
 	}

-	ddlCommands = GenerateAlterRoleIfExistsCommandAllRoles();
+	List *ddlCommands = GenerateAlterRoleIfExistsCommandAllRoles();

 	SendCommandListToWorkerInSingleTransaction(newWorkerNode->workerName,
 			newWorkerNode->workerPort,

@@ -419,8 +412,6 @@ PropagateRolesToNewNode(WorkerNode *newWorkerNode)
 static WorkerNode *
 ModifiableWorkerNode(const char *nodeName, int32 nodePort)
 {
-	WorkerNode *workerNode = NULL;
-
 	CheckCitusVersion(ERROR);

 	EnsureCoordinator();

@@ -428,7 +419,7 @@ ModifiableWorkerNode(const char *nodeName, int32 nodePort)
 	/* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */
 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);

-	workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
+	WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
 	if (workerNode == NULL)
 	{
 		ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort)));

@@ -581,13 +572,12 @@ PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes)
 static int
 ActivateNode(char *nodeName, int nodePort)
 {
-	WorkerNode *newWorkerNode = NULL;
 	bool isActive = true;

 	/* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */
 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);

-	newWorkerNode = SetNodeState(nodeName, nodePort, isActive);
+	WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive);

 	PropagateRolesToNewNode(newWorkerNode);
 	SetUpDistributedTableDependencies(newWorkerNode);

@@ -621,14 +611,13 @@ master_update_node(PG_FUNCTION_ARGS)
 	int32 lock_cooldown = PG_GETARG_INT32(4);

 	char *newNodeNameString = text_to_cstring(newNodeName);
-	WorkerNode *workerNode = NULL;
-	WorkerNode *workerNodeWithSameAddress = NULL;
 	List *placementList = NIL;
 	BackgroundWorkerHandle *handle = NULL;

 	CheckCitusVersion(ERROR);

-	workerNodeWithSameAddress = FindWorkerNodeAnyCluster(newNodeNameString, newNodePort);
+	WorkerNode *workerNodeWithSameAddress = FindWorkerNodeAnyCluster(newNodeNameString,
+			newNodePort);
 	if (workerNodeWithSameAddress != NULL)
 	{
 		/* a node with the given hostname and port already exists in the metadata */

@@ -646,7 +635,7 @@ master_update_node(PG_FUNCTION_ARGS)
 		}
 	}

-	workerNode = LookupNodeByNodeId(nodeId);
+	WorkerNode *workerNode = LookupNodeByNodeId(nodeId);
 	if (workerNode == NULL)
 	{
 		ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND),

@@ -734,25 +723,22 @@ UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort)
 {
 	const bool indexOK = true;

-	Relation pgDistNode = NULL;
-	TupleDesc tupleDescriptor = NULL;
 	ScanKeyData scanKey[1];
-	SysScanDesc scanDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_node];
 	bool isnull[Natts_pg_dist_node];
 	bool replace[Natts_pg_dist_node];

-	pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
-	tupleDescriptor = RelationGetDescr(pgDistNode);
+	Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);

 	ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodeid,
 			BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodeId));

-	scanDescriptor = systable_beginscan(pgDistNode, DistNodeNodeIdIndexId(), indexOK,
-			NULL, 1, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, DistNodeNodeIdIndexId(),
+			indexOK,
+			NULL, 1, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"",

@@ -791,8 +777,6 @@ Datum
 get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 {
 	ShardInterval *shardInterval = NULL;
-	char distributionMethod = 0;
-	Oid relationId = InvalidOid;

 	CheckCitusVersion(ERROR);


@@ -806,7 +790,7 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 				errmsg("relation cannot be NULL")));
 	}

-	relationId = PG_GETARG_OID(0);
+	Oid relationId = PG_GETARG_OID(0);
 	EnsureTablePermissions(relationId, ACL_SELECT);

 	if (!IsDistributedTable(relationId))

@@ -815,7 +799,7 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 				errmsg("relation is not distributed")));
 	}

-	distributionMethod = PartitionMethod(relationId);
+	char distributionMethod = PartitionMethod(relationId);
 	if (distributionMethod == DISTRIBUTE_BY_NONE)
 	{
 		List *shardIntervalList = LoadShardIntervalList(relationId);

@@ -829,12 +813,6 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 	else if (distributionMethod == DISTRIBUTE_BY_HASH ||
 			 distributionMethod == DISTRIBUTE_BY_RANGE)
 	{
-		Var *distributionColumn = NULL;
-		Oid distributionDataType = InvalidOid;
-		Oid inputDataType = InvalidOid;
-		char *distributionValueString = NULL;
-		Datum inputDatum = 0;
-		Datum distributionValueDatum = 0;
 		DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);

 		/* if given table is not reference table, distributionValue cannot be NULL */

@@ -845,15 +823,15 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 					"than reference tables.")));
 		}

-		inputDatum = PG_GETARG_DATUM(1);
-		inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1);
-		distributionValueString = DatumToString(inputDatum, inputDataType);
+		Datum inputDatum = PG_GETARG_DATUM(1);
+		Oid inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1);
+		char *distributionValueString = DatumToString(inputDatum, inputDataType);

-		distributionColumn = DistPartitionKey(relationId);
-		distributionDataType = distributionColumn->vartype;
+		Var *distributionColumn = DistPartitionKey(relationId);
+		Oid distributionDataType = distributionColumn->vartype;

-		distributionValueDatum = StringToDatum(distributionValueString,
-				distributionDataType);
+		Datum distributionValueDatum = StringToDatum(distributionValueString,
+				distributionDataType);

 		shardInterval = FindShardInterval(distributionValueDatum, cacheEntry);
 	}

@@ -881,18 +859,17 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 WorkerNode *
 FindWorkerNode(char *nodeName, int32 nodePort)
 {
-	WorkerNode *cachedWorkerNode = NULL;
 	HTAB *workerNodeHash = GetWorkerNodeHash();
 	bool handleFound = false;
-	void *hashKey = NULL;

 	WorkerNode *searchedNode = (WorkerNode *) palloc0(sizeof(WorkerNode));
 	strlcpy(searchedNode->workerName, nodeName, WORKER_LENGTH);
 	searchedNode->workerPort = nodePort;

-	hashKey = (void *) searchedNode;
-	cachedWorkerNode = (WorkerNode *) hash_search(workerNodeHash, hashKey, HASH_FIND,
-			&handleFound);
+	void *hashKey = (void *) searchedNode;
+	WorkerNode *cachedWorkerNode = (WorkerNode *) hash_search(workerNodeHash, hashKey,
+			HASH_FIND,
+			&handleFound);
 	if (handleFound)
 	{
 		WorkerNode *workerNode = (WorkerNode *) palloc(sizeof(WorkerNode));

@@ -939,22 +916,19 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort)
 List *
 ReadDistNode(bool includeNodesFromOtherClusters)
 {
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
 	int scanKeyCount = 0;
-	HeapTuple heapTuple = NULL;
 	List *workerNodeList = NIL;
-	TupleDesc tupleDescriptor = NULL;

 	Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);

-	scanDescriptor = systable_beginscan(pgDistNode,
-			InvalidOid, false,
-			NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistNode,
+			InvalidOid, false,
+			NULL, scanKeyCount, scanKey);

-	tupleDescriptor = RelationGetDescr(pgDistNode);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
 		WorkerNode *workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple);

@@ -989,7 +963,6 @@ ReadDistNode(bool includeNodesFromOtherClusters)
 static void
 RemoveNodeFromCluster(char *nodeName, int32 nodePort)
 {
-	char *nodeDeleteCommand = NULL;
 	WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);

 	if (NodeIsPrimary(workerNode))

@@ -1012,7 +985,7 @@ RemoveNodeFromCluster(char *nodeName, int32 nodePort)

 	DeleteNodeRow(workerNode->workerName, nodePort);

-	nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId);
+	char *nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId);

 	/* make sure we don't have any lingering session lifespan connections */
 	CloseNodeConnectionsAfterTransaction(workerNode->workerName, nodePort);

@@ -1059,11 +1032,6 @@ AddNodeMetadata(char *nodeName, int32 nodePort,
 		NodeMetadata *nodeMetadata,
 		bool *nodeAlreadyExists)
 {
-	int nextNodeIdInt = 0;
-	WorkerNode *workerNode = NULL;
-	char *nodeDeleteCommand = NULL;
-	uint32 primariesWithMetadata = 0;
-
 	EnsureCoordinator();

 	*nodeAlreadyExists = false;

@@ -1075,7 +1043,7 @@ AddNodeMetadata(char *nodeName, int32 nodePort,
 	 */
 	LockRelationOid(DistNodeRelationId(), ExclusiveLock);

-	workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
+	WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);
 	if (workerNode != NULL)
 	{
 		/* fill return data and return */

@@ -1122,18 +1090,18 @@ AddNodeMetadata(char *nodeName, int32 nodePort,
 	}

 	/* generate the new node id from the sequence */
-	nextNodeIdInt = GetNextNodeId();
+	int nextNodeIdInt = GetNextNodeId();

 	InsertNodeRow(nextNodeIdInt, nodeName, nodePort, nodeMetadata);

 	workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort);

 	/* send the delete command to all primary nodes with metadata */
-	nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId);
+	char *nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId);
 	SendCommandToWorkers(WORKERS_WITH_METADATA, nodeDeleteCommand);

 	/* finally prepare the insert command and send it to all primary nodes */
-	primariesWithMetadata = CountPrimariesWithMetadata();
+	uint32 primariesWithMetadata = CountPrimariesWithMetadata();
 	if (primariesWithMetadata != 0)
 	{
 		List *workerNodeList = list_make1(workerNode);

@@ -1157,7 +1125,6 @@ SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value)
 	Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
 	TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
 	HeapTuple heapTuple = GetNodeTuple(workerNode->workerName, workerNode->workerPort);
-	WorkerNode *newWorkerNode = NULL;

 	Datum values[Natts_pg_dist_node];
 	bool isnull[Natts_pg_dist_node];

@@ -1206,7 +1173,7 @@ SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value)
 	CitusInvalidateRelcacheByRelid(DistNodeRelationId());
 	CommandCounterIncrement();

-	newWorkerNode = TupleToWorkerNode(tupleDescriptor, heapTuple);
+	WorkerNode *newWorkerNode = TupleToWorkerNode(tupleDescriptor, heapTuple);

 	heap_close(pgDistNode, NoLock);

@@ -1257,18 +1224,16 @@ GetNodeTuple(const char *nodeName, int32 nodePort)
 	const bool indexOK = false;

 	ScanKeyData scanKey[2];
-	SysScanDesc scanDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	HeapTuple nodeTuple = NULL;

 	ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename,
 			BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName));
 	ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport,
 			BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort));
-	scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK,
-			NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK,
+			NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (HeapTupleIsValid(heapTuple))
 	{
 		nodeTuple = heap_copytuple(heapTuple);

@@ -1298,18 +1263,16 @@ GetNextGroupId()
 	Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
 	Oid savedUserId = InvalidOid;
 	int savedSecurityContext = 0;
-	Datum groupIdDatum = 0;
-	int32 groupId = 0;

 	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
 	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

 	/* generate new and unique shardId from sequence */
-	groupIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
+	Datum groupIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);

 	SetUserIdAndSecContext(savedUserId, savedSecurityContext);

-	groupId = DatumGetInt32(groupIdDatum);
+	int32 groupId = DatumGetInt32(groupIdDatum);

 	return groupId;
 }

@@ -1332,18 +1295,16 @@ GetNextNodeId()
 	Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
 	Oid savedUserId = InvalidOid;
 	int savedSecurityContext = 0;
-	Datum nextNodeIdDatum;
-	int nextNodeId = 0;

 	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
 	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

 	/* generate new and unique shardId from sequence */
-	nextNodeIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
+	Datum nextNodeIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);

 	SetUserIdAndSecContext(savedUserId, savedSecurityContext);

-	nextNodeId = DatumGetUInt32(nextNodeIdDatum);
+	int nextNodeId = DatumGetUInt32(nextNodeIdDatum);

 	return nextNodeId;
 }

@@ -1377,9 +1338,6 @@ EnsureCoordinator(void)
 static void
 InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, NodeMetadata *nodeMetadata)
 {
-	Relation pgDistNode = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	HeapTuple heapTuple = NULL;
 	Datum values[Natts_pg_dist_node];
 	bool isNulls[Natts_pg_dist_node];


@@ -1404,10 +1362,10 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, NodeMetadata *nodeMeta
 	values[Anum_pg_dist_node_shouldhaveshards - 1] = BoolGetDatum(
 		nodeMetadata->shouldHaveShards);

-	pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
+	Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);

-	tupleDescriptor = RelationGetDescr(pgDistNode);
-	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
+	HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

 	CatalogTupleInsert(pgDistNode, heapTuple);

@@ -1430,8 +1388,6 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
 	const int scanKeyCount = 2;
 	bool indexOK = false;

-	HeapTuple heapTuple = NULL;
-	SysScanDesc heapScan = NULL;
 	ScanKeyData scanKey[2];
 	Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);


@@ -1447,10 +1403,10 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
 	ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport,
 			BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort));

-	heapScan = systable_beginscan(pgDistNode, InvalidOid, indexOK,
-			NULL, scanKeyCount, scanKey);
+	SysScanDesc heapScan = systable_beginscan(pgDistNode, InvalidOid, indexOK,
+			NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(heapScan);
+	HeapTuple heapTuple = systable_getnext(heapScan);

 	if (!HeapTupleIsValid(heapTuple))
 	{

@@ -1481,11 +1437,8 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
 static WorkerNode *
 TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 {
-	WorkerNode *workerNode = NULL;
 	Datum datumArray[Natts_pg_dist_node];
 	bool isNullArray[Natts_pg_dist_node];
-	char *nodeName = NULL;
-	char *nodeRack = NULL;

 	Assert(!HeapTupleHasNulls(heapTuple));


@@ -1502,10 +1455,10 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple)
 	 */
 	heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray);

-	nodeName = DatumGetCString(datumArray[Anum_pg_dist_node_nodename - 1]);
-	nodeRack = DatumGetCString(datumArray[Anum_pg_dist_node_noderack - 1]);
+	char *nodeName = DatumGetCString(datumArray[Anum_pg_dist_node_nodename - 1]);
+	char *nodeRack = DatumGetCString(datumArray[Anum_pg_dist_node_noderack - 1]);

-	workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode));
+	WorkerNode *workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode));
 	workerNode->nodeId = DatumGetUInt32(datumArray[Anum_pg_dist_node_nodeid - 1]);
 	workerNode->workerPort = DatumGetUInt32(datumArray[Anum_pg_dist_node_nodeport - 1]);
 	workerNode->groupId = DatumGetInt32(datumArray[Anum_pg_dist_node_groupid - 1]);

@@ -1546,12 +1499,11 @@ StringToDatum(char *inputString, Oid dataType)
 	Oid typIoFunc = InvalidOid;
 	Oid typIoParam = InvalidOid;
 	int32 typeModifier = -1;
-	Datum datum = 0;

 	getTypeInputInfo(dataType, &typIoFunc, &typIoParam);
 	getBaseTypeAndTypmod(dataType, &typeModifier);

-	datum = OidInputFunctionCall(typIoFunc, inputString, typIoParam, typeModifier);
+	Datum datum = OidInputFunctionCall(typIoFunc, inputString, typIoParam, typeModifier);

 	return datum;
 }

@@ -1563,12 +1515,11 @@ StringToDatum(char *inputString, Oid dataType)
 char *
 DatumToString(Datum datum, Oid dataType)
 {
-	char *outputString = NULL;
 	Oid typIoFunc = InvalidOid;
 	bool typIsVarlena = false;

 	getTypeOutputInfo(dataType, &typIoFunc, &typIsVarlena);
-	outputString = OidOutputFunctionCall(typIoFunc, datum);
+	char *outputString = OidOutputFunctionCall(typIoFunc, datum);

 	return outputString;
 }

@@ -1582,34 +1533,29 @@ static bool
 UnsetMetadataSyncedForAll(void)
 {
 	bool updatedAtLeastOne = false;
-	Relation relation = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[2];
 	int scanKeyCount = 2;
 	bool indexOK = false;
-	HeapTuple heapTuple = NULL;
-	TupleDesc tupleDescriptor = NULL;
-	CatalogIndexState indstate;

 	/*
 	 * Concurrent master_update_node() calls might iterate and try to update
 	 * pg_dist_node in different orders. To protect against deadlock, we
 	 * get an exclusive lock here.
 	 */
-	relation = heap_open(DistNodeRelationId(), ExclusiveLock);
-	tupleDescriptor = RelationGetDescr(relation);
+	Relation relation = heap_open(DistNodeRelationId(), ExclusiveLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
 	ScanKeyInit(&scanKey[0], Anum_pg_dist_node_hasmetadata,
 			BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true));
 	ScanKeyInit(&scanKey[1], Anum_pg_dist_node_metadatasynced,
 			BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true));

-	indstate = CatalogOpenIndexes(relation);
+	CatalogIndexState indstate = CatalogOpenIndexes(relation);

-	scanDescriptor = systable_beginscan(relation,
-			InvalidOid, indexOK,
-			NULL, scanKeyCount, scanKey);
+	SysScanDesc scanDescriptor = systable_beginscan(relation,
+			InvalidOid, indexOK,
+			NULL, scanKeyCount, scanKey);

-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (HeapTupleIsValid(heapTuple))
 	{
 		updatedAtLeastOne = true;

@@ -1617,7 +1563,6 @@ UnsetMetadataSyncedForAll(void)

 	while (HeapTupleIsValid(heapTuple))
 	{
-		HeapTuple newHeapTuple = NULL;
 		Datum values[Natts_pg_dist_node];
 		bool isnull[Natts_pg_dist_node];
 		bool replace[Natts_pg_dist_node];

@@ -1629,8 +1574,9 @@ UnsetMetadataSyncedForAll(void)
 		values[Anum_pg_dist_node_metadatasynced - 1] = BoolGetDatum(false);
 		replace[Anum_pg_dist_node_metadatasynced - 1] = true;

-		newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull,
-				replace);
+		HeapTuple newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values,
+				isnull,
+				replace);

 		CatalogTupleUpdateWithInfo(relation, &newHeapTuple->t_self, newHeapTuple,
 				indstate);
@@ -61,21 +61,17 @@ RebuildQueryStrings(Query *originalQuery, List *taskList)
 		else if (query->commandType == CMD_INSERT && task->modifyWithSubquery)
 		{
 			/* for INSERT..SELECT, adjust shard names in SELECT part */
-			RangeTblEntry *copiedInsertRte = NULL;
-			RangeTblEntry *copiedSubqueryRte = NULL;
-			Query *copiedSubquery = NULL;
 			List *relationShardList = task->relationShardList;
 			ShardInterval *shardInterval = LoadShardInterval(task->anchorShardId);
-			char partitionMethod = 0;

 			query = copyObject(originalQuery);

-			copiedInsertRte = ExtractResultRelationRTE(query);
-			copiedSubqueryRte = ExtractSelectRangeTableEntry(query);
-			copiedSubquery = copiedSubqueryRte->subquery;
+			RangeTblEntry *copiedInsertRte = ExtractResultRelationRTE(query);
+			RangeTblEntry *copiedSubqueryRte = ExtractSelectRangeTableEntry(query);
+			Query *copiedSubquery = copiedSubqueryRte->subquery;

 			/* there are no restrictions to add for reference tables */
-			partitionMethod = PartitionMethod(shardInterval->relationId);
+			char partitionMethod = PartitionMethod(shardInterval->relationId);
 			if (partitionMethod != DISTRIBUTE_BY_NONE)
 			{
 				AddShardIntervalRestrictionToSelect(copiedSubquery, shardInterval);

@@ -95,14 +91,12 @@ RebuildQueryStrings(Query *originalQuery, List *taskList)
 		else if (query->commandType == CMD_INSERT && (query->onConflict != NULL ||
 				valuesRTE != NULL))
 		{
-			RangeTblEntry *rangeTableEntry = NULL;
-
 			/*
 			 * Always an alias in UPSERTs and multi-row INSERTs to avoid
 			 * deparsing issues (e.g. RETURNING might reference the original
 			 * table name, which has been replaced by a shard name).
 			 */
-			rangeTableEntry = linitial(query->rtable);
+			RangeTblEntry *rangeTableEntry = linitial(query->rtable);
 			if (rangeTableEntry->alias == NULL)
 			{
 				Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL);

@@ -184,13 +178,8 @@ UpdateTaskQueryString(Query *query, Oid distributedTableId, RangeTblEntry *value
 bool
 UpdateRelationToShardNames(Node *node, List *relationShardList)
 {
-	RangeTblEntry *newRte = NULL;
 	uint64 shardId = INVALID_SHARD_ID;
 	Oid relationId = InvalidOid;
-	Oid schemaId = InvalidOid;
-	char *relationName = NULL;
-	char *schemaName = NULL;
-	bool replaceRteWithNullValues = false;
 	ListCell *relationShardCell = NULL;
 	RelationShard *relationShard = NULL;


@@ -212,7 +201,7 @@ UpdateRelationToShardNames(Node *node, List *relationShardList)
 				relationShardList);
 	}

-	newRte = (RangeTblEntry *) node;
+	RangeTblEntry *newRte = (RangeTblEntry *) node;

 	if (newRte->rtekind != RTE_RELATION)
 	{

@@ -238,8 +227,8 @@ UpdateRelationToShardNames(Node *node, List *relationShardList)
 		relationShard = NULL;
 	}

-	replaceRteWithNullValues = relationShard == NULL ||
-			relationShard->shardId == INVALID_SHARD_ID;
+	bool replaceRteWithNullValues = relationShard == NULL ||
+			relationShard->shardId == INVALID_SHARD_ID;
 	if (replaceRteWithNullValues)
 	{
 		ConvertRteToSubqueryWithEmptyResult(newRte);

@@ -249,11 +238,11 @@ UpdateRelationToShardNames(Node *node, List *relationShardList)
 	shardId = relationShard->shardId;
 	relationId = relationShard->relationId;

-	relationName = get_rel_name(relationId);
+	char *relationName = get_rel_name(relationId);
 	AppendShardIdToName(&relationName, shardId);

-	schemaId = get_rel_namespace(relationId);
-	schemaName = get_namespace_name(schemaId);
+	Oid schemaId = get_rel_namespace(relationId);
+	char *schemaName = get_namespace_name(schemaId);

 	ModifyRangeTblExtraData(newRte, CITUS_RTE_SHARD, schemaName, relationName, NIL);


@@ -271,31 +260,26 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
 	Relation relation = heap_open(rte->relid, NoLock);
 	TupleDesc tupleDescriptor = RelationGetDescr(relation);
 	int columnCount = tupleDescriptor->natts;
-	int columnIndex = 0;
-	Query *subquery = NULL;
 	List *targetList = NIL;
-	FromExpr *joinTree = NULL;

-	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		FormData_pg_attribute *attributeForm = TupleDescAttr(tupleDescriptor,
 				columnIndex);
-		TargetEntry *targetEntry = NULL;
-		StringInfo resname = NULL;
-		Const *constValue = NULL;

 		if (attributeForm->attisdropped)
 		{
 			continue;
 		}

-		resname = makeStringInfo();
-		constValue = makeNullConst(attributeForm->atttypid, attributeForm->atttypmod,
-				attributeForm->attcollation);
+		StringInfo resname = makeStringInfo();
+		Const *constValue = makeNullConst(attributeForm->atttypid,
+				attributeForm->atttypmod,
+				attributeForm->attcollation);

 		appendStringInfo(resname, "%s", attributeForm->attname.data);

-		targetEntry = makeNode(TargetEntry);
+		TargetEntry *targetEntry = makeNode(TargetEntry);
 		targetEntry->expr = (Expr *) constValue;
 		targetEntry->resno = columnIndex;
 		targetEntry->resname = resname->data;

@@ -305,10 +289,10 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)

 	heap_close(relation, NoLock);

-	joinTree = makeNode(FromExpr);
+	FromExpr *joinTree = makeNode(FromExpr);
 	joinTree->quals = makeBoolConst(false, false);

-	subquery = makeNode(Query);
+	Query *subquery = makeNode(Query);
 	subquery->commandType = CMD_SELECT;
 	subquery->querySource = QSRC_ORIGINAL;
 	subquery->canSetTag = true;
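The ConvertRteToSubqueryWithEmptyResult hunk above also shows the loop-counter variant of the same cleanup: `int columnIndex = 0;` disappears from the declaration block and the counter is declared in the `for` header instead, limiting its scope to the loop body. A small runnable sketch (names here are illustrative, not taken from the diff):

#include <stdio.h>

int
main(void)
{
	int columnCount = 3;

	/* before: int columnIndex = 0; ... for (columnIndex = 0; ...) */
	/* after: the counter lives only inside the loop */
	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
	{
		printf("column %d\n", columnIndex);
	}

	return 0;
}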
@@ -113,7 +113,6 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
 	PlannedStmt *result = NULL;
 	bool needsDistributedPlanning = false;
 	Query *originalQuery = NULL;
-	PlannerRestrictionContext *plannerRestrictionContext = NULL;
 	bool setPartitionedTablesInherited = false;
 	List *rangeTableList = ExtractRangeTableEntryList(parse);


@@ -181,7 +180,8 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
 	ReplaceTableVisibleFunction((Node *) parse);

 	/* create a restriction context and put it at the end if context list */
-	plannerRestrictionContext = CreateAndPushPlannerRestrictionContext();
+	PlannerRestrictionContext *plannerRestrictionContext =
+		CreateAndPushPlannerRestrictionContext();

 	PG_TRY();
 	{

@@ -519,8 +519,6 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi
 		Query *query, ParamListInfo boundParams,
 		PlannerRestrictionContext *plannerRestrictionContext)
 {
-	DistributedPlan *distributedPlan = NULL;
-	PlannedStmt *resultPlan = NULL;
 	bool hasUnresolvedParams = false;
 	JoinRestrictionContext *joinRestrictionContext =
 		plannerRestrictionContext->joinRestrictionContext;

@@ -533,7 +531,7 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi
 	plannerRestrictionContext->joinRestrictionContext =
 		RemoveDuplicateJoinRestrictions(joinRestrictionContext);

-	distributedPlan =
+	DistributedPlan *distributedPlan =
 		CreateDistributedPlan(planId, originalQuery, query, boundParams,
 				hasUnresolvedParams, plannerRestrictionContext);


@@ -580,7 +578,7 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi
 	distributedPlan->planId = planId;

 	/* create final plan by combining local plan with distributed plan */
-	resultPlan = FinalizePlan(localPlan, distributedPlan);
+	PlannedStmt *resultPlan = FinalizePlan(localPlan, distributedPlan);

 	/*
 	 * As explained above, force planning costs to be unrealistically high if

@@ -617,17 +615,14 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
 		PlannerRestrictionContext *plannerRestrictionContext)
 {
 	DistributedPlan *distributedPlan = NULL;
-	MultiTreeRoot *logicalPlan = NULL;
-	List *subPlanList = NIL;
 	bool hasCtes = originalQuery->cteList != NIL;


 	if (IsModifyCommand(originalQuery))
 	{
-		Oid targetRelationId = InvalidOid;
 		EnsureModificationsCanRun();

-		targetRelationId = ModifyQueryResultRelationId(query);
+		Oid targetRelationId = ModifyQueryResultRelationId(query);
 		EnsurePartitionTableNotReplicated(targetRelationId);

 		if (InsertSelectIntoDistributedTable(originalQuery))

@@ -722,8 +717,8 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
 	 * Plan subqueries and CTEs that cannot be pushed down by recursively
 	 * calling the planner and return the resulting plans to subPlanList.
 	 */
-	subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery,
-			plannerRestrictionContext);
+	List *subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery,
+			plannerRestrictionContext);

 	/*
 	 * If subqueries were recursively planned then we need to replan the query

@@ -798,8 +793,8 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
 	query->cteList = NIL;
 	Assert(originalQuery->cteList == NIL);

-	logicalPlan = MultiLogicalPlanCreate(originalQuery, query,
-			plannerRestrictionContext);
+	MultiTreeRoot *logicalPlan = MultiLogicalPlanCreate(originalQuery, query,
+			plannerRestrictionContext);
 	MultiLogicalPlanOptimize(logicalPlan);

 	/*

@@ -937,14 +932,11 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams)
 	if (IsA(inputNode, Param))
 	{
 		Param *paramToProcess = (Param *) inputNode;
-		ParamExternData *correspondingParameterData = NULL;
 		int numberOfParameters = boundParams->numParams;
 		int parameterId = paramToProcess->paramid;
 		int16 typeLength = 0;
 		bool typeByValue = false;
 		Datum constValue = 0;
-		bool paramIsNull = false;
-		int parameterIndex = 0;

 		if (paramToProcess->paramkind != PARAM_EXTERN)
 		{

@@ -957,13 +949,14 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams)
 		}

 		/* parameterId starts from 1 */
-		parameterIndex = parameterId - 1;
+		int parameterIndex = parameterId - 1;
 		if (parameterIndex >= numberOfParameters)
 		{
 			return inputNode;
 		}

-		correspondingParameterData = &boundParams->params[parameterIndex];
+		ParamExternData *correspondingParameterData =
+			&boundParams->params[parameterIndex];

 		if (!(correspondingParameterData->pflags & PARAM_FLAG_CONST))
 		{

@@ -972,7 +965,7 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams)

 		get_typlenbyval(paramToProcess->paramtype, &typeLength, &typeByValue);

-		paramIsNull = correspondingParameterData->isnull;
+		bool paramIsNull = correspondingParameterData->isnull;
 		if (paramIsNull)
 		{
 			constValue = 0;

@@ -1015,17 +1008,14 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams)
 DistributedPlan *
 GetDistributedPlan(CustomScan *customScan)
 {
-	Node *node = NULL;
-	DistributedPlan *distributedPlan = NULL;
-
 	Assert(list_length(customScan->custom_private) == 1);

-	node = (Node *) linitial(customScan->custom_private);
+	Node *node = (Node *) linitial(customScan->custom_private);
 	Assert(CitusIsA(node, DistributedPlan));

 	CheckNodeCopyAndSerialization(node);

-	distributedPlan = (DistributedPlan *) node;
+	DistributedPlan *distributedPlan = (DistributedPlan *) node;

 	return distributedPlan;
 }

@@ -1040,7 +1030,6 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
 {
 	PlannedStmt *finalPlan = NULL;
 	CustomScan *customScan = makeNode(CustomScan);
-	Node *distributedPlanData = NULL;
 	MultiExecutorType executorType = MULTI_EXECUTOR_INVALID_FIRST;

 	if (!distributedPlan->planningError)

@@ -1092,7 +1081,7 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
 	distributedPlan->relationIdList = localPlan->relationOids;
 	distributedPlan->queryId = localPlan->queryId;

-	distributedPlanData = (Node *) distributedPlan;
+	Node *distributedPlanData = (Node *) distributedPlan;

 	customScan->custom_private = list_make1(distributedPlanData);
 	customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN;

@@ -1119,9 +1108,7 @@ static PlannedStmt *
 FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
 		CustomScan *customScan)
 {
-	PlannedStmt *finalPlan = NULL;
-
-	finalPlan = MasterNodeSelectPlan(distributedPlan, customScan);
+	PlannedStmt *finalPlan = MasterNodeSelectPlan(distributedPlan, customScan);
 	finalPlan->queryId = localPlan->queryId;
 	finalPlan->utilityStmt = localPlan->utilityStmt;


@@ -1141,8 +1128,6 @@ FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
 static PlannedStmt *
 FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
 {
-	PlannedStmt *routerPlan = NULL;
-	RangeTblEntry *remoteScanRangeTableEntry = NULL;
 	ListCell *targetEntryCell = NULL;
 	List *targetList = NIL;
 	List *columnNameList = NIL;

@@ -1154,9 +1139,6 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
 	foreach(targetEntryCell, localPlan->planTree->targetlist)
 	{
 		TargetEntry *targetEntry = lfirst(targetEntryCell);
-		TargetEntry *newTargetEntry = NULL;
-		Var *newVar = NULL;
-		Value *columnName = NULL;

 		Assert(IsA(targetEntry, TargetEntry));


@@ -1171,7 +1153,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
 		}

 		/* build target entry pointing to remote scan range table entry */
-		newVar = makeVarFromTargetEntry(customScanRangeTableIndex, targetEntry);
+		Var *newVar = makeVarFromTargetEntry(customScanRangeTableIndex, targetEntry);

 		if (newVar->vartype == RECORDOID)
 		{

@@ -1184,20 +1166,20 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
 			newVar->vartypmod = BlessRecordExpression(targetEntry->expr);
 		}

-		newTargetEntry = flatCopyTargetEntry(targetEntry);
+		TargetEntry *newTargetEntry = flatCopyTargetEntry(targetEntry);
 		newTargetEntry->expr = (Expr *) newVar;
 		targetList = lappend(targetList, newTargetEntry);

-		columnName = makeString(targetEntry->resname);
+		Value *columnName = makeString(targetEntry->resname);
 		columnNameList = lappend(columnNameList, columnName);
 	}

 	customScan->scan.plan.targetlist = targetList;

-	routerPlan = makeNode(PlannedStmt);
+	PlannedStmt *routerPlan = makeNode(PlannedStmt);
 	routerPlan->planTree = (Plan *) customScan;

-	remoteScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList);
+	RangeTblEntry *remoteScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList);
 	routerPlan->rtable = list_make1(remoteScanRangeTableEntry);

 	/* add original range table list for access permission checks */

@@ -1236,11 +1218,10 @@ BlessRecordExpression(Expr *expr)
 	 */
 	Oid resultTypeId = InvalidOid;
 	TupleDesc resultTupleDesc = NULL;
-	TypeFuncClass typeClass;

 	/* get_expr_result_type blesses the tuple descriptor */
-	typeClass = get_expr_result_type((Node *) expr, &resultTypeId,
-			&resultTupleDesc);
+	TypeFuncClass typeClass = get_expr_result_type((Node *) expr, &resultTypeId,
+			&resultTupleDesc);
 	if (typeClass == TYPEFUNC_COMPOSITE)
 	{
 		typeMod = resultTupleDesc->tdtypmod;

@@ -1368,32 +1349,27 @@ multi_join_restriction_hook(PlannerInfo *root,
 		JoinType jointype,
 		JoinPathExtraData *extra)
 {
-	PlannerRestrictionContext *plannerRestrictionContext = NULL;
-	JoinRestrictionContext *joinRestrictionContext = NULL;
-	JoinRestriction *joinRestriction = NULL;
-	MemoryContext restrictionsMemoryContext = NULL;
-	MemoryContext oldMemoryContext = NULL;
-	List *restrictInfoList = NIL;
-
 	/*
 	 * Use a memory context that's guaranteed to live long enough, could be
 	 * called in a more shorted lived one (e.g. with GEQO).
 	 */
-	plannerRestrictionContext = CurrentPlannerRestrictionContext();
-	restrictionsMemoryContext = plannerRestrictionContext->memoryContext;
-	oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);
+	PlannerRestrictionContext *plannerRestrictionContext =
+		CurrentPlannerRestrictionContext();
+	MemoryContext restrictionsMemoryContext = plannerRestrictionContext->memoryContext;
+	MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);

 	/*
 	 * We create a copy of restrictInfoList because it may be created in a memory
 	 * context which will be deleted when we still need it, thus we create a copy
 	 * of it in our memory context.
 	 */
-	restrictInfoList = copyObject(extra->restrictlist);
+	List *restrictInfoList = copyObject(extra->restrictlist);

-	joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext;
+	JoinRestrictionContext *joinRestrictionContext =
+		plannerRestrictionContext->joinRestrictionContext;
 	Assert(joinRestrictionContext != NULL);

-	joinRestriction = palloc0(sizeof(JoinRestriction));
+	JoinRestriction *joinRestriction = palloc0(sizeof(JoinRestriction));
 	joinRestriction->joinType = jointype;
 	joinRestriction->joinRestrictInfoList = restrictInfoList;
 	joinRestriction->plannerInfo = root;
|
||||
|
@ -1424,14 +1400,7 @@ void
|
|||
multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
|
||||
Index restrictionIndex, RangeTblEntry *rte)
|
||||
{
|
||||
PlannerRestrictionContext *plannerRestrictionContext = NULL;
|
||||
RelationRestrictionContext *relationRestrictionContext = NULL;
|
||||
MemoryContext restrictionsMemoryContext = NULL;
|
||||
MemoryContext oldMemoryContext = NULL;
|
||||
RelationRestriction *relationRestriction = NULL;
|
||||
DistTableCacheEntry *cacheEntry = NULL;
|
||||
bool distributedTable = false;
|
||||
bool localTable = false;
|
||||
|
||||
AdjustReadIntermediateResultCost(rte, relOptInfo);
|
||||
|
||||
|
@ -1444,14 +1413,15 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
|
|||
* Use a memory context that's guaranteed to live long enough, could be
|
||||
* called in a more shorted lived one (e.g. with GEQO).
|
||||
*/
|
||||
plannerRestrictionContext = CurrentPlannerRestrictionContext();
|
||||
restrictionsMemoryContext = plannerRestrictionContext->memoryContext;
|
||||
oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);
|
||||
PlannerRestrictionContext *plannerRestrictionContext =
|
||||
CurrentPlannerRestrictionContext();
|
||||
MemoryContext restrictionsMemoryContext = plannerRestrictionContext->memoryContext;
|
||||
MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext);
|
||||
|
||||
distributedTable = IsDistributedTable(rte->relid);
|
||||
localTable = !distributedTable;
|
||||
bool distributedTable = IsDistributedTable(rte->relid);
|
||||
bool localTable = !distributedTable;
|
||||
|
||||
relationRestriction = palloc0(sizeof(RelationRestriction));
|
||||
RelationRestriction *relationRestriction = palloc0(sizeof(RelationRestriction));
|
||||
relationRestriction->index = restrictionIndex;
|
||||
relationRestriction->relationId = rte->relid;
|
||||
relationRestriction->rte = rte;
|
||||
|
@ -1463,7 +1433,8 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
|
|||
/* see comments on GetVarFromAssignedParam() */
|
||||
relationRestriction->outerPlanParamsList = OuterPlanParamsList(root);
|
||||
|
||||
relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext;
|
||||
RelationRestrictionContext *relationRestrictionContext =
|
||||
plannerRestrictionContext->relationRestrictionContext;
|
||||
relationRestrictionContext->hasDistributedRelation |= distributedTable;
|
||||
relationRestrictionContext->hasLocalRelation |= localTable;
|
||||
|
||||
|
@ -1644,9 +1615,8 @@ static List *
|
|||
OuterPlanParamsList(PlannerInfo *root)
|
||||
{
|
||||
List *planParamsList = NIL;
|
||||
PlannerInfo *outerNodeRoot = NULL;
|
||||
|
||||
for (outerNodeRoot = root->parent_root; outerNodeRoot != NULL;
|
||||
for (PlannerInfo *outerNodeRoot = root->parent_root; outerNodeRoot != NULL;
|
||||
outerNodeRoot = outerNodeRoot->parent_root)
|
||||
{
|
||||
RootPlanParams *rootPlanParams = palloc0(sizeof(RootPlanParams));
|
||||
|
@ -1729,11 +1699,9 @@ CreateAndPushPlannerRestrictionContext(void)
|
|||
static PlannerRestrictionContext *
|
||||
CurrentPlannerRestrictionContext(void)
|
||||
{
|
||||
PlannerRestrictionContext *plannerRestrictionContext = NULL;
|
||||
|
||||
Assert(plannerRestrictionContextList != NIL);
|
||||
|
||||
plannerRestrictionContext =
|
||||
PlannerRestrictionContext *plannerRestrictionContext =
|
||||
(PlannerRestrictionContext *) linitial(plannerRestrictionContextList);
|
||||
|
||||
if (plannerRestrictionContext == NULL)
|
||||
|
@ -1804,7 +1772,6 @@ HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams)
|
|||
if (boundParams && paramId > 0 && paramId <= boundParams->numParams)
|
||||
{
|
||||
ParamExternData *externParam = NULL;
|
||||
Oid paramType = InvalidOid;
|
||||
|
||||
/* give hook a chance in case parameter is dynamic */
|
||||
if (boundParams->paramFetch != NULL)
|
||||
|
@ -1818,7 +1785,7 @@ HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams)
|
|||
externParam = &boundParams->params[paramId - 1];
|
||||
}
|
||||
|
||||
paramType = externParam->ptype;
|
||||
Oid paramType = externParam->ptype;
|
||||
if (OidIsValid(paramType))
|
||||
{
|
||||
return false;
|
||||
|
@ -1890,7 +1857,6 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList)
|
|||
foreach(rangeTableCell, rangeTableList)
|
||||
{
|
||||
RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
|
||||
DistTableCacheEntry *cacheEntry = NULL;
|
||||
|
||||
if (rangeTableEntry->rtekind == RTE_FUNCTION)
|
||||
{
|
||||
|
@ -1909,7 +1875,8 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList)
|
|||
continue;
|
||||
}
|
||||
|
||||
cacheEntry = DistributedTableCacheEntry(rangeTableEntry->relid);
|
||||
DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(
|
||||
rangeTableEntry->relid);
|
||||
if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE)
|
||||
{
|
||||
hasReferenceTable = true;
|
||||
|
@ -1931,14 +1898,12 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList)
|
|||
static bool
|
||||
QueryIsNotSimpleSelect(Node *node)
|
||||
{
|
||||
Query *query = NULL;
|
||||
|
||||
if (!IsA(node, Query))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
query = (Query *) node;
|
||||
Query *query = (Query *) node;
|
||||
return (query->commandType != CMD_SELECT) || (query->rowMarks != NIL);
|
||||
}
|
||||
|
||||
|
@ -1950,14 +1915,6 @@ QueryIsNotSimpleSelect(Node *node)
|
|||
static bool
|
||||
UpdateReferenceTablesWithShard(Node *node, void *context)
|
||||
{
|
||||
RangeTblEntry *newRte = NULL;
|
||||
uint64 shardId = INVALID_SHARD_ID;
|
||||
Oid relationId = InvalidOid;
|
||||
Oid schemaId = InvalidOid;
|
||||
char *relationName = NULL;
|
||||
DistTableCacheEntry *cacheEntry = NULL;
|
||||
ShardInterval *shardInterval = NULL;
|
||||
|
||||
if (node == NULL)
|
||||
{
|
||||
return false;
|
||||
|
@ -1976,32 +1933,32 @@ UpdateReferenceTablesWithShard(Node *node, void *context)
|
|||
NULL);
|
||||
}
|
||||
|
||||
newRte = (RangeTblEntry *) node;
|
||||
RangeTblEntry *newRte = (RangeTblEntry *) node;
|
||||
|
||||
if (newRte->rtekind != RTE_RELATION)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
relationId = newRte->relid;
|
||||
Oid relationId = newRte->relid;
|
||||
if (!IsDistributedTable(relationId))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
cacheEntry = DistributedTableCacheEntry(relationId);
|
||||
DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
|
||||
if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
shardInterval = cacheEntry->sortedShardIntervalArray[0];
|
||||
shardId = shardInterval->shardId;
|
||||
ShardInterval *shardInterval = cacheEntry->sortedShardIntervalArray[0];
|
||||
uint64 shardId = shardInterval->shardId;
|
||||
|
||||
relationName = get_rel_name(relationId);
|
||||
char *relationName = get_rel_name(relationId);
|
||||
AppendShardIdToName(&relationName, shardId);
|
||||
|
||||
schemaId = get_rel_namespace(relationId);
|
||||
Oid schemaId = get_rel_namespace(relationId);
|
||||
newRte->relid = get_relname_relid(relationName, schemaId);
|
||||
|
||||
/*
|
||||
|
|
|
@@ -45,28 +45,21 @@ ExtendedOpNodeProperties
 BuildExtendedOpNodeProperties(MultiExtendedOp *extendedOpNode)
 {
 	ExtendedOpNodeProperties extendedOpNodeProperties;
-	List *tableNodeList = NIL;
-	List *targetList = NIL;
-	Node *havingQual = NULL;

-	bool groupedByDisjointPartitionColumn = false;
-	bool repartitionSubquery = false;
-	bool hasNonPartitionColumnDistinctAgg = false;
-	bool pullDistinctColumns = false;
-	bool pushDownWindowFunctions = false;

-	tableNodeList = FindNodesOfType((MultiNode *) extendedOpNode, T_MultiTable);
-	groupedByDisjointPartitionColumn = GroupedByDisjointPartitionColumn(tableNodeList,
-																		extendedOpNode);
+	List *tableNodeList = FindNodesOfType((MultiNode *) extendedOpNode, T_MultiTable);
+	bool groupedByDisjointPartitionColumn = GroupedByDisjointPartitionColumn(
+		tableNodeList,
+		extendedOpNode);

-	repartitionSubquery = ExtendedOpNodeContainsRepartitionSubquery(extendedOpNode);
+	bool repartitionSubquery = ExtendedOpNodeContainsRepartitionSubquery(extendedOpNode);

-	targetList = extendedOpNode->targetList;
-	havingQual = extendedOpNode->havingQual;
-	hasNonPartitionColumnDistinctAgg =
+	List *targetList = extendedOpNode->targetList;
+	Node *havingQual = extendedOpNode->havingQual;
+	bool hasNonPartitionColumnDistinctAgg =
 		HasNonPartitionColumnDistinctAgg(targetList, havingQual, tableNodeList);

-	pullDistinctColumns =
+	bool pullDistinctColumns =
 		ShouldPullDistinctColumn(repartitionSubquery, groupedByDisjointPartitionColumn,
 								 hasNonPartitionColumnDistinctAgg);

@@ -75,7 +68,7 @@ BuildExtendedOpNodeProperties(MultiExtendedOp *extendedOpNode)
 	 * using hasWindowFuncs is safe for now. However, this should be fixed
 	 * when we support pull-to-master window functions.
 	 */
-	pushDownWindowFunctions = extendedOpNode->hasWindowFuncs;
+	bool pushDownWindowFunctions = extendedOpNode->hasWindowFuncs;

 	extendedOpNodeProperties.groupedByDisjointPartitionColumn =
 		groupedByDisjointPartitionColumn;
@@ -103,14 +96,13 @@ GroupedByDisjointPartitionColumn(List *tableNodeList, MultiExtendedOp *opNode)
 	{
 		MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell);
 		Oid relationId = tableNode->relationId;
-		char partitionMethod = 0;

 		if (relationId == SUBQUERY_RELATION_ID || !IsDistributedTable(relationId))
 		{
 			continue;
 		}

-		partitionMethod = PartitionMethod(relationId);
+		char partitionMethod = PartitionMethod(relationId);
 		if (partitionMethod != DISTRIBUTE_BY_RANGE &&
 			partitionMethod != DISTRIBUTE_BY_HASH)
 		{
@@ -173,12 +165,8 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual,
 	foreach(aggregateCheckCell, aggregateCheckList)
 	{
 		Node *targetNode = lfirst(aggregateCheckCell);
-		Aggref *targetAgg = NULL;
-		List *varList = NIL;
 		ListCell *varCell = NULL;
 		bool isPartitionColumn = false;
-		TargetEntry *firstTargetEntry = NULL;
-		Node *firstTargetExprNode = NULL;

 		if (IsA(targetNode, Var))
 		{
@@ -186,7 +174,7 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual,
 		}

 		Assert(IsA(targetNode, Aggref));
-		targetAgg = (Aggref *) targetNode;
+		Aggref *targetAgg = (Aggref *) targetNode;
 		if (targetAgg->aggdistinct == NIL)
 		{
 			continue;
@@ -201,14 +189,15 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual,
 			return true;
 		}

-		firstTargetEntry = linitial_node(TargetEntry, targetAgg->args);
-		firstTargetExprNode = strip_implicit_coercions((Node *) firstTargetEntry->expr);
+		TargetEntry *firstTargetEntry = linitial_node(TargetEntry, targetAgg->args);
+		Node *firstTargetExprNode = strip_implicit_coercions(
+			(Node *) firstTargetEntry->expr);
 		if (!IsA(firstTargetExprNode, Var))
 		{
 			return true;
 		}

-		varList = pull_var_clause_default((Node *) targetAgg->args);
+		List *varList = pull_var_clause_default((Node *) targetAgg->args);
 		foreach(varCell, varList)
 		{
 			Node *targetVar = (Node *) lfirst(varCell);
@@ -71,8 +71,6 @@ static bool DistKeyInSimpleOpExpression(Expr *clause, Var *distColumn);
 PlannedStmt *
 FastPathPlanner(Query *originalQuery, Query *parse, ParamListInfo boundParams)
 {
-	PlannedStmt *result = NULL;
-
 	/*
 	 * To support prepared statements for fast-path queries, we resolve the
 	 * external parameters at this point. Note that this is normally done by
@@ -98,7 +96,7 @@ FastPathPlanner(Query *originalQuery, Query *parse, ParamListInfo boundParams)
 		(Node *) eval_const_expressions(NULL, (Node *) parse->jointree->quals);


-	result = GeneratePlaceHolderPlannedStmt(originalQuery);
+	PlannedStmt *result = GeneratePlaceHolderPlannedStmt(originalQuery);

 	return result;
 }
@@ -122,7 +120,6 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
 	PlannedStmt *result = makeNode(PlannedStmt);
 	SeqScan *seqScanNode = makeNode(SeqScan);
 	Plan *plan = &seqScanNode->plan;
-	Oid relationId = InvalidOid;

 	AssertArg(FastPathRouterQuery(parse));

@@ -143,7 +140,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
 	result->rtable = copyObject(parse->rtable);
 	result->planTree = (Plan *) plan;

-	relationId = ExtractFirstDistributedTableId(parse);
+	Oid relationId = ExtractFirstDistributedTableId(parse);
 	result->relationOids = list_make1_oid(relationId);

 	return result;
@@ -166,12 +163,8 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
 bool
 FastPathRouterQuery(Query *query)
 {
-	RangeTblEntry *rangeTableEntry = NULL;
 	FromExpr *joinTree = query->jointree;
 	Node *quals = NULL;
-	Oid distributedTableId = InvalidOid;
-	Var *distributionKey = NULL;
-	DistTableCacheEntry *cacheEntry = NULL;

 	if (!EnableFastPathRouterPlanner)
 	{
@@ -201,15 +194,15 @@ FastPathRouterQuery(Query *query)
 		return false;
 	}

-	rangeTableEntry = (RangeTblEntry *) linitial(query->rtable);
+	RangeTblEntry *rangeTableEntry = (RangeTblEntry *) linitial(query->rtable);
 	if (rangeTableEntry->rtekind != RTE_RELATION)
 	{
 		return false;
 	}

 	/* we don't want to deal with append/range distributed tables */
-	distributedTableId = rangeTableEntry->relid;
-	cacheEntry = DistributedTableCacheEntry(distributedTableId);
+	Oid distributedTableId = rangeTableEntry->relid;
+	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId);
 	if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH ||
 		  cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE))
 	{
@@ -224,7 +217,7 @@ FastPathRouterQuery(Query *query)
 	}

 	/* if that's a reference table, we don't need to check anything further */
-	distributionKey = PartitionColumn(distributedTableId, 1);
+	Var *distributionKey = PartitionColumn(distributedTableId, 1);
 	if (!distributionKey)
 	{
 		return true;
@@ -269,11 +262,10 @@ static bool
 ColumnAppearsMultipleTimes(Node *quals, Var *distributionKey)
 {
 	ListCell *varClauseCell = NULL;
-	List *varClauseList = NIL;
 	int partitionColumnReferenceCount = 0;

 	/* make sure partition column is used only once in the quals */
-	varClauseList = pull_var_clause_default(quals);
+	List *varClauseList = pull_var_clause_default(quals);
 	foreach(varClauseCell, varClauseList)
 	{
 		Var *column = (Var *) lfirst(varClauseCell);
|
|||
DistributedPlan *
|
||||
TryToDelegateFunctionCall(Query *query, bool *hasExternParam)
|
||||
{
|
||||
FromExpr *joinTree = NULL;
|
||||
List *targetList = NIL;
|
||||
TargetEntry *targetEntry = NULL;
|
||||
FuncExpr *funcExpr = NULL;
|
||||
|
@ -116,7 +115,6 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam)
|
|||
Task *task = NULL;
|
||||
Job *job = NULL;
|
||||
DistributedPlan *distributedPlan = NULL;
|
||||
int32 groupId = 0;
|
||||
struct ParamWalkerContext walkerParamContext = { 0 };
|
||||
|
||||
/* set hasExternParam now in case of early exit */
|
||||
|
@ -128,7 +126,7 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
groupId = GetLocalGroupId();
|
||||
int32 groupId = GetLocalGroupId();
|
||||
if (groupId != 0 || groupId == GROUP_ID_UPGRADING)
|
||||
{
|
||||
/* do not delegate from workers, or while upgrading */
|
||||
|
@ -147,7 +145,7 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
joinTree = query->jointree;
|
||||
FromExpr *joinTree = query->jointree;
|
||||
if (joinTree == NULL)
|
||||
{
|
||||
/* no join tree (mostly here to be defensive) */
|
||||
|
|
|
@@ -136,9 +136,6 @@ static bool
 CheckInsertSelectQuery(Query *query)
 {
 	CmdType commandType = query->commandType;
-	List *fromList = NULL;
-	RangeTblRef *rangeTableReference = NULL;
-	RangeTblEntry *subqueryRte = NULL;

 	if (commandType != CMD_INSERT)
 	{
@@ -150,19 +147,19 @@ CheckInsertSelectQuery(Query *query)
 		return false;
 	}

-	fromList = query->jointree->fromlist;
+	List *fromList = query->jointree->fromlist;
 	if (list_length(fromList) != 1)
 	{
 		return false;
 	}

-	rangeTableReference = linitial(fromList);
+	RangeTblRef *rangeTableReference = linitial(fromList);
 	if (!IsA(rangeTableReference, RangeTblRef))
 	{
 		return false;
 	}

-	subqueryRte = rt_fetch(rangeTableReference->rtindex, query->rtable);
+	RangeTblEntry *subqueryRte = rt_fetch(rangeTableReference->rtindex, query->rtable);
 	if (subqueryRte->rtekind != RTE_SUBQUERY)
 	{
 		return false;
@@ -185,18 +182,15 @@ DistributedPlan *
 CreateInsertSelectPlan(uint64 planId, Query *originalQuery,
 					   PlannerRestrictionContext *plannerRestrictionContext)
 {
-	DistributedPlan *distributedPlan = NULL;
-	DeferredErrorMessage *deferredError = NULL;
-
-	deferredError = ErrorIfOnConflictNotSupported(originalQuery);
+	DeferredErrorMessage *deferredError = ErrorIfOnConflictNotSupported(originalQuery);
 	if (deferredError != NULL)
 	{
 		/* raising the error as there is no possible solution for the unsupported on conflict statements */
 		RaiseDeferredError(deferredError, ERROR);
 	}

-	distributedPlan = CreateDistributedInsertSelectPlan(originalQuery,
-														plannerRestrictionContext);
+	DistributedPlan *distributedPlan = CreateDistributedInsertSelectPlan(originalQuery,
+																		 plannerRestrictionContext);

 	if (distributedPlan->planningError != NULL)
 	{
@@ -220,10 +214,8 @@ static DistributedPlan *
 CreateDistributedInsertSelectPlan(Query *originalQuery,
 								  PlannerRestrictionContext *plannerRestrictionContext)
 {
-	int shardOffset = 0;
 	List *sqlTaskList = NIL;
 	uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */
-	Job *workerJob = NULL;
 	uint64 jobId = INVALID_JOB_ID;
 	DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
 	RangeTblEntry *insertRte = ExtractResultRelationRTE(originalQuery);
@@ -234,7 +226,6 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
 	RelationRestrictionContext *relationRestrictionContext =
 		plannerRestrictionContext->relationRestrictionContext;
 	bool allReferenceTables = relationRestrictionContext->allReferenceTables;
-	bool allDistributionKeysInQueryAreEqual = false;

 	distributedPlan->modLevel = RowModifyLevelForQuery(originalQuery);

@@ -251,7 +242,7 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
 		return distributedPlan;
 	}

-	allDistributionKeysInQueryAreEqual =
+	bool allDistributionKeysInQueryAreEqual =
 		AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext);

 	/*
@@ -263,16 +254,16 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
 	 * the current shard boundaries. Finally, perform the normal shard pruning to
 	 * decide on whether to push the query to the current shard or not.
 	 */
-	for (shardOffset = 0; shardOffset < shardCount; shardOffset++)
+	for (int shardOffset = 0; shardOffset < shardCount; shardOffset++)
 	{
 		ShardInterval *targetShardInterval =
 			targetCacheEntry->sortedShardIntervalArray[shardOffset];
-		Task *modifyTask = NULL;

-		modifyTask = RouterModifyTaskForShardInterval(originalQuery, targetShardInterval,
-													  plannerRestrictionContext,
-													  taskIdIndex,
-													  allDistributionKeysInQueryAreEqual);
+		Task *modifyTask = RouterModifyTaskForShardInterval(originalQuery,
+															targetShardInterval,
+															plannerRestrictionContext,
+															taskIdIndex,
+															allDistributionKeysInQueryAreEqual);

 		/* if a planning error occurred, return it to the calling function */
 		/* through the distributed plan */
@@ -289,7 +280,7 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
 	}

 	/* Create the worker job */
-	workerJob = CitusMakeNode(Job);
+	Job *workerJob = CitusMakeNode(Job);
 	workerJob->taskList = sqlTaskList;
 	workerJob->subqueryPushdown = false;
 	workerJob->dependedJobList = NIL;
@@ -321,17 +312,15 @@ static DeferredErrorMessage *
 DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte,
 								 RangeTblEntry *subqueryRte, bool allReferenceTables)
 {
-	Query *subquery = NULL;
 	Oid selectPartitionColumnTableId = InvalidOid;
 	Oid targetRelationId = insertRte->relid;
 	char targetPartitionMethod = PartitionMethod(targetRelationId);
 	ListCell *rangeTableCell = NULL;
-	DeferredErrorMessage *error = NULL;

 	/* we only do this check for INSERT ... SELECT queries */
 	AssertArg(InsertSelectIntoDistributedTable(queryTree));

-	subquery = subqueryRte->subquery;
+	Query *subquery = subqueryRte->subquery;

 	if (!NeedsDistributedPlanning(subquery))
 	{
@@ -363,7 +352,7 @@ DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte,
 	}

 	/* we don't support LIMIT, OFFSET and WINDOW functions */
-	error = MultiTaskRouterSelectQuerySupported(subquery);
+	DeferredErrorMessage *error = MultiTaskRouterSelectQuerySupported(subquery);
 	if (error)
 	{
 		return error;
@@ -442,20 +431,15 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter

 	StringInfo queryString = makeStringInfo();
 	ListCell *restrictionCell = NULL;
-	Task *modifyTask = NULL;
 	List *selectPlacementList = NIL;
 	uint64 selectAnchorShardId = INVALID_SHARD_ID;
 	List *relationShardList = NIL;
 	List *prunedShardIntervalListList = NIL;
 	uint64 jobId = INVALID_JOB_ID;
-	List *insertShardPlacementList = NULL;
-	List *intersectedPlacementList = NULL;
-	bool replacePrunedQueryWithDummy = false;
 	bool allReferenceTables =
 		plannerRestrictionContext->relationRestrictionContext->allReferenceTables;
 	List *shardOpExpressions = NIL;
 	RestrictInfo *shardRestrictionList = NULL;
-	DeferredErrorMessage *planningError = NULL;
 	bool multiShardModifyQuery = false;
 	List *relationRestrictionList = NIL;

@@ -517,18 +501,21 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter
 	}

 	/* mark that we don't want the router planner to generate dummy hosts/queries */
-	replacePrunedQueryWithDummy = false;
+	bool replacePrunedQueryWithDummy = false;

 	/*
 	 * Use router planner to decide on whether we can push down the query or not.
 	 * If we can, we also rely on the side-effects that all RTEs have been updated
 	 * to point to the relevant nodes and selectPlacementList is determined.
 	 */
-	planningError = PlanRouterQuery(copiedSubquery, copyOfPlannerRestrictionContext,
-									&selectPlacementList, &selectAnchorShardId,
-									&relationShardList, &prunedShardIntervalListList,
-									replacePrunedQueryWithDummy,
-									&multiShardModifyQuery, NULL);
+	DeferredErrorMessage *planningError = PlanRouterQuery(copiedSubquery,
+														  copyOfPlannerRestrictionContext,
+														  &selectPlacementList,
+														  &selectAnchorShardId,
+														  &relationShardList,
+														  &prunedShardIntervalListList,
+														  replacePrunedQueryWithDummy,
+														  &multiShardModifyQuery, NULL);

 	Assert(!multiShardModifyQuery);

@@ -552,9 +539,9 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter
 	}

 	/* get the placements for insert target shard and its intersection with select */
-	insertShardPlacementList = FinalizedShardPlacementList(shardId);
-	intersectedPlacementList = IntersectPlacementList(insertShardPlacementList,
-													  selectPlacementList);
+	List *insertShardPlacementList = FinalizedShardPlacementList(shardId);
+	List *intersectedPlacementList = IntersectPlacementList(insertShardPlacementList,
+															selectPlacementList);

 	/*
 	 * If insert target does not have exactly the same placements with the select,
@@ -586,7 +573,8 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter
 	ereport(DEBUG2, (errmsg("distributed statement: %s",
 							ApplyLogRedaction(queryString->data))));

-	modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, queryString->data);
+	Task *modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK,
+									   queryString->data);
 	modifyTask->dependedTaskList = NULL;
 	modifyTask->anchorShardId = shardId;
 	modifyTask->taskPlacementList = insertShardPlacementList;
@@ -612,21 +600,18 @@ Query *
 ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 							   RangeTblEntry *subqueryRte)
 {
-	Query *subquery = NULL;
 	ListCell *insertTargetEntryCell;
 	List *newSubqueryTargetlist = NIL;
 	List *newInsertTargetlist = NIL;
 	int resno = 1;
 	Index insertTableId = 1;
-	Oid insertRelationId = InvalidOid;
-	int subqueryTargetLength = 0;
 	int targetEntryIndex = 0;

 	AssertArg(InsertSelectIntoDistributedTable(originalQuery));

-	subquery = subqueryRte->subquery;
+	Query *subquery = subqueryRte->subquery;

-	insertRelationId = insertRte->relid;
+	Oid insertRelationId = insertRte->relid;

 	/*
 	 * We implement the following algorithm for the reordering:
@@ -642,11 +627,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 	foreach(insertTargetEntryCell, originalQuery->targetList)
 	{
 		TargetEntry *oldInsertTargetEntry = lfirst(insertTargetEntryCell);
-		TargetEntry *newInsertTargetEntry = NULL;
-		Var *newInsertVar = NULL;
 		TargetEntry *newSubqueryTargetEntry = NULL;
-		List *targetVarList = NULL;
-		int targetVarCount = 0;
 		AttrNumber originalAttrNo = get_attnum(insertRelationId,
 											   oldInsertTargetEntry->resname);

@@ -665,10 +646,10 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 		 * It is safe to pull Var clause and ignore the coercions since they
 		 * are already going to be added on the workers implicitly.
 		 */
-		targetVarList = pull_var_clause((Node *) oldInsertTargetEntry->expr,
-										PVC_RECURSE_AGGREGATES);
+		List *targetVarList = pull_var_clause((Node *) oldInsertTargetEntry->expr,
+											  PVC_RECURSE_AGGREGATES);

-		targetVarCount = list_length(targetVarList);
+		int targetVarCount = list_length(targetVarList);

 		/* a single INSERT target entry cannot have more than one Var */
 		Assert(targetVarCount <= 1);
@@ -702,14 +683,15 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 		 */
 		Assert(!newSubqueryTargetEntry->resjunk);

-		newInsertVar = makeVar(insertTableId, originalAttrNo,
-							   exprType((Node *) newSubqueryTargetEntry->expr),
-							   exprTypmod((Node *) newSubqueryTargetEntry->expr),
-							   exprCollation((Node *) newSubqueryTargetEntry->expr),
-							   0);
-		newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar, originalAttrNo,
-											   oldInsertTargetEntry->resname,
-											   oldInsertTargetEntry->resjunk);
+		Var *newInsertVar = makeVar(insertTableId, originalAttrNo,
+									exprType((Node *) newSubqueryTargetEntry->expr),
+									exprTypmod((Node *) newSubqueryTargetEntry->expr),
+									exprCollation((Node *) newSubqueryTargetEntry->expr),
+									0);
+		TargetEntry *newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar,
+															originalAttrNo,
+															oldInsertTargetEntry->resname,
+															oldInsertTargetEntry->resjunk);

 		newInsertTargetlist = lappend(newInsertTargetlist, newInsertTargetEntry);
 		resno++;
@@ -719,12 +701,11 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 	 * if there are any remaining target list entries (i.e., GROUP BY column not on the
 	 * target list of subquery), update the remaining resnos.
 	 */
-	subqueryTargetLength = list_length(subquery->targetList);
+	int subqueryTargetLength = list_length(subquery->targetList);
 	for (; targetEntryIndex < subqueryTargetLength; ++targetEntryIndex)
 	{
 		TargetEntry *oldSubqueryTle = list_nth(subquery->targetList,
 											   targetEntryIndex);
-		TargetEntry *newSubqueryTargetEntry = NULL;

 		/*
 		 * Skip non-junk entries since we've already processed them above and this
@@ -735,7 +716,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
 			continue;
 		}

-		newSubqueryTargetEntry = copyObject(oldSubqueryTle);
+		TargetEntry *newSubqueryTargetEntry = copyObject(oldSubqueryTle);

 		newSubqueryTargetEntry->resno = resno;
 		newSubqueryTargetlist = lappend(newSubqueryTargetlist,
@@ -920,13 +901,8 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte,
 	{
 		TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
 		List *insertTargetEntryColumnList = pull_var_clause_default((Node *) targetEntry);
-		Var *insertVar = NULL;
-		AttrNumber originalAttrNo = InvalidAttrNumber;
-		TargetEntry *subqueryTargetEntry = NULL;
-		Expr *selectTargetExpr = NULL;
 		Oid subqueryPartitionColumnRelationId = InvalidOid;
 		Var *subqueryPartitionColumn = NULL;
-		List *parentQueryList = NIL;

 		/*
 		 * We only consider target entries that include a single column. Note that this
@@ -941,8 +917,8 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte,
 			continue;
 		}

-		insertVar = (Var *) linitial(insertTargetEntryColumnList);
-		originalAttrNo = targetEntry->resno;
+		Var *insertVar = (Var *) linitial(insertTargetEntryColumnList);
+		AttrNumber originalAttrNo = targetEntry->resno;

 		/* skip processing of target table non-partition columns */
 		if (originalAttrNo != insertPartitionColumn->varattno)
@@ -953,11 +929,11 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte,
 		/* INSERT query includes the partition column */
 		targetTableHasPartitionColumn = true;

-		subqueryTargetEntry = list_nth(subquery->targetList,
-									   insertVar->varattno - 1);
-		selectTargetExpr = subqueryTargetEntry->expr;
+		TargetEntry *subqueryTargetEntry = list_nth(subquery->targetList,
+													insertVar->varattno - 1);
+		Expr *selectTargetExpr = subqueryTargetEntry->expr;

-		parentQueryList = list_make2(query, subquery);
+		List *parentQueryList = list_make2(query, subquery);
 		FindReferencedTableColumn(selectTargetExpr,
 								  parentQueryList, subquery,
 								  &subqueryPartitionColumnRelationId,
@@ -1135,7 +1111,6 @@ static DistributedPlan *
 CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse)
 {
 	Query *insertSelectQuery = copyObject(parse);
-	Query *selectQuery = NULL;

 	RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery);
 	RangeTblEntry *insertRte = ExtractResultRelationRTE(insertSelectQuery);
@@ -1152,7 +1127,7 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse)
 		return distributedPlan;
 	}

-	selectQuery = selectRte->subquery;
+	Query *selectQuery = selectRte->subquery;

 	/*
 	 * Wrap the SELECT as a subquery if the INSERT...SELECT has CTEs or the SELECT
@@ -1194,15 +1169,13 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse)
 	 * insertSelectSubquery and a workerJob to execute afterwards.
 	 */
 	uint64 jobId = INVALID_JOB_ID;
-	Job *workerJob = NULL;
-	List *taskList = NIL;
 	char *resultIdPrefix = InsertSelectResultIdPrefix(planId);

 	/* generate tasks for the INSERT..SELECT phase */
-	taskList = TwoPhaseInsertSelectTaskList(targetRelationId, insertSelectQuery,
-											resultIdPrefix);
+	List *taskList = TwoPhaseInsertSelectTaskList(targetRelationId, insertSelectQuery,
+												  resultIdPrefix);

-	workerJob = CitusMakeNode(Job);
+	Job *workerJob = CitusMakeNode(Job);
 	workerJob->taskList = taskList;
 	workerJob->subqueryPushdown = false;
 	workerJob->dependedJobList = NIL;
@@ -1232,18 +1205,14 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse)
 static DeferredErrorMessage *
 CoordinatorInsertSelectSupported(Query *insertSelectQuery)
 {
-	RangeTblEntry *insertRte = NULL;
-	RangeTblEntry *subqueryRte = NULL;
-	Query *subquery = NULL;
-	DeferredErrorMessage *deferredError = NULL;
-
-	deferredError = ErrorIfOnConflictNotSupported(insertSelectQuery);
+	DeferredErrorMessage *deferredError = ErrorIfOnConflictNotSupported(
+		insertSelectQuery);
 	if (deferredError)
 	{
 		return deferredError;
 	}

-	insertRte = ExtractResultRelationRTE(insertSelectQuery);
+	RangeTblEntry *insertRte = ExtractResultRelationRTE(insertSelectQuery);
 	if (PartitionMethod(insertRte->relid) == DISTRIBUTE_BY_APPEND)
 	{
 		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
@@ -1251,8 +1220,8 @@ CoordinatorInsertSelectSupported(Query *insertSelectQuery)
 							 "not supported", NULL, NULL);
 	}

-	subqueryRte = ExtractSelectRangeTableEntry(insertSelectQuery);
-	subquery = (Query *) subqueryRte->subquery;
+	RangeTblEntry *subqueryRte = ExtractSelectRangeTableEntry(insertSelectQuery);
+	Query *subquery = (Query *) subqueryRte->subquery;

 	if (NeedsDistributedPlanning(subquery) &&
 		contain_nextval_expression_walker((Node *) insertSelectQuery->targetList, NULL))
@@ -1274,25 +1243,22 @@ static Query *
 WrapSubquery(Query *subquery)
 {
-	Query *outerQuery = NULL;
 	ParseState *pstate = make_parsestate(NULL);
-	Alias *selectAlias = NULL;
-	RangeTblEntry *newRangeTableEntry = NULL;
-	RangeTblRef *newRangeTableRef = NULL;
 	ListCell *selectTargetCell = NULL;
 	List *newTargetList = NIL;

-	outerQuery = makeNode(Query);
+	Query *outerQuery = makeNode(Query);
 	outerQuery->commandType = CMD_SELECT;

 	/* create range table entries */
-	selectAlias = makeAlias("citus_insert_select_subquery", NIL);
-	newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery,
-													   selectAlias, false, true);
+	Alias *selectAlias = makeAlias("citus_insert_select_subquery", NIL);
+	RangeTblEntry *newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery,
+																	  selectAlias, false,
+																	  true);
 	outerQuery->rtable = list_make1(newRangeTableEntry);

 	/* set the FROM expression to the subquery */
-	newRangeTableRef = makeNode(RangeTblRef);
+	RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
 	newRangeTableRef->rtindex = 1;
 	outerQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);

@@ -1300,8 +1266,6 @@ WrapSubquery(Query *subquery)
 	foreach(selectTargetCell, subquery->targetList)
 	{
 		TargetEntry *selectTargetEntry = (TargetEntry *) lfirst(selectTargetCell);
-		Var *newSelectVar = NULL;
-		TargetEntry *newSelectTargetEntry = NULL;

 		/* exactly 1 entry in FROM */
 		int indexInRangeTable = 1;
@@ -1311,15 +1275,15 @@ WrapSubquery(Query *subquery)
 			continue;
 		}

-		newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno,
-							   exprType((Node *) selectTargetEntry->expr),
-							   exprTypmod((Node *) selectTargetEntry->expr),
-							   exprCollation((Node *) selectTargetEntry->expr), 0);
+		Var *newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno,
+									exprType((Node *) selectTargetEntry->expr),
+									exprTypmod((Node *) selectTargetEntry->expr),
+									exprCollation((Node *) selectTargetEntry->expr), 0);

-		newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar,
-											   selectTargetEntry->resno,
-											   selectTargetEntry->resname,
-											   selectTargetEntry->resjunk);
+		TargetEntry *newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar,
+															selectTargetEntry->resno,
+															selectTargetEntry->resname,
+															selectTargetEntry->resjunk);

 		newTargetList = lappend(newTargetList, newSelectTargetEntry);
 	}
@@ -1352,16 +1316,13 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery,

 	DistTableCacheEntry *targetCacheEntry = DistributedTableCacheEntry(targetRelationId);
 	int shardCount = targetCacheEntry->shardIntervalArrayLength;
-	int shardOffset = 0;
 	uint32 taskIdIndex = 1;
 	uint64 jobId = INVALID_JOB_ID;

 	ListCell *targetEntryCell = NULL;
-	Relation distributedRelation = NULL;
-	TupleDesc destTupleDescriptor = NULL;

-	distributedRelation = heap_open(targetRelationId, RowExclusiveLock);
-	destTupleDescriptor = RelationGetDescr(distributedRelation);
+	Relation distributedRelation = heap_open(targetRelationId, RowExclusiveLock);
+	TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation);

 	/*
 	 * If the type of insert column and target table's column type is
@@ -1388,25 +1349,22 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery,
 		}
 	}

-	for (shardOffset = 0; shardOffset < shardCount; shardOffset++)
+	for (int shardOffset = 0; shardOffset < shardCount; shardOffset++)
 	{
 		ShardInterval *targetShardInterval =
 			targetCacheEntry->sortedShardIntervalArray[shardOffset];
 		uint64 shardId = targetShardInterval->shardId;
 		List *columnAliasList = NIL;
-		List *insertShardPlacementList = NIL;
-		Query *resultSelectQuery = NULL;
 		StringInfo queryString = makeStringInfo();
-		RelationShard *relationShard = NULL;
-		Task *modifyTask = NULL;
 		StringInfo resultId = makeStringInfo();

 		/* during COPY, the shard ID is appended to the result name */
 		appendStringInfo(resultId, "%s_" UINT64_FORMAT, resultIdPrefix, shardId);

 		/* generate the query on the intermediate result */
-		resultSelectQuery = BuildSubPlanResultQuery(insertSelectQuery->targetList,
-													columnAliasList, resultId->data);
+		Query *resultSelectQuery = BuildSubPlanResultQuery(insertSelectQuery->targetList,
+														   columnAliasList,
+														   resultId->data);

 		/* put the intermediate result query in the INSERT..SELECT */
 		selectRte->subquery = resultSelectQuery;
@@ -1431,13 +1389,14 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery,
 		ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data)));

 		LockShardDistributionMetadata(shardId, ShareLock);
-		insertShardPlacementList = FinalizedShardPlacementList(shardId);
+		List *insertShardPlacementList = FinalizedShardPlacementList(shardId);

-		relationShard = CitusMakeNode(RelationShard);
+		RelationShard *relationShard = CitusMakeNode(RelationShard);
 		relationShard->relationId = targetShardInterval->relationId;
 		relationShard->shardId = targetShardInterval->shardId;

-		modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, queryString->data);
+		Task *modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK,
+										   queryString->data);
 		modifyTask->dependedTaskList = NULL;
 		modifyTask->anchorShardId = shardId;
 		modifyTask->taskPlacementList = insertShardPlacementList;
@@ -52,7 +52,6 @@ FindSubPlansUsedInNode(Node *node)
 		{
 			char *resultId =
 				FindIntermediateResultIdIfExists(rangeTableEntry);
-			Value *resultIdValue = NULL;

 			if (resultId == NULL)
 			{
@@ -63,7 +62,7 @@ FindSubPlansUsedInNode(Node *node)
 			 * Use a Value to be able to use list_append_unique and store
 			 * the result ID in the DistributedPlan.
 			 */
-			resultIdValue = makeString(resultId);
+			Value *resultIdValue = makeString(resultId);
 			subPlanList = list_append_unique(subPlanList, resultIdValue);
 		}
 	}
@@ -185,8 +184,6 @@ AppendAllAccessedWorkerNodes(List *workerNodeList, DistributedPlan *distributedP
 HTAB *
 MakeIntermediateResultHTAB()
 {
-	HTAB *intermediateResultsHash = NULL;
-	uint32 hashFlags = 0;
 	HASHCTL info = { 0 };
 	int initialNumberOfElements = 16;

@@ -194,10 +191,11 @@ MakeIntermediateResultHTAB()
 	info.entrysize = sizeof(IntermediateResultsHashEntry);
 	info.hash = string_hash;
 	info.hcxt = CurrentMemoryContext;
-	hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

-	intermediateResultsHash = hash_create("Intermediate results hash",
-										  initialNumberOfElements, &info, hashFlags);
+	HTAB *intermediateResultsHash = hash_create("Intermediate results hash",
+												initialNumberOfElements, &info,
+												hashFlags);

 	return intermediateResultsHash;
 }
@@ -243,10 +241,10 @@ FindAllWorkerNodesUsingSubplan(HTAB *intermediateResultsHash,
 static IntermediateResultsHashEntry *
 SearchIntermediateResult(HTAB *intermediateResultsHash, char *resultId)
 {
-	IntermediateResultsHashEntry *entry = NULL;
 	bool found = false;

-	entry = hash_search(intermediateResultsHash, resultId, HASH_ENTER, &found);
+	IntermediateResultsHashEntry *entry = hash_search(intermediateResultsHash, resultId,
+													  HASH_ENTER, &found);

 	/* use sane defaults */
 	if (!found)
@@ -343,9 +343,8 @@ ExplainTaskList(List *taskList, ExplainState *es)
 	foreach(taskCell, taskList)
 	{
 		Task *task = (Task *) lfirst(taskCell);
-		RemoteExplainPlan *remoteExplain = NULL;

-		remoteExplain = RemoteExplain(task, es);
+		RemoteExplainPlan *remoteExplain = RemoteExplain(task, es);
 		remoteExplainList = lappend(remoteExplainList, remoteExplain);

 		if (!ExplainAllTasks)
@@ -374,14 +373,12 @@ ExplainTaskList(List *taskList, ExplainState *es)
 static RemoteExplainPlan *
 RemoteExplain(Task *task, ExplainState *es)
 {
-	StringInfo explainQuery = NULL;
 	List *taskPlacementList = task->taskPlacementList;
 	int placementCount = list_length(taskPlacementList);
-	int placementIndex = 0;
-	RemoteExplainPlan *remotePlan = NULL;

-	remotePlan = (RemoteExplainPlan *) palloc0(sizeof(RemoteExplainPlan));
-	explainQuery = BuildRemoteExplainQuery(task->queryString, es);
+	RemoteExplainPlan *remotePlan = (RemoteExplainPlan *) palloc0(
+		sizeof(RemoteExplainPlan));
+	StringInfo explainQuery = BuildRemoteExplainQuery(task->queryString, es);

 	/*
 	 * Use a coordinated transaction to ensure that we open a transaction block
@@ -389,17 +386,16 @@ RemoteExplain(Task *task, ExplainState *es)
 	 */
 	BeginOrContinueCoordinatedTransaction();

-	for (placementIndex = 0; placementIndex < placementCount; placementIndex++)
+	for (int placementIndex = 0; placementIndex < placementCount; placementIndex++)
 	{
 		ShardPlacement *taskPlacement = list_nth(taskPlacementList, placementIndex);
-		MultiConnection *connection = NULL;
 		PGresult *queryResult = NULL;
 		int connectionFlags = 0;
-		int executeResult = 0;

 		remotePlan->placementIndex = placementIndex;

-		connection = GetPlacementConnection(connectionFlags, taskPlacement, NULL);
+		MultiConnection *connection = GetPlacementConnection(connectionFlags,
+															 taskPlacement, NULL);

 		/* try other placements if we fail to connect this one */
 		if (PQstatus(connection->pgConn) != CONNECTION_OK)
@@ -417,8 +413,8 @@ RemoteExplain(Task *task, ExplainState *es)
 		ExecuteCriticalRemoteCommand(connection, "SAVEPOINT citus_explain_savepoint");

 		/* run explain query */
-		executeResult = ExecuteOptionalRemoteCommand(connection, explainQuery->data,
-													 &queryResult);
+		int executeResult = ExecuteOptionalRemoteCommand(connection, explainQuery->data,
+														 &queryResult);
 		if (executeResult != 0)
 		{
 			PQclear(queryResult);
@@ -517,11 +513,9 @@ ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOutputList,
 	foreach(explainOutputCell, explainOutputList)
 	{
 		StringInfo rowString = (StringInfo) lfirst(explainOutputCell);
-		int rowLength = 0;
-		char *lineStart = NULL;

-		rowLength = strlen(rowString->data);
-		lineStart = rowString->data;
+		int rowLength = strlen(rowString->data);
+		char *lineStart = rowString->data;

 		/* parse the lines in the remote EXPLAIN for proper indentation */
 		while (lineStart < rowString->data + rowLength)
@@ -646,14 +640,13 @@ ExplainOneQuery(Query *query, int cursorOptions,
 	}
 	else
 	{
-		PlannedStmt *plan;
 		instr_time planstart,
 				   planduration;

 		INSTR_TIME_SET_CURRENT(planstart);

 		/* plan the query */
-		plan = pg_plan_query(query, cursorOptions, params);
+		PlannedStmt *plan = pg_plan_query(query, cursorOptions, params);

 		INSTR_TIME_SET_CURRENT(planduration);
 		INSTR_TIME_SUBTRACT(planduration, planstart);
@ -116,18 +116,16 @@ JoinExprList(FromExpr *fromExpr)
|
|||
if (joinList != NIL)
|
||||
{
|
||||
/* multiple nodes in from clause, add an explicit join between them */
|
||||
JoinExpr *newJoinExpr = NULL;
|
||||
RangeTblRef *nextRangeTableRef = NULL;
|
||||
int nextRangeTableIndex = 0;
|
||||
|
||||
/* find the left most range table in this node */
|
||||
ExtractLeftMostRangeTableIndex((Node *) fromExpr, &nextRangeTableIndex);
|
||||
|
||||
nextRangeTableRef = makeNode(RangeTblRef);
|
||||
RangeTblRef *nextRangeTableRef = makeNode(RangeTblRef);
|
||||
nextRangeTableRef->rtindex = nextRangeTableIndex;
|
||||
|
||||
/* join the previous node with nextRangeTableRef */
|
||||
newJoinExpr = makeNode(JoinExpr);
|
||||
JoinExpr *newJoinExpr = makeNode(JoinExpr);
|
||||
newJoinExpr->jointype = JOIN_INNER;
|
||||
newJoinExpr->rarg = (Node *) nextRangeTableRef;
|
||||
newJoinExpr->quals = NULL;
|
||||
|
@ -261,18 +259,16 @@ JoinOnColumns(Var *currentColumn, Var *candidateColumn, List *joinClauseList)
|
|||
List *
|
||||
JoinOrderList(List *tableEntryList, List *joinClauseList)
|
||||
{
|
||||
List *bestJoinOrder = NIL;
|
||||
List *candidateJoinOrderList = NIL;
|
||||
ListCell *tableEntryCell = NULL;
|
||||
|
||||
foreach(tableEntryCell, tableEntryList)
|
||||
{
|
||||
TableEntry *startingTable = (TableEntry *) lfirst(tableEntryCell);
|
||||
List *candidateJoinOrder = NIL;
|
||||
|
||||
/* each candidate join order starts with a different table */
|
||||
candidateJoinOrder = JoinOrderForTable(startingTable, tableEntryList,
|
||||
joinClauseList);
|
||||
List *candidateJoinOrder = JoinOrderForTable(startingTable, tableEntryList,
|
||||
joinClauseList);
|
||||
|
||||
if (candidateJoinOrder != NULL)
|
||||
{
|
||||
|
@ -289,7 +285,7 @@ JoinOrderList(List *tableEntryList, List *joinClauseList)
|
|||
"equal operator")));
|
||||
}
|
||||
|
||||
bestJoinOrder = BestJoinOrder(candidateJoinOrderList);
|
||||
List *bestJoinOrder = BestJoinOrder(candidateJoinOrderList);
|
||||
|
||||
/* if logging is enabled, print join order */
|
||||
if (LogMultiJoinOrder)
|
||||
|
@ -312,10 +308,7 @@ JoinOrderList(List *tableEntryList, List *joinClauseList)
|
|||
static List *
|
||||
JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClauseList)
|
||||
{
|
||||
JoinOrderNode *currentJoinNode = NULL;
|
||||
JoinRuleType firstJoinRule = JOIN_RULE_INVALID_FIRST;
|
||||
List *joinOrderList = NIL;
|
||||
List *joinedTableList = NIL;
|
||||
int joinedTableCount = 1;
|
||||
int totalTableCount = list_length(tableEntryList);
|
||||
|
||||
|
@ -331,20 +324,19 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
|
|||
firstTable);
|
||||
|
||||
/* add first node to the join order */
|
||||
joinOrderList = list_make1(firstJoinNode);
|
||||
joinedTableList = list_make1(firstTable);
|
||||
currentJoinNode = firstJoinNode;
|
||||
List *joinOrderList = list_make1(firstJoinNode);
|
||||
List *joinedTableList = list_make1(firstTable);
|
||||
JoinOrderNode *currentJoinNode = firstJoinNode;
|
||||
|
||||
/* loop until we join all remaining tables */
|
||||
while (joinedTableCount < totalTableCount)
|
||||
{
|
||||
List *pendingTableList = NIL;
|
||||
ListCell *pendingTableCell = NULL;
|
||||
JoinOrderNode *nextJoinNode = NULL;
|
||||
TableEntry *nextJoinedTable = NULL;
|
||||
JoinRuleType nextJoinRuleType = JOIN_RULE_LAST;
|
||||
|
||||
pendingTableList = TableEntryListDifference(tableEntryList, joinedTableList);
|
||||
List *pendingTableList = TableEntryListDifference(tableEntryList,
|
||||
joinedTableList);
|
||||
|
||||
/*
|
||||
* Iterate over all pending tables, and find the next best table to
|
||||
|
@ -354,13 +346,13 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
|
|||
foreach(pendingTableCell, pendingTableList)
|
||||
{
|
||||
TableEntry *pendingTable = (TableEntry *) lfirst(pendingTableCell);
|
||||
JoinOrderNode *pendingJoinNode = NULL;
|
||||
JoinRuleType pendingJoinRuleType = JOIN_RULE_LAST;
|
||||
JoinType joinType = JOIN_INNER;
|
||||
|
||||
/* evaluate all join rules for this pending table */
|
||||
pendingJoinNode = EvaluateJoinRules(joinedTableList, currentJoinNode,
|
||||
pendingTable, joinClauseList, joinType);
|
||||
JoinOrderNode *pendingJoinNode = EvaluateJoinRules(joinedTableList,
|
||||
currentJoinNode,
|
||||
pendingTable,
|
||||
joinClauseList, joinType);
|
||||
|
||||
if (pendingJoinNode == NULL)
|
||||
{
|
||||
|
@ -369,7 +361,7 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
|
|||
}
|
||||
|
||||
/* if this rule is better than previous ones, keep it */
|
||||
pendingJoinRuleType = pendingJoinNode->joinRuleType;
|
||||
JoinRuleType pendingJoinRuleType = pendingJoinNode->joinRuleType;
|
||||
if (pendingJoinRuleType < nextJoinRuleType)
|
||||
{
|
||||
nextJoinNode = pendingJoinNode;
|
||||
|
@ -387,7 +379,7 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
|
|||
}
|
||||
|
||||
Assert(nextJoinNode != NULL);
|
||||
nextJoinedTable = nextJoinNode->tableEntry;
|
||||
TableEntry *nextJoinedTable = nextJoinNode->tableEntry;
|
||||
|
||||
/* add next node to the join order */
|
||||
joinOrderList = lappend(joinOrderList, nextJoinNode);
|
||||
|
@@ -411,8 +403,6 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause
 static List *
 BestJoinOrder(List *candidateJoinOrders)
 {
-List *bestJoinOrder = NULL;
-uint32 ruleTypeIndex = 0;
 uint32 highestValidIndex = JOIN_RULE_LAST - 1;
 uint32 candidateCount PG_USED_FOR_ASSERTS_ONLY = 0;

@@ -429,7 +419,7 @@ BestJoinOrder(List *candidateJoinOrders)
 * have 3 or more, if there isn't a join order with fewer DPs; and so
 * forth.
 */
-for (ruleTypeIndex = highestValidIndex; ruleTypeIndex > 0; ruleTypeIndex--)
+for (uint32 ruleTypeIndex = highestValidIndex; ruleTypeIndex > 0; ruleTypeIndex--)
 {
 JoinRuleType ruleType = (JoinRuleType) ruleTypeIndex;

@@ -451,7 +441,7 @@ BestJoinOrder(List *candidateJoinOrders)
 * If there still is a tie, we pick the join order whose relation appeared
 * earliest in the query's range table entry list.
 */
-bestJoinOrder = (List *) linitial(candidateJoinOrders);
+List *bestJoinOrder = (List *) linitial(candidateJoinOrders);

 return bestJoinOrder;
 }

@@ -662,24 +652,21 @@ EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode,
 JoinType joinType)
 {
 JoinOrderNode *nextJoinNode = NULL;
-uint32 candidateTableId = 0;
-List *joinedTableIdList = NIL;
-List *applicableJoinClauses = NIL;
 uint32 lowestValidIndex = JOIN_RULE_INVALID_FIRST + 1;
 uint32 highestValidIndex = JOIN_RULE_LAST - 1;
-uint32 ruleIndex = 0;

 /*
 * We first find all applicable join clauses between already joined tables
 * and the candidate table.
 */
-joinedTableIdList = RangeTableIdList(joinedTableList);
-candidateTableId = candidateTable->rangeTableId;
-applicableJoinClauses = ApplicableJoinClauses(joinedTableIdList, candidateTableId,
-joinClauseList);
+List *joinedTableIdList = RangeTableIdList(joinedTableList);
+uint32 candidateTableId = candidateTable->rangeTableId;
+List *applicableJoinClauses = ApplicableJoinClauses(joinedTableIdList,
+candidateTableId,
+joinClauseList);

 /* we then evaluate all join rules in order */
-for (ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++)
+for (uint32 ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++)
 {
 JoinRuleType ruleType = (JoinRuleType) ruleIndex;
 RuleEvalFunction ruleEvalFunction = JoinRuleEvalFunction(ruleType);
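Loop counters get the same treatment in BestJoinOrder and EvaluateJoinRules above, except that the declaration is folded into the for header rather than kept as a separate statement. Schematically (a fragment in the style of the diff, not copied from it):

    /* before: counter declared at block scope */
    uint32 ruleIndex = 0;
    for (ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++)

    /* after: C99-style declaration scoped to the loop */
    for (uint32 ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++)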
@@ -737,7 +724,6 @@ static RuleEvalFunction
 JoinRuleEvalFunction(JoinRuleType ruleType)
 {
 static bool ruleEvalFunctionsInitialized = false;
-RuleEvalFunction ruleEvalFunction = NULL;

 if (!ruleEvalFunctionsInitialized)
 {

@@ -751,7 +737,7 @@ JoinRuleEvalFunction(JoinRuleType ruleType)
 ruleEvalFunctionsInitialized = true;
 }

-ruleEvalFunction = RuleEvalFunctionArray[ruleType];
+RuleEvalFunction ruleEvalFunction = RuleEvalFunctionArray[ruleType];
 Assert(ruleEvalFunction != NULL);

 return ruleEvalFunction;

@@ -763,7 +749,6 @@ static char *
 JoinRuleName(JoinRuleType ruleType)
 {
 static bool ruleNamesInitialized = false;
-char *ruleName = NULL;

 if (!ruleNamesInitialized)
 {

@@ -780,7 +765,7 @@ JoinRuleName(JoinRuleType ruleType)
 ruleNamesInitialized = true;
 }

-ruleName = RuleNameArray[ruleType];
+char *ruleName = RuleNameArray[ruleType];
 Assert(ruleName != NULL);

 return ruleName;

@@ -857,7 +842,6 @@ static JoinOrderNode *
 LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 List *applicableJoinClauses, JoinType joinType)
 {
-JoinOrderNode *nextJoinNode = NULL;
 Oid relationId = candidateTable->relationId;
 uint32 tableId = candidateTable->rangeTableId;
 Var *candidatePartitionColumn = PartitionColumn(relationId, tableId);

@@ -865,8 +849,6 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 char candidatePartitionMethod = PartitionMethod(relationId);
 char currentPartitionMethod = currentJoinNode->partitionMethod;
 TableEntry *currentAnchorTable = currentJoinNode->anchorTable;
-bool joinOnPartitionColumns = false;
-bool coPartitionedTables = false;

 /*
 * If we previously dual-hash re-partitioned the tables for a join or made cartesian

@@ -883,26 +865,27 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 return NULL;
 }

-joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn,
-candidatePartitionColumn,
-applicableJoinClauses);
+bool joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn,
+candidatePartitionColumn,
+applicableJoinClauses);
 if (!joinOnPartitionColumns)
 {
 return NULL;
 }

 /* shard interval lists must have 1-1 matching for local joins */
-coPartitionedTables = CoPartitionedTables(currentAnchorTable->relationId, relationId);
+bool coPartitionedTables = CoPartitionedTables(currentAnchorTable->relationId,
+relationId);

 if (!coPartitionedTables)
 {
 return NULL;
 }

-nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN,
-currentPartitionColumn,
-currentPartitionMethod,
-currentAnchorTable);
+JoinOrderNode *nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN,
+currentPartitionColumn,
+currentPartitionMethod,
+currentAnchorTable);


 return nextJoinNode;

@@ -925,7 +908,6 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 TableEntry *currentAnchorTable = currentJoinNode->anchorTable;
 JoinRuleType currentJoinRuleType = currentJoinNode->joinRuleType;

-OpExpr *joinClause = NULL;

 Oid relationId = candidateTable->relationId;
 uint32 tableId = candidateTable->rangeTableId;

@@ -948,7 +930,7 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
 return NULL;
 }

-joinClause =
+OpExpr *joinClause =
 SinglePartitionJoinClause(currentPartitionColumn, applicableJoinClauses);
 if (joinClause != NULL)
 {
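Not every declaration can move, and the hunks above show the cases the rewrite leaves alone: function-scoped static flags (ruleEvalFunctionsInitialized, ruleNamesInitialized) and variables with no single unconditional first assignment. A schematic fragment of the second case, modeled on JoinOrderForTable:

    JoinOrderNode *nextJoinNode = NULL;    /* must stay: only assigned conditionally below */
    foreach(pendingTableCell, pendingTableList)
    {
        JoinOrderNode *pendingJoinNode = EvaluateJoinRules(...);    /* this one could move */
        if (pendingJoinNode->joinRuleType < nextJoinRuleType)
        {
            nextJoinNode = pendingJoinNode;
        }
    }

The hunks that follow apply the same conversion to the logical optimizer (MultiLogicalPlanOptimize and the master/worker aggregate rewriting).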
@@ -312,19 +312,8 @@ static bool HasOrderByHllType(List *sortClauseList, List *targetList);
 void
 MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
 {
-bool hasOrderByHllType = false;
-List *selectNodeList = NIL;
-List *projectNodeList = NIL;
-List *collectNodeList = NIL;
-List *extendedOpNodeList = NIL;
-List *tableNodeList = NIL;
 ListCell *collectNodeCell = NULL;
 ListCell *tableNodeCell = NULL;
-MultiProject *projectNode = NULL;
-MultiExtendedOp *extendedOpNode = NULL;
-MultiExtendedOp *masterExtendedOpNode = NULL;
-MultiExtendedOp *workerExtendedOpNode = NULL;
-ExtendedOpNodeProperties extendedOpNodeProperties;
 MultiNode *logicalPlanNode = (MultiNode *) multiLogicalPlan;

 /* check that we can optimize aggregates in the plan */

@@ -336,7 +325,7 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
 * exist, we modify the tree in place to swap the original select node with
 * And and Or nodes. We then push down the And select node if it exists.
 */
-selectNodeList = FindNodesOfType(logicalPlanNode, T_MultiSelect);
+List *selectNodeList = FindNodesOfType(logicalPlanNode, T_MultiSelect);
 if (selectNodeList != NIL)
 {
 MultiSelect *selectNode = (MultiSelect *) linitial(selectNodeList);

@@ -365,12 +354,12 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
 }

 /* push down the multi project node */
-projectNodeList = FindNodesOfType(logicalPlanNode, T_MultiProject);
-projectNode = (MultiProject *) linitial(projectNodeList);
+List *projectNodeList = FindNodesOfType(logicalPlanNode, T_MultiProject);
+MultiProject *projectNode = (MultiProject *) linitial(projectNodeList);
 PushDownNodeLoop((MultiUnaryNode *) projectNode);

 /* pull up collect nodes and merge duplicate collects */
-collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect);
+List *collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect);
 foreach(collectNodeCell, collectNodeList)
 {
 MultiCollect *collectNode = (MultiCollect *) lfirst(collectNodeCell);

@@ -385,19 +374,20 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
 * clause list to the worker operator node. We then push the worker operator
 * node below the collect node.
 */
-extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp);
-extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList);
+List *extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp);
+MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList);

-extendedOpNodeProperties = BuildExtendedOpNodeProperties(extendedOpNode);
+ExtendedOpNodeProperties extendedOpNodeProperties = BuildExtendedOpNodeProperties(
+extendedOpNode);

-masterExtendedOpNode =
+MultiExtendedOp *masterExtendedOpNode =
 MasterExtendedOpNode(extendedOpNode, &extendedOpNodeProperties);
-workerExtendedOpNode =
+MultiExtendedOp *workerExtendedOpNode =
 WorkerExtendedOpNode(extendedOpNode, &extendedOpNodeProperties);

 ApplyExtendedOpNodes(extendedOpNode, masterExtendedOpNode, workerExtendedOpNode);

-tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable);
+List *tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable);
 foreach(tableNodeCell, tableNodeList)
 {
 MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell);

@@ -414,8 +404,8 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan)
 * clause's sortop oid, so we can't push an order by on the hll data type to
 * the worker node. We check that here and error out if necessary.
 */
-hasOrderByHllType = HasOrderByHllType(workerExtendedOpNode->sortClauseList,
-workerExtendedOpNode->targetList);
+bool hasOrderByHllType = HasOrderByHllType(workerExtendedOpNode->sortClauseList,
+workerExtendedOpNode->targetList);
 if (hasOrderByHllType)
 {
 ereport(ERROR, (errmsg("cannot approximate count(distinct) and order by it"),

@@ -597,7 +587,6 @@ PushDownNodeLoop(MultiUnaryNode *currentNode)
 static void
 PullUpCollectLoop(MultiCollect *collectNode)
 {
-MultiNode *childNode = NULL;
 MultiUnaryNode *currentNode = (MultiUnaryNode *) collectNode;

 PullUpStatus pullUpStatus = CanPullUp(currentNode);

@@ -611,7 +600,7 @@ PullUpCollectLoop(MultiCollect *collectNode)
 * After pulling up the collect node, if we find that our child node is also
 * a collect, we merge the two collect nodes together by removing this node.
 */
-childNode = currentNode->childNode;
+MultiNode *childNode = currentNode->childNode;
 if (CitusIsA(childNode, MultiCollect))
 {
 RemoveUnaryNode(currentNode);

@@ -753,8 +742,8 @@ CanPullUp(MultiUnaryNode *childNode)
 * Evaluate if parent can be pushed down below the child node, since it
 * is equivalent to pulling up the child above its parent.
 */
-PushDownStatus parentPushDownStatus = PUSH_DOWN_INVALID_FIRST;
-parentPushDownStatus = Commutative((MultiUnaryNode *) parentNode, childNode);
+PushDownStatus parentPushDownStatus = Commutative((MultiUnaryNode *) parentNode,
+childNode);

 if (parentPushDownStatus == PUSH_DOWN_VALID)
 {

@@ -932,8 +921,6 @@ SelectClauseTableIdList(List *selectClauseList)
 {
 Node *selectClause = (Node *) lfirst(selectClauseCell);
 List *selectColumnList = pull_var_clause_default(selectClause);
-Var *selectColumn = NULL;
-int selectColumnTableId = 0;

 if (list_length(selectColumnList) == 0)
 {

@@ -941,8 +928,8 @@ SelectClauseTableIdList(List *selectClauseList)
 continue;
 }

-selectColumn = (Var *) linitial(selectColumnList);
-selectColumnTableId = (int) selectColumn->varno;
+Var *selectColumn = (Var *) linitial(selectColumnList);
+int selectColumnTableId = (int) selectColumn->varno;

 tableIdList = lappend_int(tableIdList, selectColumnTableId);
 }

@@ -1014,9 +1001,9 @@ GenerateNode(MultiUnaryNode *currentNode, MultiNode *childNode)
 {
 MultiSelect *selectNode = (MultiSelect *) currentNode;
 List *selectClauseList = copyObject(selectNode->selectClauseList);
-List *newSelectClauseList = NIL;

-newSelectClauseList = TableIdListSelectClauses(tableIdList, selectClauseList);
+List *newSelectClauseList = TableIdListSelectClauses(tableIdList,
+selectClauseList);
 if (newSelectClauseList != NIL)
 {
 MultiSelect *newSelectNode = CitusMakeNode(MultiSelect);

@@ -1370,7 +1357,6 @@ static MultiExtendedOp *
 MasterExtendedOpNode(MultiExtendedOp *originalOpNode,
 ExtendedOpNodeProperties *extendedOpNodeProperties)
 {
-MultiExtendedOp *masterExtendedOpNode = NULL;
 List *targetEntryList = originalOpNode->targetList;
 List *newTargetEntryList = NIL;
 ListCell *targetEntryCell = NULL;

@@ -1433,7 +1419,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode,
 newHavingQual = MasterAggregateMutator(originalHavingQual, walkerContext);
 }

-masterExtendedOpNode = CitusMakeNode(MultiExtendedOp);
+MultiExtendedOp *masterExtendedOpNode = CitusMakeNode(MultiExtendedOp);
 masterExtendedOpNode->targetList = newTargetEntryList;
 masterExtendedOpNode->groupClauseList = originalOpNode->groupClauseList;
 masterExtendedOpNode->sortClauseList = originalOpNode->sortClauseList;

@@ -1510,7 +1496,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 {
 AggregateType aggregateType = GetAggregateType(originalAggregate->aggfnoid);
 Expr *newMasterExpression = NULL;
-Expr *typeConvertedExpression = NULL;
 const uint32 masterTableId = 1; /* one table on the master node */
 const Index columnLevelsUp = 0; /* normal column */
 const AttrNumber argumentId = 1; /* our aggregates have single arguments */

@@ -1576,9 +1561,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 const int argCount = 1;
 const int defaultTypeMod = -1;

-TargetEntry *hllTargetEntry = NULL;
-Aggref *unionAggregate = NULL;
-FuncExpr *cardinalityExpression = NULL;

 /* extract schema name of hll */
 Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, false);

@@ -1598,9 +1580,10 @@ MasterAggregateExpression(Aggref *originalAggregate,
 hllTypeCollationId, columnLevelsUp);
 walkerContext->columnId++;

-hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, NULL, false);
+TargetEntry *hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId,
+NULL, false);

-unionAggregate = makeNode(Aggref);
+Aggref *unionAggregate = makeNode(Aggref);
 unionAggregate->aggfnoid = unionFunctionId;
 unionAggregate->aggtype = hllType;
 unionAggregate->args = list_make1(hllTargetEntry);

@@ -1610,7 +1593,7 @@ MasterAggregateExpression(Aggref *originalAggregate,
 unionAggregate->aggargtypes = list_make1_oid(unionAggregate->aggtype);
 unionAggregate->aggsplit = AGGSPLIT_SIMPLE;

-cardinalityExpression = makeNode(FuncExpr);
+FuncExpr *cardinalityExpression = makeNode(FuncExpr);
 cardinalityExpression->funcid = cardinalityFunctionId;
 cardinalityExpression->funcresulttype = cardinalityReturnType;
 cardinalityExpression->args = list_make1(unionAggregate);

@@ -1647,12 +1630,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * Count aggregates are handled in two steps. First, worker nodes report
 * their count results. Then, the master node sums up these results.
 */
-Var *column = NULL;
-TargetEntry *columnTargetEntry = NULL;
-CoerceViaIO *coerceExpr = NULL;
-Const *zeroConst = NULL;
-List *coalesceArgs = NULL;
-CoalesceExpr *coalesceExpr = NULL;

 /* worker aggregate and original aggregate have the same return type */
 Oid workerReturnType = exprType((Node *) originalAggregate);

@@ -1673,16 +1650,17 @@ MasterAggregateExpression(Aggref *originalAggregate,
 newMasterAggregate->aggargtypes = list_make1_oid(newMasterAggregate->aggtype);
 newMasterAggregate->aggsplit = AGGSPLIT_SIMPLE;

-column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
-workerReturnTypeMod, workerCollationId, columnLevelsUp);
+Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
+workerReturnTypeMod, workerCollationId, columnLevelsUp);
 walkerContext->columnId++;

 /* aggref expects its arguments to be wrapped in target entries */
-columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false);
+TargetEntry *columnTargetEntry = makeTargetEntry((Expr *) column, argumentId,
+NULL, false);
 newMasterAggregate->args = list_make1(columnTargetEntry);

 /* cast numeric sum result to bigint (count's return type) */
-coerceExpr = makeNode(CoerceViaIO);
+CoerceViaIO *coerceExpr = makeNode(CoerceViaIO);
 coerceExpr->arg = (Expr *) newMasterAggregate;
 coerceExpr->resulttype = INT8OID;
 coerceExpr->resultcollid = InvalidOid;

@@ -1690,10 +1668,10 @@ MasterAggregateExpression(Aggref *originalAggregate,
 coerceExpr->location = -1;

 /* convert NULL to 0 in case of no rows */
-zeroConst = MakeIntegerConstInt64(0);
-coalesceArgs = list_make2(coerceExpr, zeroConst);
+Const *zeroConst = MakeIntegerConstInt64(0);
+List *coalesceArgs = list_make2(coerceExpr, zeroConst);

-coalesceExpr = makeNode(CoalesceExpr);
+CoalesceExpr *coalesceExpr = makeNode(CoalesceExpr);
 coalesceExpr->coalescetype = INT8OID;
 coalesceExpr->coalescecollid = InvalidOid;
 coalesceExpr->args = coalesceArgs;
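As an aside on what the rewritten code above builds: for count() the master node cannot simply re-run count over the workers' counts, so the hunk constructs a sum over the worker results, casts it back to count's bigint return type, and wraps it in a coalesce for the zero-row case. Read as SQL, the assembled master expression is roughly the following (our reading of the code, not text from the commit):

    /* master-side expression for a distributed count(), approximately:
     *   COALESCE((sum(worker_count_column))::bigint, 0)
     */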
@@ -1713,10 +1691,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * the arrays or jsons on the master and compute the array_cat_agg()
 * or jsonb_cat_agg() aggregate on them to get the final array or json.
 */
-Var *column = NULL;
-TargetEntry *catAggArgument = NULL;
-Aggref *newMasterAggregate = NULL;
-Oid aggregateFunctionId = InvalidOid;
 const char *catAggregateName = NULL;
 Oid catInputType = InvalidOid;


@@ -1753,17 +1727,18 @@ MasterAggregateExpression(Aggref *originalAggregate,
 Assert(catAggregateName != NULL);
 Assert(catInputType != InvalidOid);

-aggregateFunctionId = AggregateFunctionOid(catAggregateName,
-catInputType);
+Oid aggregateFunctionId = AggregateFunctionOid(catAggregateName,
+catInputType);

 /* create argument for the array_cat_agg() or jsonb_cat_agg() aggregate */
-column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
-workerReturnTypeMod, workerCollationId, columnLevelsUp);
-catAggArgument = makeTargetEntry((Expr *) column, argumentId, NULL, false);
+Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
+workerReturnTypeMod, workerCollationId, columnLevelsUp);
+TargetEntry *catAggArgument = makeTargetEntry((Expr *) column, argumentId, NULL,
+false);
 walkerContext->columnId++;

 /* construct the master array_cat_agg() or jsonb_cat_agg() expression */
-newMasterAggregate = copyObject(originalAggregate);
+Aggref *newMasterAggregate = copyObject(originalAggregate);
 newMasterAggregate->aggfnoid = aggregateFunctionId;
 newMasterAggregate->args = list_make1(catAggArgument);
 newMasterAggregate->aggfilter = NULL;

@@ -1781,8 +1756,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * to apply in the master after running the original aggregate in
 * workers.
 */
-TargetEntry *hllTargetEntry = NULL;
-Aggref *unionAggregate = NULL;

 Oid hllType = exprType((Node *) originalAggregate);
 Oid unionFunctionId = AggregateFunctionOid(HLL_UNION_AGGREGATE_NAME, hllType);

@@ -1793,9 +1766,10 @@ MasterAggregateExpression(Aggref *originalAggregate,
 hllReturnTypeMod, hllTypeCollationId, columnLevelsUp);
 walkerContext->columnId++;

-hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, NULL, false);
+TargetEntry *hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId,
+NULL, false);

-unionAggregate = makeNode(Aggref);
+Aggref *unionAggregate = makeNode(Aggref);
 unionAggregate->aggfnoid = unionFunctionId;
 unionAggregate->aggtype = hllType;
 unionAggregate->args = list_make1(hllTargetEntry);

@@ -1816,8 +1790,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * Then, we gather the Top-Ns on the master and take the union of all
 * to get the final topn.
 */
-TargetEntry *topNTargetEntry = NULL;
-Aggref *unionAggregate = NULL;

 /* worker aggregate and original aggregate have same return type */
 Oid topnType = exprType((Node *) originalAggregate);

@@ -1831,10 +1803,11 @@ MasterAggregateExpression(Aggref *originalAggregate,
 topnReturnTypeMod, topnTypeCollationId, columnLevelsUp);
 walkerContext->columnId++;

-topNTargetEntry = makeTargetEntry((Expr *) topnColumn, argumentId, NULL, false);
+TargetEntry *topNTargetEntry = makeTargetEntry((Expr *) topnColumn, argumentId,
+NULL, false);

 /* construct the master topn_union_agg() expression */
-unionAggregate = makeNode(Aggref);
+Aggref *unionAggregate = makeNode(Aggref);
 unionAggregate->aggfnoid = unionFunctionId;
 unionAggregate->aggtype = topnType;
 unionAggregate->args = list_make1(topNTargetEntry);

@@ -1869,32 +1842,30 @@ MasterAggregateExpression(Aggref *originalAggregate,

 if (combine != InvalidOid)
 {
-Const *aggOidParam = NULL;
-Var *column = NULL;
-Const *nullTag = NULL;
-List *aggArguments = NIL;
-Aggref *newMasterAggregate = NULL;
 Oid coordCombineId = CoordCombineAggOid();
 Oid workerReturnType = CSTRINGOID;
 int32 workerReturnTypeMod = -1;
 Oid workerCollationId = InvalidOid;
 Oid resultType = exprType((Node *) originalAggregate);

-aggOidParam = makeConst(OIDOID, -1, InvalidOid, sizeof(Oid),
-ObjectIdGetDatum(originalAggregate->aggfnoid),
-false, true);
-column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
-workerReturnTypeMod, workerCollationId, columnLevelsUp);
+Const *aggOidParam = makeConst(OIDOID, -1, InvalidOid, sizeof(Oid),
+ObjectIdGetDatum(originalAggregate->aggfnoid),
+false, true);
+Var *column = makeVar(masterTableId, walkerContext->columnId,
+workerReturnType,
+workerReturnTypeMod, workerCollationId, columnLevelsUp);
 walkerContext->columnId++;
-nullTag = makeNullConst(resultType, -1, InvalidOid);
+Const *nullTag = makeNullConst(resultType, -1, InvalidOid);

-aggArguments = list_make3(makeTargetEntry((Expr *) aggOidParam, 1, NULL,
-false),
-makeTargetEntry((Expr *) column, 2, NULL, false),
-makeTargetEntry((Expr *) nullTag, 3, NULL, false));
+List *aggArguments = list_make3(makeTargetEntry((Expr *) aggOidParam, 1, NULL,
+false),
+makeTargetEntry((Expr *) column, 2, NULL,
+false),
+makeTargetEntry((Expr *) nullTag, 3, NULL,
+false));

 /* coord_combine_agg(agg, workercol) */
-newMasterAggregate = makeNode(Aggref);
+Aggref *newMasterAggregate = makeNode(Aggref);
 newMasterAggregate->aggfnoid = coordCombineId;
 newMasterAggregate->aggtype = originalAggregate->aggtype;
 newMasterAggregate->args = aggArguments;

@@ -1918,9 +1889,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * All other aggregates are handled as they are. These include sum, min,
 * and max.
 */
-Var *column = NULL;
-TargetEntry *columnTargetEntry = NULL;
-Aggref *newMasterAggregate = NULL;

 /* worker aggregate and original aggregate have the same return type */
 Oid workerReturnType = exprType((Node *) originalAggregate);

@@ -1940,18 +1908,19 @@ MasterAggregateExpression(Aggref *originalAggregate,
 {
 masterReturnType = workerReturnType;
 }
-newMasterAggregate = copyObject(originalAggregate);
+Aggref *newMasterAggregate = copyObject(originalAggregate);
 newMasterAggregate->aggdistinct = NULL;
 newMasterAggregate->aggfnoid = aggregateFunctionId;
 newMasterAggregate->aggtype = masterReturnType;
 newMasterAggregate->aggfilter = NULL;

-column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
-workerReturnTypeMod, workerCollationId, columnLevelsUp);
+Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType,
+workerReturnTypeMod, workerCollationId, columnLevelsUp);
 walkerContext->columnId++;

 /* aggref expects its arguments to be wrapped in target entries */
-columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false);
+TargetEntry *columnTargetEntry = makeTargetEntry((Expr *) column, argumentId,
+NULL, false);
 newMasterAggregate->args = list_make1(columnTargetEntry);

 newMasterExpression = (Expr *) newMasterAggregate;

@@ -1964,8 +1933,8 @@ MasterAggregateExpression(Aggref *originalAggregate,
 * type as the original aggregate. We need this since functions like sorting
 * and grouping have already been chosen based on the original type.
 */
-typeConvertedExpression = AddTypeConversion((Node *) originalAggregate,
-(Node *) newMasterExpression);
+Expr *typeConvertedExpression = AddTypeConversion((Node *) originalAggregate,
+(Node *) newMasterExpression);
 if (typeConvertedExpression != NULL)
 {
 newMasterExpression = typeConvertedExpression;

@@ -1999,22 +1968,15 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType,

 Oid sumTypeCollationId = get_typcollation(sumAggregateType);
 Oid countTypeCollationId = get_typcollation(countAggregateType);
-Var *firstColumn = NULL;
-Var *secondColumn = NULL;
-TargetEntry *firstTargetEntry = NULL;
-TargetEntry *secondTargetEntry = NULL;
-Aggref *firstSum = NULL;
-Aggref *secondSum = NULL;
-List *operatorNameList = NIL;
-Expr *opExpr = NULL;

 /* create the first argument for sum(column1) */
-firstColumn = makeVar(masterTableId, (*columnId), sumAggregateType,
-defaultTypeMod, sumTypeCollationId, defaultLevelsUp);
-firstTargetEntry = makeTargetEntry((Expr *) firstColumn, argumentId, NULL, false);
+Var *firstColumn = makeVar(masterTableId, (*columnId), sumAggregateType,
+defaultTypeMod, sumTypeCollationId, defaultLevelsUp);
+TargetEntry *firstTargetEntry = makeTargetEntry((Expr *) firstColumn, argumentId,
+NULL, false);
 (*columnId)++;

-firstSum = makeNode(Aggref);
+Aggref *firstSum = makeNode(Aggref);
 firstSum->aggfnoid = AggregateFunctionOid(sumAggregateName, sumAggregateType);
 firstSum->aggtype = get_func_rettype(firstSum->aggfnoid);
 firstSum->args = list_make1(firstTargetEntry);

@@ -2024,12 +1986,13 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType,
 firstSum->aggsplit = AGGSPLIT_SIMPLE;

 /* create the second argument for sum(column2) */
-secondColumn = makeVar(masterTableId, (*columnId), countAggregateType,
-defaultTypeMod, countTypeCollationId, defaultLevelsUp);
-secondTargetEntry = makeTargetEntry((Expr *) secondColumn, argumentId, NULL, false);
+Var *secondColumn = makeVar(masterTableId, (*columnId), countAggregateType,
+defaultTypeMod, countTypeCollationId, defaultLevelsUp);
+TargetEntry *secondTargetEntry = makeTargetEntry((Expr *) secondColumn, argumentId,
+NULL, false);
 (*columnId)++;

-secondSum = makeNode(Aggref);
+Aggref *secondSum = makeNode(Aggref);
 secondSum->aggfnoid = AggregateFunctionOid(sumAggregateName, countAggregateType);
 secondSum->aggtype = get_func_rettype(secondSum->aggfnoid);
 secondSum->args = list_make1(secondTargetEntry);

@@ -2042,9 +2005,10 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType,
 * Build the division operator between these two aggregates. This function
 * will convert the types of the aggregates if necessary.
 */
-operatorNameList = list_make1(makeString(DIVISION_OPER_NAME));
-opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL,
--1);
+List *operatorNameList = list_make1(makeString(DIVISION_OPER_NAME));
+Expr *opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum,
+NULL,
+-1);

 return opExpr;
 }

@@ -2061,7 +2025,6 @@ AddTypeConversion(Node *originalAggregate, Node *newExpression)
 Oid newTypeId = exprType(newExpression);
 Oid originalTypeId = exprType(originalAggregate);
 int32 originalTypeMod = exprTypmod(originalAggregate);
-Node *typeConvertedExpression = NULL;

 /* nothing to do if the two types are the same */
 if (originalTypeId == newTypeId)

@@ -2070,10 +2033,10 @@ AddTypeConversion(Node *originalAggregate, Node *newExpression)
 }

 /* otherwise, add a type conversion function */
-typeConvertedExpression = coerce_to_target_type(NULL, newExpression, newTypeId,
-originalTypeId, originalTypeMod,
-COERCION_EXPLICIT,
-COERCE_EXPLICIT_CAST, -1);
+Node *typeConvertedExpression = coerce_to_target_type(NULL, newExpression, newTypeId,
+originalTypeId, originalTypeMod,
+COERCION_EXPLICIT,
+COERCE_EXPLICIT_CAST, -1);
 Assert(typeConvertedExpression != NULL);
 return (Expr *) typeConvertedExpression;
 }

@@ -2090,10 +2053,7 @@ static MultiExtendedOp *
 WorkerExtendedOpNode(MultiExtendedOp *originalOpNode,
 ExtendedOpNodeProperties *extendedOpNodeProperties)
 {
-MultiExtendedOp *workerExtendedOpNode = NULL;
-Index nextSortGroupRefIndex = 0;
 bool distinctPreventsLimitPushdown = false;
-bool groupByExtended = false;
 bool groupedByDisjointPartitionColumn =
 extendedOpNodeProperties->groupedByDisjointPartitionColumn;

@@ -2125,7 +2085,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode,
 memset(&queryOrderByLimit, 0, sizeof(queryGroupClause));

 /* calculate the next sort group index based on the original target list */
-nextSortGroupRefIndex = GetNextSortGroupRef(originalTargetEntryList);
+Index nextSortGroupRefIndex = GetNextSortGroupRef(originalTargetEntryList);

 /* targetProjectionNumber starts from 1 */
 queryTargetList.targetProjectionNumber = 1;

@@ -2167,7 +2127,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode,
 * (1) Creating a new group by clause during aggregate mutation, or
 * (2) Distinct clause is not pushed down
 */
-groupByExtended =
+bool groupByExtended =
 list_length(queryGroupClause.groupClauseList) > originalGroupClauseLength;
 if (!groupByExtended && !distinctPreventsLimitPushdown)
 {

@@ -2188,7 +2148,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode,
 }

 /* finally, fill the extended op node with the data we gathered */
-workerExtendedOpNode = CitusMakeNode(MultiExtendedOp);
+MultiExtendedOp *workerExtendedOpNode = CitusMakeNode(MultiExtendedOp);

 workerExtendedOpNode->targetList = queryTargetList.targetEntryList;
 workerExtendedOpNode->groupClauseList = queryGroupClause.groupClauseList;

@@ -2303,9 +2263,7 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual,
 QueryTargetList *queryTargetList,
 QueryGroupClause *queryGroupClause)
 {
-List *newExpressionList = NIL;
 TargetEntry *targetEntry = NULL;
-WorkerAggregateWalkerContext *workerAggContext = NULL;

 if (originalHavingQual == NULL)
 {

@@ -2314,13 +2272,14 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual,

 *workerHavingQual = NULL;

-workerAggContext = palloc0(sizeof(WorkerAggregateWalkerContext));
+WorkerAggregateWalkerContext *workerAggContext = palloc0(
+sizeof(WorkerAggregateWalkerContext));
 workerAggContext->expressionList = NIL;
 workerAggContext->pullDistinctColumns = extendedOpNodeProperties->pullDistinctColumns;
 workerAggContext->createGroupByClause = false;

 WorkerAggregateWalker(originalHavingQual, workerAggContext);
-newExpressionList = workerAggContext->expressionList;
+List *newExpressionList = workerAggContext->expressionList;

 ExpandWorkerTargetEntry(newExpressionList, targetEntry,
 workerAggContext->createGroupByClause,

@@ -2385,7 +2344,6 @@ ProcessDistinctClauseForWorkerQuery(List *distinctClause, bool hasDistinctOn,
 bool *distinctPreventsLimitPushdown)
 {
 bool distinctClauseSupersetofGroupClause = false;
-bool shouldPushdownDistinct = false;

 if (distinctClause == NIL)
 {

@@ -2419,8 +2377,8 @@ ProcessDistinctClauseForWorkerQuery(List *distinctClause, bool hasDistinctOn,
 * distinct pushdown if distinct clause is missing some entries that
 * group by clause has.
 */
-shouldPushdownDistinct = !queryHasAggregates &&
-distinctClauseSupersetofGroupClause;
+bool shouldPushdownDistinct = !queryHasAggregates &&
+distinctClauseSupersetofGroupClause;
 if (shouldPushdownDistinct)
 {
 queryDistinctClause->workerDistinctClause = distinctClause;

@@ -2524,8 +2482,6 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference,
 QueryOrderByLimit *queryOrderByLimit,
 QueryTargetList *queryTargetList)
 {
-List *newTargetEntryListForSortClauses = NIL;
-
 queryOrderByLimit->workerLimitCount =
 WorkerLimitCount(originalLimitCount, limitOffset, orderByLimitReference);

@@ -2539,7 +2495,7 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference,
 * TODO: Do we really need to add the target entries if we're not pushing
 * down ORDER BY?
 */
-newTargetEntryListForSortClauses =
+List *newTargetEntryListForSortClauses =
 GenerateNewTargetEntriesForSortClauses(originalTargetList,
 queryOrderByLimit->workerSortClauseList,
 &(queryTargetList->targetProjectionNumber),

@@ -2634,10 +2590,9 @@ ExpandWorkerTargetEntry(List *expressionList, TargetEntry *originalTargetEntry,
 foreach(newExpressionCell, expressionList)
 {
 Expr *newExpression = (Expr *) lfirst(newExpressionCell);
-TargetEntry *newTargetEntry = NULL;

 /* generate and add the new target entry to the target list */
-newTargetEntry =
+TargetEntry *newTargetEntry =
 GenerateWorkerTargetEntry(originalTargetEntry, newExpression,
 queryTargetList->targetProjectionNumber);
 (queryTargetList->targetProjectionNumber)++;

@@ -2749,14 +2704,12 @@ AppendTargetEntryToGroupClause(TargetEntry *targetEntry,
 QueryGroupClause *queryGroupClause)
 {
 Expr *targetExpr PG_USED_FOR_ASSERTS_ONLY = targetEntry->expr;
-Var *targetColumn = NULL;
-SortGroupClause *groupByClause = NULL;

 /* we currently only support appending Var target entries */
 AssertArg(IsA(targetExpr, Var));

-targetColumn = (Var *) targetEntry->expr;
-groupByClause = CreateSortGroupClause(targetColumn);
+Var *targetColumn = (Var *) targetEntry->expr;
+SortGroupClause *groupByClause = CreateSortGroupClause(targetColumn);

 /* the target entry should have an index */
 targetEntry->ressortgroupref = *queryGroupClause->nextSortGroupRefIndex;

@@ -2854,10 +2807,6 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
 const int hashArgumentCount = 2;
 const int addArgumentCount = 2;

-TargetEntry *hashedColumnArgument = NULL;
-TargetEntry *storageSizeArgument = NULL;
-List *addAggregateArgumentList = NIL;
-Aggref *addAggregateFunction = NULL;

 /* init hll_hash() related variables */
 Oid argumentType = AggregateArgumentType(originalAggregate);

@@ -2888,13 +2837,14 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
 hashFunction->args = list_make1(argumentExpression);

 /* construct hll_add_agg() expression */
-hashedColumnArgument = makeTargetEntry((Expr *) hashFunction,
-firstArgumentId, NULL, false);
-storageSizeArgument = makeTargetEntry((Expr *) logOfStorageSizeConst,
-secondArgumentId, NULL, false);
-addAggregateArgumentList = list_make2(hashedColumnArgument, storageSizeArgument);
+TargetEntry *hashedColumnArgument = makeTargetEntry((Expr *) hashFunction,
+firstArgumentId, NULL, false);
+TargetEntry *storageSizeArgument = makeTargetEntry((Expr *) logOfStorageSizeConst,
+secondArgumentId, NULL, false);
+List *addAggregateArgumentList = list_make2(hashedColumnArgument,
+storageSizeArgument);

-addAggregateFunction = makeNode(Aggref);
+Aggref *addAggregateFunction = makeNode(Aggref);
 addAggregateFunction->aggfnoid = addFunctionId;
 addAggregateFunction->aggtype = hllType;
 addAggregateFunction->args = addAggregateArgumentList;

@@ -2964,17 +2914,15 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,

 if (combine != InvalidOid)
 {
-Const *aggOidParam = NULL;
-Aggref *newWorkerAggregate = NULL;
-List *aggArguments = NIL;
 ListCell *originalAggArgCell;
 Oid workerPartialId = WorkerPartialAggOid();

-aggOidParam = makeConst(REGPROCEDUREOID, -1, InvalidOid, sizeof(Oid),
-ObjectIdGetDatum(originalAggregate->aggfnoid), false,
-true);
-aggArguments = list_make1(makeTargetEntry((Expr *) aggOidParam, 1, NULL,
-false));
+Const *aggOidParam = makeConst(REGPROCEDUREOID, -1, InvalidOid, sizeof(Oid),
+ObjectIdGetDatum(originalAggregate->aggfnoid),
+false,
+true);
+List *aggArguments = list_make1(makeTargetEntry((Expr *) aggOidParam, 1, NULL,
+false));
 foreach(originalAggArgCell, originalAggregate->args)
 {
 TargetEntry *arg = lfirst(originalAggArgCell);

@@ -2984,7 +2932,7 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
 }

 /* worker_partial_agg(agg, ...args) */
-newWorkerAggregate = makeNode(Aggref);
+Aggref *newWorkerAggregate = makeNode(Aggref);
 newWorkerAggregate->aggfnoid = workerPartialId;
 newWorkerAggregate->aggtype = CSTRINGOID;
 newWorkerAggregate->args = aggArguments;

@@ -3030,44 +2978,33 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
 static AggregateType
 GetAggregateType(Oid aggFunctionId)
 {
-char *aggregateProcName = NULL;
-uint32 aggregateCount = 0;
-uint32 aggregateIndex = 0;
-bool found = false;
-
 /* look up the function name */
-aggregateProcName = get_func_name(aggFunctionId);
+char *aggregateProcName = get_func_name(aggFunctionId);
 if (aggregateProcName == NULL)
 {
 ereport(ERROR, (errmsg("citus cache lookup failed for function %u",
 aggFunctionId)));
 }

-aggregateCount = lengthof(AggregateNames);
+uint32 aggregateCount = lengthof(AggregateNames);

 Assert(AGGREGATE_INVALID_FIRST == 0);

-for (aggregateIndex = 1; aggregateIndex < aggregateCount; aggregateIndex++)
+for (uint32 aggregateIndex = 1; aggregateIndex < aggregateCount; aggregateIndex++)
 {
 const char *aggregateName = AggregateNames[aggregateIndex];
 if (strncmp(aggregateName, aggregateProcName, NAMEDATALEN) == 0)
 {
-found = true;
-break;
+return aggregateIndex;
 }
 }

-if (!found)
+if (AggregateEnabledCustom(aggFunctionId))
 {
-if (AggregateEnabledCustom(aggFunctionId))
-{
-return AGGREGATE_CUSTOM;
-}
-
-ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName)));
+return AGGREGATE_CUSTOM;
 }

-return aggregateIndex;
+ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName)));
 }
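GetAggregateType above is one of the few hunks that goes beyond moving a declaration: the found flag and break are replaced by returning straight out of the loop, which is what lets the aggregateIndex counter migrate into the for header. The resulting control flow, condensed from the new side of the diff:

    for (uint32 aggregateIndex = 1; aggregateIndex < aggregateCount; aggregateIndex++)
    {
        if (strncmp(AggregateNames[aggregateIndex], aggregateProcName, NAMEDATALEN) == 0)
        {
            return aggregateIndex;    /* early return replaces found = true; break; */
        }
    }
    if (AggregateEnabledCustom(aggFunctionId))
    {
        return AGGREGATE_CUSTOM;
    }
    ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName)));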
@@ -3093,18 +3030,12 @@ AggregateArgumentType(Aggref *aggregate)
 static bool
 AggregateEnabledCustom(Oid aggregateOid)
 {
-HeapTuple aggTuple;
-Form_pg_aggregate aggform;
-HeapTuple typeTuple;
-Form_pg_type typeform;
-bool supportsSafeCombine;
-
-aggTuple = SearchSysCache1(AGGFNOID, aggregateOid);
+HeapTuple aggTuple = SearchSysCache1(AGGFNOID, aggregateOid);
 if (!HeapTupleIsValid(aggTuple))
 {
 elog(ERROR, "citus cache lookup failed.");
 }
-aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
+Form_pg_aggregate aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);

 if (aggform->aggcombinefn == InvalidOid)
 {

@@ -3112,14 +3043,14 @@ AggregateEnabledCustom(Oid aggregateOid)
 return false;
 }

-typeTuple = SearchSysCache1(TYPEOID, aggform->aggtranstype);
+HeapTuple typeTuple = SearchSysCache1(TYPEOID, aggform->aggtranstype);
 if (!HeapTupleIsValid(typeTuple))
 {
 elog(ERROR, "citus cache lookup failed.");
 }
-typeform = (Form_pg_type) GETSTRUCT(typeTuple);
+Form_pg_type typeform = (Form_pg_type) GETSTRUCT(typeTuple);

-supportsSafeCombine = typeform->typtype != TYPTYPE_PSEUDO;
+bool supportsSafeCombine = typeform->typtype != TYPTYPE_PSEUDO;

 ReleaseSysCache(aggTuple);
 ReleaseSysCache(typeTuple);

@@ -3137,23 +3068,20 @@ static Oid
 AggregateFunctionOid(const char *functionName, Oid inputType)
 {
 Oid functionOid = InvalidOid;
-Relation procRelation = NULL;
-SysScanDesc scanDescriptor = NULL;
 ScanKeyData scanKey[1];
 int scanKeyCount = 1;
-HeapTuple heapTuple = NULL;

-procRelation = heap_open(ProcedureRelationId, AccessShareLock);
+Relation procRelation = heap_open(ProcedureRelationId, AccessShareLock);

 ScanKeyInit(&scanKey[0], Anum_pg_proc_proname,
 BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(functionName));

-scanDescriptor = systable_beginscan(procRelation,
-ProcedureNameArgsNspIndexId, true,
-NULL, scanKeyCount, scanKey);
+SysScanDesc scanDescriptor = systable_beginscan(procRelation,
+ProcedureNameArgsNspIndexId, true,
+NULL, scanKeyCount, scanKey);

 /* loop until we find the right function */
-heapTuple = systable_getnext(scanDescriptor);
+HeapTuple heapTuple = systable_getnext(scanDescriptor);
 while (HeapTupleIsValid(heapTuple))
 {
 Form_pg_proc procForm = (Form_pg_proc) GETSTRUCT(heapTuple);

@@ -3253,11 +3181,9 @@ CoordCombineAggOid()
 static Oid
 TypeOid(Oid schemaId, const char *typeName)
 {
-Oid typeOid;
-
-typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid,
-PointerGetDatum(typeName),
-ObjectIdGetDatum(schemaId));
+Oid typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid,
+PointerGetDatum(typeName),
+ObjectIdGetDatum(schemaId));

 return typeOid;
 }

@@ -3410,8 +3336,6 @@ ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode)
 foreach(expressionCell, expressionList)
 {
 Node *expression = (Node *) lfirst(expressionCell);
-Aggref *aggregateExpression = NULL;
-AggregateType aggregateType = AGGREGATE_INVALID_FIRST;

 /* only consider aggregate expressions */
 if (!IsA(expression, Aggref))

@@ -3420,8 +3344,8 @@ ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode)
 }

 /* GetAggregateType errors out on unsupported aggregate types */
-aggregateExpression = (Aggref *) expression;
-aggregateType = GetAggregateType(aggregateExpression->aggfnoid);
+Aggref *aggregateExpression = (Aggref *) expression;
+AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid);
 Assert(aggregateType != AGGREGATE_INVALID_FIRST);

 /*

@@ -3514,11 +3438,6 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
 {
 char *errorDetail = NULL;
 bool distinctSupported = true;
-List *repartitionNodeList = NIL;
-Var *distinctColumn = NULL;
-List *tableNodeList = NIL;
-List *extendedOpNodeList = NIL;
-MultiExtendedOp *extendedOpNode = NULL;

 AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid);

@@ -3588,18 +3507,18 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
 }
 }

-repartitionNodeList = FindNodesOfType(logicalPlanNode, T_MultiPartition);
+List *repartitionNodeList = FindNodesOfType(logicalPlanNode, T_MultiPartition);
 if (repartitionNodeList != NIL)
 {
 distinctSupported = false;
 errorDetail = "aggregate (distinct) with table repartitioning is unsupported";
 }

-tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable);
-extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp);
-extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList);
+List *tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable);
+List *extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp);
+MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList);

-distinctColumn = AggregateDistinctColumn(aggregateExpression);
+Var *distinctColumn = AggregateDistinctColumn(aggregateExpression);
 if (distinctSupported)
 {
 if (distinctColumn == NULL)

@@ -3664,29 +3583,26 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression,
 static Var *
 AggregateDistinctColumn(Aggref *aggregateExpression)
 {
-Var *aggregateColumn = NULL;
-int aggregateArgumentCount = 0;
-TargetEntry *aggregateTargetEntry = NULL;
-
 /* only consider aggregates with distincts */
 if (!aggregateExpression->aggdistinct)
 {
 return NULL;
 }

-aggregateArgumentCount = list_length(aggregateExpression->args);
+int aggregateArgumentCount = list_length(aggregateExpression->args);
 if (aggregateArgumentCount != 1)
 {
 return NULL;
 }

-aggregateTargetEntry = (TargetEntry *) linitial(aggregateExpression->args);
+TargetEntry *aggregateTargetEntry = (TargetEntry *) linitial(
+aggregateExpression->args);
 if (!IsA(aggregateTargetEntry->expr, Var))
 {
 return NULL;
 }

-aggregateColumn = (Var *) aggregateTargetEntry->expr;
+Var *aggregateColumn = (Var *) aggregateTargetEntry->expr;
 return aggregateColumn;
 }

@@ -3710,8 +3626,6 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
 MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell);
 Oid relationId = tableNode->relationId;
 bool tableDistinctSupported = false;
-char partitionMethod = 0;
-List *shardList = NIL;

 if (relationId == SUBQUERY_RELATION_ID ||
 relationId == SUBQUERY_PUSHDOWN_RELATION_ID)

@@ -3720,7 +3634,7 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
 }

 /* if table has one shard, task results don't overlap */
-shardList = LoadShardList(relationId);
+List *shardList = LoadShardList(relationId);
 if (list_length(shardList) == 1)
 {
 continue;

@@ -3730,13 +3644,12 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
 * We need to check that task results don't overlap. We can only do this
 * if table is range partitioned.
 */
-partitionMethod = PartitionMethod(relationId);
+char partitionMethod = PartitionMethod(relationId);

 if (partitionMethod == DISTRIBUTE_BY_RANGE ||
 partitionMethod == DISTRIBUTE_BY_HASH)
 {
 Var *tablePartitionColumn = tableNode->partitionColumn;
-bool groupedByPartitionColumn = false;

 if (aggregateType == AGGREGATE_COUNT)
 {

@@ -3752,9 +3665,9 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode,
 }

 /* if results are grouped by partition column, we can push down */
-groupedByPartitionColumn = GroupedByColumn(opNode->groupClauseList,
-opNode->targetList,
-tablePartitionColumn);
+bool groupedByPartitionColumn = GroupedByColumn(opNode->groupClauseList,
+opNode->targetList,
+tablePartitionColumn);
 if (groupedByPartitionColumn)
 {
 tableDistinctSupported = true;

@@ -3901,8 +3814,6 @@ FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query *
 {
 Var *candidateColumn = NULL;
 List *rangetableList = query->rtable;
-Index rangeTableEntryIndex = 0;
-RangeTblEntry *rangeTableEntry = NULL;
 Expr *strippedColumnExpression = (Expr *) strip_implicit_coercions(
 (Node *) columnExpression);

@@ -3940,8 +3851,8 @@ FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query *
 return;
 }

-rangeTableEntryIndex = candidateColumn->varno - 1;
-rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex);
+Index rangeTableEntryIndex = candidateColumn->varno - 1;
+RangeTblEntry *rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex);

 if (rangeTableEntry->rtekind == RTE_RELATION)
 {

@@ -4402,7 +4313,6 @@ HasOrderByComplexExpression(List *sortClauseList, List *targetList)
 {
 SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell);
 Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList);
-bool nestedAggregate = false;

 /* simple aggregate functions are ok */
 if (IsA(sortExpression, Aggref))

@@ -4410,7 +4320,7 @@ HasOrderByComplexExpression(List *sortClauseList, List *targetList)
 continue;
 }

-nestedAggregate = contain_agg_clause(sortExpression);
+bool nestedAggregate = contain_agg_clause(sortExpression);
 if (nestedAggregate)
 {
 hasOrderByComplexExpression = true;

@@ -4430,20 +4340,17 @@ static bool
 HasOrderByHllType(List *sortClauseList, List *targetList)
 {
 bool hasOrderByHllType = false;
-Oid hllId = InvalidOid;
-Oid hllSchemaOid = InvalidOid;
-Oid hllTypeId = InvalidOid;
 ListCell *sortClauseCell = NULL;

 /* check whether HLL is loaded */
-hllId = get_extension_oid(HLL_EXTENSION_NAME, true);
+Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, true);
 if (!OidIsValid(hllId))
 {
 return hasOrderByHllType;
 }

-hllSchemaOid = get_extension_schema(hllId);
-hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME);
+Oid hllSchemaOid = get_extension_schema(hllId);
+Oid hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME);

 foreach(sortClauseCell, sortClauseList)
 {
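The hunks below move on to the logical planner entry points (MultiLogicalPlanCreate, MultiNodeTree, and their helpers), where one recurring shape is a cast folded into the new declaration, as in IsDistributedTableRTE below (schematic fragment):

    /* before */
    RangeTblEntry *rangeTableEntry = NULL;
    /* ... */
    rangeTableEntry = (RangeTblEntry *) node;

    /* after */
    RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;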
@@ -134,7 +134,6 @@ MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree,
 PlannerRestrictionContext *plannerRestrictionContext)
 {
 MultiNode *multiQueryNode = NULL;
-MultiTreeRoot *rootNode = NULL;


 if (ShouldUseSubqueryPushDown(originalQuery, queryTree, plannerRestrictionContext))

@@ -148,7 +147,7 @@ MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree,
 }

 /* add a root node to serve as the permanent handle to the tree */
-rootNode = CitusMakeNode(MultiTreeRoot);
+MultiTreeRoot *rootNode = CitusMakeNode(MultiTreeRoot);
 SetChild((MultiUnaryNode *) rootNode, multiQueryNode);

 return rootNode;

@@ -206,9 +205,7 @@ bool
 SingleRelationRepartitionSubquery(Query *queryTree)
 {
 List *rangeTableIndexList = NULL;
-RangeTblEntry *rangeTableEntry = NULL;
 List *rangeTableList = queryTree->rtable;
-int rangeTableIndex = 0;

 /* we don't support subqueries in WHERE */
 if (queryTree->hasSubLinks)

@@ -234,8 +231,8 @@ SingleRelationRepartitionSubquery(Query *queryTree)
 return false;
 }

-rangeTableIndex = linitial_int(rangeTableIndexList);
-rangeTableEntry = rt_fetch(rangeTableIndex, rangeTableList);
+int rangeTableIndex = linitial_int(rangeTableIndexList);
+RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableIndex, rangeTableList);
 if (rangeTableEntry->rtekind == RTE_RELATION)
 {
 return true;

@@ -413,9 +410,6 @@ QueryContainsDistributedTableRTE(Query *query)
 bool
 IsDistributedTableRTE(Node *node)
 {
-RangeTblEntry *rangeTableEntry = NULL;
-Oid relationId = InvalidOid;
-
 if (node == NULL)
 {
 return false;

@@ -426,13 +420,13 @@ IsDistributedTableRTE(Node *node)
 return false;
 }

-rangeTableEntry = (RangeTblEntry *) node;
+RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;
 if (rangeTableEntry->rtekind != RTE_RELATION)
 {
 return false;
 }

-relationId = rangeTableEntry->relid;
+Oid relationId = rangeTableEntry->relid;
 if (!IsDistributedTable(relationId) ||
 PartitionMethod(relationId) == DISTRIBUTE_BY_NONE)
 {

@@ -453,7 +447,6 @@ FullCompositeFieldList(List *compositeFieldList)
 bool fullCompositeFieldList = true;
 bool *compositeFieldArray = NULL;
 uint32 compositeFieldCount = 0;
-uint32 fieldIndex = 0;

 ListCell *fieldSelectCell = NULL;
 foreach(fieldSelectCell, compositeFieldList)

@@ -490,7 +483,7 @@ FullCompositeFieldList(List *compositeFieldList)
 compositeFieldArray[compositeFieldIndex] = true;
 }

-for (fieldIndex = 0; fieldIndex < compositeFieldCount; fieldIndex++)
+for (uint32 fieldIndex = 0; fieldIndex < compositeFieldCount; fieldIndex++)
 {
 if (!compositeFieldArray[fieldIndex])
 {

@@ -523,8 +516,6 @@ CompositeFieldRecursive(Expr *expression, Query *query)
 {
 FieldSelect *compositeField = NULL;
 List *rangetableList = query->rtable;
-Index rangeTableEntryIndex = 0;
-RangeTblEntry *rangeTableEntry = NULL;
 Var *candidateColumn = NULL;

 if (IsA(expression, FieldSelect))

@@ -542,8 +533,8 @@ CompositeFieldRecursive(Expr *expression, Query *query)
 return NULL;
 }

-rangeTableEntryIndex = candidateColumn->varno - 1;
-rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex);
+Index rangeTableEntryIndex = candidateColumn->varno - 1;
+RangeTblEntry *rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex);

 if (rangeTableEntry->rtekind == RTE_SUBQUERY)
 {

@@ -633,29 +624,24 @@ MultiNodeTree(Query *queryTree)
 {
 List *rangeTableList = queryTree->rtable;
 List *targetEntryList = queryTree->targetList;
-List *whereClauseList = NIL;
-List *joinClauseList = NIL;
-List *joinOrderList = NIL;
-List *tableEntryList = NIL;
-List *tableNodeList = NIL;
-List *collectTableList = NIL;
-List *subqueryEntryList = NIL;
 MultiNode *joinTreeNode = NULL;
 MultiSelect *selectNode = NULL;
 MultiProject *projectNode = NULL;
 MultiExtendedOp *extendedOpNode = NULL;
 MultiNode *currentTopNode = NULL;
-DeferredErrorMessage *unsupportedQueryError = NULL;

 /* verify we can perform distributed planning on this query */
-unsupportedQueryError = DeferErrorIfQueryNotSupported(queryTree);
+DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported(
+queryTree);
 if (unsupportedQueryError != NULL)
 {
 RaiseDeferredError(unsupportedQueryError, ERROR);
 }

 /* extract where clause qualifiers and verify we can plan for them */
-whereClauseList = WhereClauseList(queryTree->jointree);
+List *whereClauseList = WhereClauseList(queryTree->jointree);
 unsupportedQueryError = DeferErrorIfUnsupportedClause(whereClauseList);
 if (unsupportedQueryError)
 {

@@ -666,29 +652,23 @@ MultiNodeTree(Query *queryTree)
 * If we have a subquery, build a multi table node for the subquery and
 * add a collect node on top of the multi table node.
 */
-subqueryEntryList = SubqueryEntryList(queryTree);
+List *subqueryEntryList = SubqueryEntryList(queryTree);
 if (subqueryEntryList != NIL)
 {
-RangeTblEntry *subqueryRangeTableEntry = NULL;
 MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect);
-MultiTable *subqueryNode = NULL;
-MultiNode *subqueryExtendedNode = NULL;
-Query *subqueryTree = NULL;
-List *whereClauseColumnList = NIL;
-List *targetListColumnList = NIL;
-List *columnList = NIL;
 ListCell *columnCell = NULL;

 /* we only support single subquery in the entry list */
 Assert(list_length(subqueryEntryList) == 1);

-subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList);
-subqueryTree = subqueryRangeTableEntry->subquery;
+RangeTblEntry *subqueryRangeTableEntry = (RangeTblEntry *) linitial(
+subqueryEntryList);
+Query *subqueryTree = subqueryRangeTableEntry->subquery;

 /* ensure if subquery satisfies preconditions */
 Assert(DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree) == NULL);

-subqueryNode = CitusMakeNode(MultiTable);
+MultiTable *subqueryNode = CitusMakeNode(MultiTable);
 subqueryNode->relationId = SUBQUERY_RELATION_ID;
 subqueryNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID;
 subqueryNode->partitionColumn = NULL;

@@ -704,10 +684,10 @@ MultiNodeTree(Query *queryTree)
 */
 Assert(list_length(subqueryEntryList) == 1);

-whereClauseColumnList = pull_var_clause_default((Node *) whereClauseList);
-targetListColumnList = pull_var_clause_default((Node *) targetEntryList);
+List *whereClauseColumnList = pull_var_clause_default((Node *) whereClauseList);
+List *targetListColumnList = pull_var_clause_default((Node *) targetEntryList);

-columnList = list_concat(whereClauseColumnList, targetListColumnList);
+List *columnList = list_concat(whereClauseColumnList, targetListColumnList);
|
||||
foreach(columnCell, columnList)
|
||||
{
|
||||
Var *column = (Var *) lfirst(columnCell);
|
||||
|
@ -715,7 +695,7 @@ MultiNodeTree(Query *queryTree)
|
|||
}
|
||||
|
||||
/* recursively create child nested multitree */
|
||||
subqueryExtendedNode = MultiNodeTree(subqueryTree);
|
||||
MultiNode *subqueryExtendedNode = MultiNodeTree(subqueryTree);
|
||||
|
||||
SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode);
|
||||
SetChild((MultiUnaryNode *) subqueryNode, subqueryExtendedNode);
|
||||
|
@ -751,7 +731,7 @@ MultiNodeTree(Query *queryTree)
|
|||
Assert(currentTopNode != NULL);
|
||||
|
||||
/* build select node if the query has selection criteria */
|
||||
selectNode = MultiSelectNode(whereClauseList);
|
||||
MultiSelect *selectNode = MultiSelectNode(whereClauseList);
|
||||
if (selectNode != NULL)
|
||||
{
|
||||
SetChild((MultiUnaryNode *) selectNode, currentTopNode);
|
||||
|
@ -759,7 +739,7 @@ MultiNodeTree(Query *queryTree)
|
|||
}
|
||||
|
||||
/* build project node for the columns to project */
|
||||
projectNode = MultiProjectNode(targetEntryList);
|
||||
MultiProject *projectNode = MultiProjectNode(targetEntryList);
|
||||
SetChild((MultiUnaryNode *) projectNode, currentTopNode);
|
||||
currentTopNode = (MultiNode *) projectNode;
|
||||
|
||||
|
@ -769,7 +749,7 @@ MultiNodeTree(Query *queryTree)
|
|||
* distinguish between aggregates and expressions; and we address this later
|
||||
* in the logical optimizer.
|
||||
*/
|
||||
extendedOpNode = MultiExtendedOpNode(queryTree);
|
||||
MultiExtendedOp *extendedOpNode = MultiExtendedOpNode(queryTree);
|
||||
SetChild((MultiUnaryNode *) extendedOpNode, currentTopNode);
|
||||
currentTopNode = (MultiNode *) extendedOpNode;
|
||||
|
||||
|
@ -816,16 +796,13 @@ IsReadIntermediateResultFunction(Node *node)
|
|||
char *
|
||||
FindIntermediateResultIdIfExists(RangeTblEntry *rte)
|
||||
{
|
||||
List *functionList = NULL;
|
||||
RangeTblFunction *rangeTblfunction = NULL;
|
||||
FuncExpr *funcExpr = NULL;
|
||||
char *resultId = NULL;
|
||||
|
||||
Assert(rte->rtekind == RTE_FUNCTION);
|
||||
|
||||
functionList = rte->functions;
|
||||
rangeTblfunction = (RangeTblFunction *) linitial(functionList);
|
||||
funcExpr = (FuncExpr *) rangeTblfunction->funcexpr;
|
||||
List *functionList = rte->functions;
|
||||
RangeTblFunction *rangeTblfunction = (RangeTblFunction *) linitial(functionList);
|
||||
FuncExpr *funcExpr = (FuncExpr *) rangeTblfunction->funcexpr;
|
||||
|
||||
if (IsReadIntermediateResultFunction((Node *) funcExpr))
|
||||
{
|
||||
|
@ -850,9 +827,6 @@ DeferredErrorMessage *
|
|||
DeferErrorIfQueryNotSupported(Query *queryTree)
|
||||
{
|
||||
char *errorMessage = NULL;
|
||||
bool hasTablesample = false;
|
||||
bool hasUnsupportedJoin = false;
|
||||
bool hasComplexRangeTableType = false;
|
||||
bool preconditionsSatisfied = true;
|
||||
StringInfo errorInfo = NULL;
|
||||
const char *errorHint = NULL;
|
||||
|
@ -922,7 +896,7 @@ DeferErrorIfQueryNotSupported(Query *queryTree)
|
|||
errorHint = filterHint;
|
||||
}
|
||||
|
||||
hasTablesample = HasTablesample(queryTree);
|
||||
bool hasTablesample = HasTablesample(queryTree);
|
||||
if (hasTablesample)
|
||||
{
|
||||
preconditionsSatisfied = false;
|
||||
|
@ -930,7 +904,8 @@ DeferErrorIfQueryNotSupported(Query *queryTree)
|
|||
errorHint = filterHint;
|
||||
}
|
||||
|
||||
hasUnsupportedJoin = HasUnsupportedJoinWalker((Node *) queryTree->jointree, NULL);
|
||||
bool hasUnsupportedJoin = HasUnsupportedJoinWalker((Node *) queryTree->jointree,
|
||||
NULL);
|
||||
if (hasUnsupportedJoin)
|
||||
{
|
||||
preconditionsSatisfied = false;
|
||||
|
@ -939,7 +914,7 @@ DeferErrorIfQueryNotSupported(Query *queryTree)
|
|||
errorHint = joinHint;
|
||||
}
|
||||
|
||||
hasComplexRangeTableType = HasComplexRangeTableType(queryTree);
|
||||
bool hasComplexRangeTableType = HasComplexRangeTableType(queryTree);
|
||||
if (hasComplexRangeTableType)
|
||||
{
|
||||
preconditionsSatisfied = false;
|
||||
|
@ -1079,9 +1054,6 @@ DeferErrorIfUnsupportedSubqueryRepartition(Query *subqueryTree)
|
|||
char *errorDetail = NULL;
|
||||
bool preconditionsSatisfied = true;
|
||||
List *joinTreeTableIndexList = NIL;
|
||||
int rangeTableIndex = 0;
|
||||
RangeTblEntry *rangeTableEntry = NULL;
|
||||
Query *innerSubquery = NULL;
|
||||
|
||||
if (!subqueryTree->hasAggs)
|
||||
{
|
||||
|
@ -1136,15 +1108,15 @@ DeferErrorIfUnsupportedSubqueryRepartition(Query *subqueryTree)
|
|||
Assert(list_length(joinTreeTableIndexList) == 1);
|
||||
|
||||
/* continue with the inner subquery */
|
||||
rangeTableIndex = linitial_int(joinTreeTableIndexList);
|
||||
rangeTableEntry = rt_fetch(rangeTableIndex, subqueryTree->rtable);
|
||||
int rangeTableIndex = linitial_int(joinTreeTableIndexList);
|
||||
RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableIndex, subqueryTree->rtable);
|
||||
if (rangeTableEntry->rtekind == RTE_RELATION)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Assert(rangeTableEntry->rtekind == RTE_SUBQUERY);
|
||||
innerSubquery = rangeTableEntry->subquery;
|
||||
Query *innerSubquery = rangeTableEntry->subquery;
|
||||
|
||||
/* recursively continue to the inner subqueries */
|
||||
return DeferErrorIfUnsupportedSubqueryRepartition(innerSubquery);
|
||||
|
@ -1225,10 +1197,9 @@ WhereClauseList(FromExpr *fromExpr)
|
|||
{
|
||||
FromExpr *fromExprCopy = copyObject(fromExpr);
|
||||
QualifierWalkerContext *walkerContext = palloc0(sizeof(QualifierWalkerContext));
|
||||
List *whereClauseList = NIL;
|
||||
|
||||
ExtractFromExpressionWalker((Node *) fromExprCopy, walkerContext);
|
||||
whereClauseList = walkerContext->baseQualifierList;
|
||||
List *whereClauseList = walkerContext->baseQualifierList;
|
||||
|
||||
return whereClauseList;
|
||||
}
|
||||
|
@ -1335,7 +1306,6 @@ JoinClauseList(List *whereClauseList)
|
|||
static bool
|
||||
ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext)
|
||||
{
|
||||
bool walkerResult = false;
|
||||
if (node == NULL)
|
||||
{
|
||||
return false;
|
||||
|
@ -1406,8 +1376,8 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext)
|
|||
}
|
||||
}
|
||||
|
||||
walkerResult = expression_tree_walker(node, ExtractFromExpressionWalker,
|
||||
(void *) walkerContext);
|
||||
bool walkerResult = expression_tree_walker(node, ExtractFromExpressionWalker,
|
||||
(void *) walkerContext);
|
||||
|
||||
return walkerResult;
|
||||
}
|
||||
|
@ -1421,10 +1391,6 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext)
|
|||
bool
|
||||
IsJoinClause(Node *clause)
|
||||
{
|
||||
OpExpr *operatorExpression = NULL;
|
||||
bool equalsOperator = false;
|
||||
List *varList = NIL;
|
||||
Var *initialVar = NULL;
|
||||
Var *var = NULL;
|
||||
|
||||
if (!IsA(clause, OpExpr))
|
||||
|
@ -1432,8 +1398,8 @@ IsJoinClause(Node *clause)
|
|||
return false;
|
||||
}
|
||||
|
||||
operatorExpression = castNode(OpExpr, clause);
|
||||
equalsOperator = OperatorImplementsEquality(operatorExpression->opno);
|
||||
OpExpr *operatorExpression = castNode(OpExpr, clause);
|
||||
bool equalsOperator = OperatorImplementsEquality(operatorExpression->opno);
|
||||
|
||||
if (!equalsOperator)
|
||||
{
|
||||
|
@ -1452,13 +1418,13 @@ IsJoinClause(Node *clause)
|
|||
* take all column references from the clause, if we find 2 column references from a
|
||||
* different relation we assume this is a join clause
|
||||
*/
|
||||
varList = pull_var_clause_default(clause);
|
||||
List *varList = pull_var_clause_default(clause);
|
||||
if (list_length(varList) <= 0)
|
||||
{
|
||||
/* no column references in query, not describing a join */
|
||||
return false;
|
||||
}
|
||||
initialVar = castNode(Var, linitial(varList));
|
||||
Var *initialVar = castNode(Var, linitial(varList));
|
||||
|
||||
foreach_ptr(var, varList)
|
||||
{
|
||||
|
@ -1635,16 +1601,17 @@ MultiJoinTree(List *joinOrderList, List *collectTableList, List *joinWhereClause
|
|||
JoinRuleType joinRuleType = joinOrderNode->joinRuleType;
|
||||
JoinType joinType = joinOrderNode->joinType;
|
||||
Var *partitionColumn = joinOrderNode->partitionColumn;
|
||||
MultiNode *newJoinNode = NULL;
|
||||
List *joinClauseList = joinOrderNode->joinClauseList;
|
||||
|
||||
/*
|
||||
* Build a join node between the top of our join tree and the next
|
||||
* table in the join order.
|
||||
*/
|
||||
newJoinNode = ApplyJoinRule(currentTopNode, (MultiNode *) collectNode,
|
||||
joinRuleType, partitionColumn, joinType,
|
||||
joinClauseList);
|
||||
MultiNode *newJoinNode = ApplyJoinRule(currentTopNode,
|
||||
(MultiNode *) collectNode,
|
||||
joinRuleType, partitionColumn,
|
||||
joinType,
|
||||
joinClauseList);
|
||||
|
||||
/* the new join node becomes the top of our join tree */
|
||||
currentTopNode = newJoinNode;
|
||||
|
@ -1727,22 +1694,19 @@ MultiSelectNode(List *whereClauseList)
|
|||
static bool
|
||||
IsSelectClause(Node *clause)
|
||||
{
|
||||
List *columnList = NIL;
|
||||
ListCell *columnCell = NULL;
|
||||
Var *firstColumn = NULL;
|
||||
Index firstColumnTableId = 0;
|
||||
bool isSelectClause = true;
|
||||
|
||||
/* extract columns from the clause */
|
||||
columnList = pull_var_clause_default(clause);
|
||||
List *columnList = pull_var_clause_default(clause);
|
||||
if (list_length(columnList) == 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
/* get first column's tableId */
|
||||
firstColumn = (Var *) linitial(columnList);
|
||||
firstColumnTableId = firstColumn->varno;
|
||||
Var *firstColumn = (Var *) linitial(columnList);
|
||||
Index firstColumnTableId = firstColumn->varno;
|
||||
|
||||
/* check if all columns are from the same table */
|
||||
foreach(columnCell, columnList)
|
||||
|
@ -1766,13 +1730,11 @@ IsSelectClause(Node *clause)
|
|||
MultiProject *
|
||||
MultiProjectNode(List *targetEntryList)
|
||||
{
|
||||
MultiProject *projectNode = NULL;
|
||||
List *uniqueColumnList = NIL;
|
||||
List *columnList = NIL;
|
||||
ListCell *columnCell = NULL;
|
||||
|
||||
/* extract the list of columns and remove any duplicates */
|
||||
columnList = pull_var_clause_default((Node *) targetEntryList);
|
||||
List *columnList = pull_var_clause_default((Node *) targetEntryList);
|
||||
foreach(columnCell, columnList)
|
||||
{
|
||||
Var *column = (Var *) lfirst(columnCell);
|
||||
|
@ -1781,7 +1743,7 @@ MultiProjectNode(List *targetEntryList)
|
|||
}
|
||||
|
||||
/* create project node with list of columns to project */
|
||||
projectNode = CitusMakeNode(MultiProject);
|
||||
MultiProject *projectNode = CitusMakeNode(MultiProject);
|
||||
projectNode->columnList = uniqueColumnList;
|
||||
|
||||
return projectNode;
|
||||
|
@ -1932,7 +1894,6 @@ List *
|
|||
FindNodesOfType(MultiNode *node, int type)
|
||||
{
|
||||
List *nodeList = NIL;
|
||||
int nodeType = T_Invalid;
|
||||
|
||||
/* terminal condition for recursion */
|
||||
if (node == NULL)
|
||||
|
@ -1941,7 +1902,7 @@ FindNodesOfType(MultiNode *node, int type)
|
|||
}
|
||||
|
||||
/* current node has expected node type */
|
||||
nodeType = CitusNodeTag(node);
|
||||
int nodeType = CitusNodeTag(node);
|
||||
if (nodeType == type)
|
||||
{
|
||||
nodeList = lappend(nodeList, node);
|
||||
|
@ -1997,27 +1958,22 @@ static MultiNode *
|
|||
ApplyJoinRule(MultiNode *leftNode, MultiNode *rightNode, JoinRuleType ruleType,
|
||||
Var *partitionColumn, JoinType joinType, List *joinClauseList)
|
||||
{
|
||||
RuleApplyFunction ruleApplyFunction = NULL;
|
||||
MultiNode *multiNode = NULL;
|
||||
|
||||
List *applicableJoinClauses = NIL;
|
||||
List *leftTableIdList = OutputTableIdList(leftNode);
|
||||
List *rightTableIdList = OutputTableIdList(rightNode);
|
||||
int rightTableIdCount PG_USED_FOR_ASSERTS_ONLY = 0;
|
||||
uint32 rightTableId = 0;
|
||||
|
||||
rightTableIdCount = list_length(rightTableIdList);
|
||||
Assert(rightTableIdCount == 1);
|
||||
|
||||
/* find applicable join clauses between the left and right data sources */
|
||||
rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
applicableJoinClauses = ApplicableJoinClauses(leftTableIdList, rightTableId,
|
||||
joinClauseList);
|
||||
uint32 rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
List *applicableJoinClauses = ApplicableJoinClauses(leftTableIdList, rightTableId,
|
||||
joinClauseList);
|
||||
|
||||
/* call the join rule application function to create the new join node */
|
||||
ruleApplyFunction = JoinRuleApplyFunction(ruleType);
|
||||
multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn,
|
||||
joinType, applicableJoinClauses);
|
||||
RuleApplyFunction ruleApplyFunction = JoinRuleApplyFunction(ruleType);
|
||||
MultiNode *multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn,
|
||||
joinType, applicableJoinClauses);
|
||||
|
||||
if (joinType != JOIN_INNER && CitusIsA(multiNode, MultiJoin))
|
||||
{
|
||||
|
@ -2041,7 +1997,6 @@ static RuleApplyFunction
|
|||
JoinRuleApplyFunction(JoinRuleType ruleType)
|
||||
{
|
||||
static bool ruleApplyFunctionInitialized = false;
|
||||
RuleApplyFunction ruleApplyFunction = NULL;
|
||||
|
||||
if (!ruleApplyFunctionInitialized)
|
||||
{
|
||||
|
@ -2057,7 +2012,7 @@ JoinRuleApplyFunction(JoinRuleType ruleType)
|
|||
ruleApplyFunctionInitialized = true;
|
||||
}
|
||||
|
||||
ruleApplyFunction = RuleApplyFunctionArray[ruleType];
|
||||
RuleApplyFunction ruleApplyFunction = RuleApplyFunctionArray[ruleType];
|
||||
Assert(ruleApplyFunction != NULL);
|
||||
|
||||
return ruleApplyFunction;
|
||||
|
@ -2154,11 +2109,6 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
|
|||
Var *partitionColumn, JoinType joinType,
|
||||
List *applicableJoinClauses)
|
||||
{
|
||||
OpExpr *joinClause = NULL;
|
||||
Var *leftColumn = NULL;
|
||||
Var *rightColumn = NULL;
|
||||
List *rightTableIdList = NIL;
|
||||
uint32 rightTableId = 0;
|
||||
uint32 partitionTableId = partitionColumn->varno;
|
||||
|
||||
/* create all operator structures up front */
|
||||
|
@ -2171,12 +2121,13 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
|
|||
* column against the join clause's columns. If one of the columns matches,
|
||||
* we introduce a (re-)partition operator for the other column.
|
||||
*/
|
||||
joinClause = SinglePartitionJoinClause(partitionColumn, applicableJoinClauses);
|
||||
OpExpr *joinClause = SinglePartitionJoinClause(partitionColumn,
|
||||
applicableJoinClauses);
|
||||
Assert(joinClause != NULL);
|
||||
|
||||
/* both are verified in SinglePartitionJoinClause to not be NULL, assert is to guard */
|
||||
leftColumn = LeftColumnOrNULL(joinClause);
|
||||
rightColumn = RightColumnOrNULL(joinClause);
|
||||
Var *leftColumn = LeftColumnOrNULL(joinClause);
|
||||
Var *rightColumn = RightColumnOrNULL(joinClause);
|
||||
|
||||
Assert(leftColumn != NULL);
|
||||
Assert(rightColumn != NULL);
|
||||
|
@ -2193,8 +2144,8 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
|
|||
}
|
||||
|
||||
/* determine the node the partition operator goes on top of */
|
||||
rightTableIdList = OutputTableIdList(rightNode);
|
||||
rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
List *rightTableIdList = OutputTableIdList(rightNode);
|
||||
uint32 rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
Assert(list_length(rightTableIdList) == 1);
|
||||
|
||||
/*
|
||||
|
@ -2238,33 +2189,22 @@ ApplyDualPartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
|
|||
Var *partitionColumn, JoinType joinType,
|
||||
List *applicableJoinClauses)
|
||||
{
|
||||
MultiJoin *joinNode = NULL;
|
||||
OpExpr *joinClause = NULL;
|
||||
MultiPartition *leftPartitionNode = NULL;
|
||||
MultiPartition *rightPartitionNode = NULL;
|
||||
MultiCollect *leftCollectNode = NULL;
|
||||
MultiCollect *rightCollectNode = NULL;
|
||||
Var *leftColumn = NULL;
|
||||
Var *rightColumn = NULL;
|
||||
List *rightTableIdList = NIL;
|
||||
uint32 rightTableId = 0;
|
||||
|
||||
/* find the appropriate join clause */
|
||||
joinClause = DualPartitionJoinClause(applicableJoinClauses);
|
||||
OpExpr *joinClause = DualPartitionJoinClause(applicableJoinClauses);
|
||||
Assert(joinClause != NULL);
|
||||
|
||||
/* both are verified in DualPartitionJoinClause to not be NULL, assert is to guard */
|
||||
leftColumn = LeftColumnOrNULL(joinClause);
|
||||
rightColumn = RightColumnOrNULL(joinClause);
|
||||
Var *leftColumn = LeftColumnOrNULL(joinClause);
|
||||
Var *rightColumn = RightColumnOrNULL(joinClause);
|
||||
Assert(leftColumn != NULL);
|
||||
Assert(rightColumn != NULL);
|
||||
|
||||
rightTableIdList = OutputTableIdList(rightNode);
|
||||
rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
List *rightTableIdList = OutputTableIdList(rightNode);
|
||||
uint32 rightTableId = (uint32) linitial_int(rightTableIdList);
|
||||
Assert(list_length(rightTableIdList) == 1);
|
||||
|
||||
leftPartitionNode = CitusMakeNode(MultiPartition);
|
||||
rightPartitionNode = CitusMakeNode(MultiPartition);
|
||||
MultiPartition *leftPartitionNode = CitusMakeNode(MultiPartition);
|
||||
MultiPartition *rightPartitionNode = CitusMakeNode(MultiPartition);
|
||||
|
||||
/* find the partition node each join clause column belongs to */
|
||||
if (leftColumn->varno == rightTableId)
|
||||
|
@ -2283,14 +2223,14 @@ ApplyDualPartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
|
|||
SetChild((MultiUnaryNode *) rightPartitionNode, rightNode);
|
||||
|
||||
/* add collect operators on top of the two partition operators */
|
||||
leftCollectNode = CitusMakeNode(MultiCollect);
|
||||
rightCollectNode = CitusMakeNode(MultiCollect);
|
||||
MultiCollect *leftCollectNode = CitusMakeNode(MultiCollect);
|
||||
MultiCollect *rightCollectNode = CitusMakeNode(MultiCollect);
|
||||
|
||||
SetChild((MultiUnaryNode *) leftCollectNode, (MultiNode *) leftPartitionNode);
|
||||
SetChild((MultiUnaryNode *) rightCollectNode, (MultiNode *) rightPartitionNode);
|
||||
|
||||
/* add join operator on top of the two collect operators */
|
||||
joinNode = CitusMakeNode(MultiJoin);
|
||||
MultiJoin *joinNode = CitusMakeNode(MultiJoin);
|
||||
joinNode->joinRuleType = DUAL_PARTITION_JOIN;
|
||||
joinNode->joinType = joinType;
|
||||
joinNode->joinClauseList = applicableJoinClauses;
|
||||
|
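Every hunk above applies the same mechanical rewrite: a local variable that was declared at the top of its block and then assigned exactly once further down has the declaration folded into that first assignment. A minimal before/after sketch of the pattern, with illustrative names rather than ones taken from the diff:

/* before: declaration separated from its single assignment */
static uint32
AnchorShardCountSketch(List *shardIntervalList)
{
	uint32 shardCount = 0;

	shardCount = (uint32) list_length(shardIntervalList);
	return shardCount;
}

/* after: the declaration is merged into the assignment */
static uint32
AnchorShardCountSketch(List *shardIntervalList)
{
	uint32 shardCount = (uint32) list_length(shardIntervalList);
	return shardCount;
}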
@@ -71,13 +71,13 @@ PlannedStmt *
 MasterNodeSelectPlan(DistributedPlan *distributedPlan, CustomScan *remoteScan)
 {
 	Query *masterQuery = distributedPlan->masterQuery;
-	PlannedStmt *masterSelectPlan = NULL;

 	Job *workerJob = distributedPlan->workerJob;
 	List *workerTargetList = workerJob->jobQuery->targetList;
 	List *masterTargetList = MasterTargetList(workerTargetList);

-	masterSelectPlan = BuildSelectStatement(masterQuery, masterTargetList, remoteScan);
+	PlannedStmt *masterSelectPlan = BuildSelectStatement(masterQuery, masterTargetList,
+														 remoteScan);

 	return masterSelectPlan;
 }
@@ -99,15 +99,13 @@ MasterTargetList(List *workerTargetList)
 	foreach(workerTargetCell, workerTargetList)
 	{
 		TargetEntry *workerTargetEntry = (TargetEntry *) lfirst(workerTargetCell);
-		TargetEntry *masterTargetEntry = NULL;
-		Var *masterColumn = NULL;

 		if (workerTargetEntry->resjunk)
 		{
 			continue;
 		}

-		masterColumn = makeVarFromTargetEntry(tableId, workerTargetEntry);
+		Var *masterColumn = makeVarFromTargetEntry(tableId, workerTargetEntry);
 		masterColumn->varattno = columnId;
 		masterColumn->varoattno = columnId;
 		columnId++;
@@ -124,7 +122,7 @@ MasterTargetList(List *workerTargetList)
 		 * from the worker target entry. Note that any changes to worker target
 		 * entry's sort and group clauses will *break* us here.
 		 */
-		masterTargetEntry = flatCopyTargetEntry(workerTargetEntry);
+		TargetEntry *masterTargetEntry = flatCopyTargetEntry(workerTargetEntry);
 		masterTargetEntry->expr = (Expr *) masterColumn;
 		masterTargetList = lappend(masterTargetList, masterTargetEntry);
 	}
@@ -469,16 +467,14 @@ BuildAggregatePlan(PlannerInfo *root, Query *masterQuery, Plan *subPlan)
 static bool
 HasDistinctAggregate(Query *masterQuery)
 {
-	List *targetVarList = NIL;
-	List *havingVarList = NIL;
-	List *allColumnList = NIL;
 	ListCell *allColumnCell = NULL;

-	targetVarList = pull_var_clause((Node *) masterQuery->targetList,
-									PVC_INCLUDE_AGGREGATES);
-	havingVarList = pull_var_clause(masterQuery->havingQual, PVC_INCLUDE_AGGREGATES);
+	List *targetVarList = pull_var_clause((Node *) masterQuery->targetList,
+										  PVC_INCLUDE_AGGREGATES);
+	List *havingVarList = pull_var_clause(masterQuery->havingQual,
+										  PVC_INCLUDE_AGGREGATES);

-	allColumnList = list_concat(targetVarList, havingVarList);
+	List *allColumnList = list_concat(targetVarList, havingVarList);
 	foreach(allColumnCell, allColumnList)
 	{
 		Node *columnNode = lfirst(allColumnCell);
@@ -506,7 +502,6 @@ static bool
 UseGroupAggregateWithHLL(Query *masterQuery)
 {
 	Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, true);
-	const char *gucStrValue = NULL;

 	/* If HLL extension is not loaded, return false */
 	if (!OidIsValid(hllId))
@@ -515,7 +510,7 @@ UseGroupAggregateWithHLL(Query *masterQuery)
 	}

 	/* If HLL is loaded but related GUC is not set, return false */
-	gucStrValue = GetConfigOption(HLL_FORCE_GROUPAGG_GUC_NAME, true, false);
+	const char *gucStrValue = GetConfigOption(HLL_FORCE_GROUPAGG_GUC_NAME, true, false);
 	if (gucStrValue == NULL || strcmp(gucStrValue, "off") == 0)
 	{
 		return false;
@@ -532,10 +527,9 @@ UseGroupAggregateWithHLL(Query *masterQuery)
 static bool
 QueryContainsAggregateWithHLL(Query *query)
 {
-	List *varList = NIL;
 	ListCell *varCell = NULL;

-	varList = pull_var_clause((Node *) query->targetList, PVC_INCLUDE_AGGREGATES);
+	List *varList = pull_var_clause((Node *) query->targetList, PVC_INCLUDE_AGGREGATES);
 	foreach(varCell, varList)
 	{
 		Var *var = (Var *) lfirst(varCell);
@@ -579,10 +573,8 @@ static Plan *
 BuildDistinctPlan(Query *masterQuery, Plan *subPlan)
 {
 	Plan *distinctPlan = NULL;
-	bool distinctClausesHashable = true;
 	List *distinctClauseList = masterQuery->distinctClause;
 	List *targetList = copyObject(masterQuery->targetList);
-	bool hasDistinctAggregate = false;

 	/*
 	 * We don't need to add distinct plan if all of the columns used in group by
@@ -602,8 +594,8 @@ BuildDistinctPlan(Query *masterQuery, Plan *subPlan)
 	 * members are hashable, and not containing distinct aggregate.
 	 * Otherwise create sort+unique plan.
 	 */
-	distinctClausesHashable = grouping_is_hashable(distinctClauseList);
-	hasDistinctAggregate = HasDistinctAggregate(masterQuery);
+	bool distinctClausesHashable = grouping_is_hashable(distinctClauseList);
+	bool hasDistinctAggregate = HasDistinctAggregate(masterQuery);

 	if (enable_hashagg && distinctClausesHashable && !hasDistinctAggregate)
 	{
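One recurring exception is visible in the hunks above and below: ListCell iterators such as allColumnCell or columnCell keep their separate declarations. foreach() assigns the cell through macro expansion rather than through a plain "variable = expression;" statement, so there is no assignment for the declaration to merge into. A sketch of the shape that stays untouched (the surrounding context is illustrative, but pull_var_clause_default, foreach, and lfirst are the same calls used in the diff):

List *columnList = pull_var_clause_default(clause);
ListCell *columnCell = NULL;

/* columnCell is only ever assigned inside the foreach() macro,
 * so its declaration stays where it is */
foreach(columnCell, columnList)
{
	Var *column = (Var *) lfirst(columnCell);
	/* ... examine column ... */
}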
File diff suppressed because it is too large
@@ -260,13 +260,11 @@ CreateSingleTaskRouterPlan(DistributedPlan *distributedPlan, Query *originalQuer
 							Query *query,
 							PlannerRestrictionContext *plannerRestrictionContext)
 {
-	Job *job = NULL;
-
 	distributedPlan->modLevel = RowModifyLevelForQuery(query);

 	/* we cannot have multi shard update/delete query via this code path */
-	job = RouterJob(originalQuery, plannerRestrictionContext,
-					&distributedPlan->planningError);
+	Job *job = RouterJob(originalQuery, plannerRestrictionContext,
+						 &distributedPlan->planningError);

 	if (distributedPlan->planningError != NULL)
 	{
@@ -302,7 +300,6 @@ ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex)
 	Oid relationId = shardInterval->relationId;
 	char partitionMethod = PartitionMethod(shardInterval->relationId);
 	Var *partitionColumn = NULL;
-	Node *baseConstraint = NULL;

 	if (partitionMethod == DISTRIBUTE_BY_HASH)
 	{
@@ -321,7 +318,7 @@ ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex)
 	}

 	/* build the base expression for constraint */
-	baseConstraint = BuildBaseConstraint(partitionColumn);
+	Node *baseConstraint = BuildBaseConstraint(partitionColumn);

 	/* walk over shard list and check if shards can be pruned */
 	if (shardInterval->minValueExists && shardInterval->maxValueExists)
@@ -349,14 +346,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 	List *targetList = subqery->targetList;
 	ListCell *targetEntryCell = NULL;
 	Var *targetPartitionColumnVar = NULL;
-	Oid integer4GEoperatorId = InvalidOid;
-	Oid integer4LEoperatorId = InvalidOid;
-	TypeCacheEntry *typeEntry = NULL;
-	FuncExpr *hashFunctionExpr = NULL;
-	OpExpr *greaterThanAndEqualsBoundExpr = NULL;
-	OpExpr *lessThanAndEqualsBoundExpr = NULL;
 	List *boundExpressionList = NIL;
-	Expr *andedBoundExpressions = NULL;

 	/* iterate through the target entries */
 	foreach(targetEntryCell, targetList)
@@ -374,20 +364,20 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 	/* we should have found target partition column */
 	Assert(targetPartitionColumnVar != NULL);

-	integer4GEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID,
-											   INT4OID,
-											   BTGreaterEqualStrategyNumber);
-	integer4LEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID,
-											   INT4OID,
-											   BTLessEqualStrategyNumber);
+	Oid integer4GEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID,
+												   INT4OID,
+												   BTGreaterEqualStrategyNumber);
+	Oid integer4LEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID,
+												   INT4OID,
+												   BTLessEqualStrategyNumber);

 	/* ensure that we find the correct operators */
 	Assert(integer4GEoperatorId != InvalidOid);
 	Assert(integer4LEoperatorId != InvalidOid);

 	/* look up the type cache */
-	typeEntry = lookup_type_cache(targetPartitionColumnVar->vartype,
-								  TYPECACHE_HASH_PROC_FINFO);
+	TypeCacheEntry *typeEntry = lookup_type_cache(targetPartitionColumnVar->vartype,
+												  TYPECACHE_HASH_PROC_FINFO);

 	/* probable never possible given that the tables are already hash partitioned */
 	if (!OidIsValid(typeEntry->hash_proc_finfo.fn_oid))
@@ -398,7 +388,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 	}

 	/* generate hashfunc(partCol) expression */
-	hashFunctionExpr = makeNode(FuncExpr);
+	FuncExpr *hashFunctionExpr = makeNode(FuncExpr);
 	hashFunctionExpr->funcid = CitusWorkerHashFunctionId();
 	hashFunctionExpr->args = list_make1(targetPartitionColumnVar);

@@ -406,7 +396,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 	hashFunctionExpr->funcresulttype = INT4OID;

 	/* generate hashfunc(partCol) >= shardMinValue OpExpr */
-	greaterThanAndEqualsBoundExpr =
+	OpExpr *greaterThanAndEqualsBoundExpr =
 		(OpExpr *) make_opclause(integer4GEoperatorId,
 								 InvalidOid, false,
 								 (Expr *) hashFunctionExpr,
@@ -421,7 +411,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 		get_func_rettype(greaterThanAndEqualsBoundExpr->opfuncid);

 	/* generate hashfunc(partCol) <= shardMinValue OpExpr */
-	lessThanAndEqualsBoundExpr =
+	OpExpr *lessThanAndEqualsBoundExpr =
 		(OpExpr *) make_opclause(integer4LEoperatorId,
 								 InvalidOid, false,
 								 (Expr *) hashFunctionExpr,
@@ -438,7 +428,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 	boundExpressionList = lappend(boundExpressionList, greaterThanAndEqualsBoundExpr);
 	boundExpressionList = lappend(boundExpressionList, lessThanAndEqualsBoundExpr);

-	andedBoundExpressions = make_ands_explicit(boundExpressionList);
+	Expr *andedBoundExpressions = make_ands_explicit(boundExpressionList);

 	/* finally add the quals */
 	if (subqery->jointree->quals == NULL)
@@ -461,19 +451,15 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval
 RangeTblEntry *
 ExtractSelectRangeTableEntry(Query *query)
 {
-	List *fromList = NULL;
-	RangeTblRef *reference = NULL;
-	RangeTblEntry *subqueryRte = NULL;
-
 	Assert(InsertSelectIntoDistributedTable(query));

 	/*
 	 * Since we already asserted InsertSelectIntoDistributedTable() it is safe to access
 	 * both lists
 	 */
-	fromList = query->jointree->fromlist;
-	reference = linitial(fromList);
-	subqueryRte = rt_fetch(reference->rtindex, query->rtable);
+	List *fromList = query->jointree->fromlist;
+	RangeTblRef *reference = linitial(fromList);
+	RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable);

 	return subqueryRte;
 }
@@ -490,8 +476,6 @@ ExtractSelectRangeTableEntry(Query *query)
 Oid
 ModifyQueryResultRelationId(Query *query)
 {
-	RangeTblEntry *resultRte = NULL;
-
 	/* only modify queries have result relations */
 	if (!IsModifyCommand(query))
 	{
@@ -499,7 +483,7 @@ ModifyQueryResultRelationId(Query *query)
 						errmsg("input query is not a modification query")));
 	}

-	resultRte = ExtractResultRelationRTE(query);
+	RangeTblEntry *resultRte = ExtractResultRelationRTE(query);
 	Assert(OidIsValid(resultRte->relid));

 	return resultRte->relid;
@@ -562,7 +546,6 @@ DeferredErrorMessage *
 ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuery,
 					 PlannerRestrictionContext *plannerRestrictionContext)
 {
-	DeferredErrorMessage *deferredError = NULL;
 	Oid distributedTableId = ExtractFirstDistributedTableId(queryTree);
 	uint32 rangeTableId = 1;
 	Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
@@ -571,7 +554,7 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
 	uint32 queryTableCount = 0;
 	CmdType commandType = queryTree->commandType;

-	deferredError = DeferErrorIfModifyView(queryTree);
+	DeferredErrorMessage *deferredError = DeferErrorIfModifyView(queryTree);
 	if (deferredError != NULL)
 	{
 		return deferredError;
@@ -624,7 +607,6 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
 	{
 		CommonTableExpr *cte = (CommonTableExpr *) lfirst(cteCell);
 		Query *cteQuery = (Query *) cte->ctequery;
-		DeferredErrorMessage *cteError = NULL;

 		if (cteQuery->commandType != CMD_SELECT)
 		{
@@ -649,7 +631,7 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
 									 NULL, NULL);
 		}

-		cteError = MultiRouterPlannableQuery(cteQuery);
+		DeferredErrorMessage *cteError = MultiRouterPlannableQuery(cteQuery);
 		if (cteError)
 		{
 			return cteError;
@@ -957,12 +939,7 @@ DeferErrorIfModifyView(Query *queryTree)
 DeferredErrorMessage *
 ErrorIfOnConflictNotSupported(Query *queryTree)
 {
-	Oid distributedTableId = InvalidOid;
 	uint32 rangeTableId = 1;
-	Var *partitionColumn = NULL;
-	List *onConflictSet = NIL;
-	Node *arbiterWhere = NULL;
-	Node *onConflictWhere = NULL;
 	ListCell *setTargetCell = NULL;
 	bool specifiesPartitionValue = false;

@@ -972,12 +949,12 @@ ErrorIfOnConflictNotSupported(Query *queryTree)
 		return NULL;
 	}

-	distributedTableId = ExtractFirstDistributedTableId(queryTree);
-	partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
+	Oid distributedTableId = ExtractFirstDistributedTableId(queryTree);
+	Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);

-	onConflictSet = queryTree->onConflict->onConflictSet;
-	arbiterWhere = queryTree->onConflict->arbiterWhere;
-	onConflictWhere = queryTree->onConflict->onConflictWhere;
+	List *onConflictSet = queryTree->onConflict->onConflictSet;
+	Node *arbiterWhere = queryTree->onConflict->arbiterWhere;
+	Node *onConflictWhere = queryTree->onConflict->onConflictWhere;

 	/*
 	 * onConflictSet is expanded via expand_targetlist() on the standard planner.
@@ -1207,11 +1184,10 @@ UpdateOrDeleteQuery(Query *query)
 static bool
 MasterIrreducibleExpression(Node *expression, bool *varArgument, bool *badCoalesce)
 {
-	bool result;
 	WalkerState data;
 	data.containsVar = data.varArgument = data.badCoalesce = false;

-	result = MasterIrreducibleExpressionWalker(expression, &data);
+	bool result = MasterIrreducibleExpressionWalker(expression, &data);

 	*varArgument |= data.varArgument;
 	*badCoalesce |= data.badCoalesce;
@@ -1379,14 +1355,13 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre
 	List *restrictClauseList = WhereClauseList(joinTree);
 	OpExpr *equalityExpr = MakeOpExpression(column, BTEqualStrategyNumber);
 	Const *rightConst = (Const *) get_rightop((Expr *) equalityExpr);
-	bool predicateIsImplied = false;

 	rightConst->constvalue = newValue->constvalue;
 	rightConst->constisnull = newValue->constisnull;
 	rightConst->constbyval = newValue->constbyval;

-	predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
-											  restrictClauseList, false);
+	bool predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
+												   restrictClauseList, false);
 	if (predicateIsImplied)
 	{
 		/* target entry of the form SET col = <x> WHERE col = <x> AND ... */
@@ -1408,7 +1383,6 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann
 {
 	Oid distributedTableId = ExtractFirstDistributedTableId(query);
 	List *taskList = NIL;
-	Job *job = NULL;
 	bool requiresMasterEvaluation = false;
 	bool deferredPruning = false;
 	Const *partitionKeyValue = NULL;
@@ -1459,7 +1433,7 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann
 		partitionKeyValue = ExtractInsertPartitionKeyValue(originalQuery);
 	}

-	job = CreateJob(originalQuery);
+	Job *job = CreateJob(originalQuery);
 	job->taskList = taskList;
 	job->requiresMasterEvaluation = requiresMasterEvaluation;
 	job->deferredPruning = deferredPruning;
@@ -1475,9 +1449,7 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann
 static Job *
 CreateJob(Query *query)
 {
-	Job *job = NULL;
-
-	job = CitusMakeNode(Job);
+	Job *job = CitusMakeNode(Job);
 	job->jobId = UniqueJobId();
 	job->jobQuery = query;
 	job->taskList = NIL;
@@ -1498,8 +1470,6 @@ static bool
 CanShardPrune(Oid distributedTableId, Query *query)
 {
 	uint32 rangeTableId = 1;
-	Var *partitionColumn = NULL;
-	List *insertValuesList = NIL;
 	ListCell *insertValuesCell = NULL;

 	if (query->commandType != CMD_INSERT)
@@ -1508,7 +1478,7 @@ CanShardPrune(Oid distributedTableId, Query *query)
 		return true;
 	}

-	partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
+	Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
 	if (partitionColumn == NULL)
 	{
 		/* can always do shard pruning for reference tables */
@@ -1516,7 +1486,7 @@ CanShardPrune(Oid distributedTableId, Query *query)
 	}

 	/* get full list of partition values and ensure they are all Consts */
-	insertValuesList = ExtractInsertValuesList(query, partitionColumn);
+	List *insertValuesList = ExtractInsertValuesList(query, partitionColumn);
 	foreach(insertValuesCell, insertValuesList)
 	{
 		InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell);
@@ -1561,7 +1531,6 @@ List *
 RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError)
 {
 	List *insertTaskList = NIL;
-	List *modifyRouteList = NIL;
 	ListCell *modifyRouteCell = NULL;

 	Oid distributedTableId = ExtractFirstDistributedTableId(query);
@@ -1571,7 +1540,7 @@ RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError)

 	Assert(query->commandType == CMD_INSERT);

-	modifyRouteList = BuildRoutesForInsert(query, planningError);
+	List *modifyRouteList = BuildRoutesForInsert(query, planningError);
 	if (*planningError != NULL)
 	{
 		return NIL;
@@ -1599,9 +1568,7 @@ RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError)
 static Task *
 CreateTask(TaskType taskType)
 {
-	Task *task = NULL;
-
-	task = CitusMakeNode(Task);
+	Task *task = CitusMakeNode(Task);
 	task->taskType = taskType;
 	task->jobId = INVALID_JOB_ID;
 	task->taskId = INVALID_TASK_ID;
@@ -1666,22 +1633,18 @@ static Job *
 RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext,
 		  DeferredErrorMessage **planningError)
 {
-	Job *job = NULL;
 	uint64 shardId = INVALID_SHARD_ID;
 	List *placementList = NIL;
 	List *relationShardList = NIL;
 	List *prunedShardIntervalListList = NIL;
-	bool replacePrunedQueryWithDummy = false;
-	bool requiresMasterEvaluation = false;
-	RangeTblEntry *updateOrDeleteRTE = NULL;
 	bool isMultiShardModifyQuery = false;
 	Const *partitionKeyValue = NULL;

 	/* router planner should create task even if it doesn't hit a shard at all */
-	replacePrunedQueryWithDummy = true;
+	bool replacePrunedQueryWithDummy = true;

 	/* check if this query requires master evaluation */
-	requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery);
+	bool requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery);

 	(*planningError) = PlanRouterQuery(originalQuery, plannerRestrictionContext,
 									   &placementList, &shardId, &relationShardList,
@@ -1694,10 +1657,10 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
 		return NULL;
 	}

-	job = CreateJob(originalQuery);
+	Job *job = CreateJob(originalQuery);
 	job->partitionKeyValue = partitionKeyValue;

-	updateOrDeleteRTE = GetUpdateOrDeleteRTE(originalQuery);
+	RangeTblEntry *updateOrDeleteRTE = GetUpdateOrDeleteRTE(originalQuery);

 	/*
 	 * If all of the shards are pruned, we replace the relation RTE into
@@ -1770,16 +1733,12 @@ ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job,
 {
 	if (taskAssignmentPolicy == TASK_ASSIGNMENT_ROUND_ROBIN)
 	{
-		Task *task = NULL;
-		List *reorderedPlacementList = NIL;
-		ShardPlacement *primaryPlacement = NULL;
-
 		/*
 		 * We hit a single shard on router plans, and there should be only
 		 * one task in the task list
 		 */
 		Assert(list_length(job->taskList) == 1);
-		task = (Task *) linitial(job->taskList);
+		Task *task = (Task *) linitial(job->taskList);

 		/*
 		 * For round-robin SELECT queries, we don't want to include the coordinator
@@ -1796,10 +1755,11 @@ ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job,
 		placementList = RemoveCoordinatorPlacement(placementList);

 		/* reorder the placement list */
-		reorderedPlacementList = RoundRobinReorder(task, placementList);
+		List *reorderedPlacementList = RoundRobinReorder(task, placementList);
 		task->taskPlacementList = reorderedPlacementList;

-		primaryPlacement = (ShardPlacement *) linitial(reorderedPlacementList);
+		ShardPlacement *primaryPlacement = (ShardPlacement *) linitial(
+			reorderedPlacementList);
 		ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId,
 								primaryPlacement->nodeName,
 								primaryPlacement->nodePort)));
@@ -1916,16 +1876,14 @@ SingleShardModifyTaskList(Query *query, uint64 jobId, List *relationShardList,
 {
 	Task *task = CreateTask(MODIFY_TASK);
 	StringInfo queryString = makeStringInfo();
-	DistTableCacheEntry *modificationTableCacheEntry = NULL;
-	char modificationPartitionMethod = 0;
 	List *rangeTableList = NIL;
-	RangeTblEntry *updateOrDeleteRTE = NULL;

 	ExtractRangeTableEntryWalker((Node *) query, &rangeTableList);
-	updateOrDeleteRTE = GetUpdateOrDeleteRTE(query);
+	RangeTblEntry *updateOrDeleteRTE = GetUpdateOrDeleteRTE(query);

-	modificationTableCacheEntry = DistributedTableCacheEntry(updateOrDeleteRTE->relid);
-	modificationPartitionMethod = modificationTableCacheEntry->partitionMethod;
+	DistTableCacheEntry *modificationTableCacheEntry = DistributedTableCacheEntry(
+		updateOrDeleteRTE->relid);
+	char modificationPartitionMethod = modificationTableCacheEntry->partitionMethod;

 	if (modificationPartitionMethod == DISTRIBUTE_BY_NONE &&
 		SelectsFromDistributedTable(rangeTableList, query))
@@ -1983,14 +1941,14 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query)
 	foreach(rangeTableCell, rangeTableList)
 	{
 		RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
-		DistTableCacheEntry *cacheEntry = NULL;

 		if (rangeTableEntry->relid == InvalidOid)
 		{
 			continue;
 		}

-		cacheEntry = DistributedTableCacheEntry(rangeTableEntry->relid);
+		DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(
+			rangeTableEntry->relid);
 		if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE &&
 			(resultRangeTableEntry == NULL || resultRangeTableEntry->relid !=
 			 rangeTableEntry->relid))
@@ -2242,7 +2200,6 @@ GetAnchorShardId(List *prunedShardIntervalListList)
 	foreach(prunedShardIntervalListCell, prunedShardIntervalListList)
 	{
 		List *prunedShardIntervalList = (List *) lfirst(prunedShardIntervalListCell);
-		ShardInterval *shardInterval = NULL;

 		/* no shard is present or all shards are pruned out case will be handled later */
 		if (prunedShardIntervalList == NIL)
@@ -2250,7 +2207,7 @@ GetAnchorShardId(List *prunedShardIntervalListList)
 			continue;
 		}

-		shardInterval = linitial(prunedShardIntervalList);
+		ShardInterval *shardInterval = linitial(prunedShardIntervalList);

 		if (ReferenceTableShardId(shardInterval->shardId))
 		{
@@ -2341,7 +2298,6 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte
 		List *prunedShardIntervalList = NIL;
 		List *joinInfoList = relationRestriction->relOptInfo->joininfo;
 		List *pseudoRestrictionList = extract_actual_clauses(joinInfoList, true);
-		bool whereFalseQuery = false;

 		relationRestriction->prunedShardIntervalList = NIL;

@@ -2351,7 +2307,7 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte
 		 * inside relOptInfo->joininfo list. We treat such cases as if all
 		 * shards of the table are pruned out.
 		 */
-		whereFalseQuery = ContainsFalseClause(pseudoRestrictionList);
+		bool whereFalseQuery = ContainsFalseClause(pseudoRestrictionList);
 		if (!whereFalseQuery && shardCount > 0)
 		{
 			Const *restrictionPartitionValueConst = NULL;
@@ -2445,9 +2401,6 @@ WorkersContainingAllShards(List *prunedShardIntervalsList)
 	foreach(prunedShardIntervalCell, prunedShardIntervalsList)
 	{
 		List *shardIntervalList = (List *) lfirst(prunedShardIntervalCell);
-		ShardInterval *shardInterval = NULL;
-		uint64 shardId = INVALID_SHARD_ID;
-		List *newPlacementList = NIL;

 		if (shardIntervalList == NIL)
 		{
@@ -2456,11 +2409,11 @@ WorkersContainingAllShards(List *prunedShardIntervalsList)

 		Assert(list_length(shardIntervalList) == 1);

-		shardInterval = (ShardInterval *) linitial(shardIntervalList);
-		shardId = shardInterval->shardId;
+		ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList);
+		uint64 shardId = shardInterval->shardId;

 		/* retrieve all active shard placements for this shard */
-		newPlacementList = FinalizedShardPlacementList(shardId);
+		List *newPlacementList = FinalizedShardPlacementList(shardId);

 		if (firstShard)
 		{
@@ -2506,8 +2459,6 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId);
 	char partitionMethod = cacheEntry->partitionMethod;
 	uint32 rangeTableId = 1;
-	Var *partitionColumn = NULL;
-	List *insertValuesList = NIL;
 	List *modifyRouteList = NIL;
 	ListCell *insertValuesCell = NULL;

@@ -2516,24 +2467,20 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 	/* reference tables can only have one shard */
 	if (partitionMethod == DISTRIBUTE_BY_NONE)
 	{
-		int shardCount = 0;
 		List *shardIntervalList = LoadShardIntervalList(distributedTableId);
-		RangeTblEntry *valuesRTE = NULL;
-		ShardInterval *shardInterval = NULL;
-		ModifyRoute *modifyRoute = NULL;

-		shardCount = list_length(shardIntervalList);
+		int shardCount = list_length(shardIntervalList);
 		if (shardCount != 1)
 		{
 			ereport(ERROR, (errmsg("reference table cannot have %d shards", shardCount)));
 		}

-		shardInterval = linitial(shardIntervalList);
-		modifyRoute = palloc(sizeof(ModifyRoute));
+		ShardInterval *shardInterval = linitial(shardIntervalList);
+		ModifyRoute *modifyRoute = palloc(sizeof(ModifyRoute));

 		modifyRoute->shardId = shardInterval->shardId;

-		valuesRTE = ExtractDistributedInsertValuesRTE(query);
+		RangeTblEntry *valuesRTE = ExtractDistributedInsertValuesRTE(query);
 		if (valuesRTE != NULL)
 		{
 			/* add the values list for a multi-row INSERT */
@@ -2549,18 +2496,15 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 		return modifyRouteList;
 	}

-	partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
+	Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);

 	/* get full list of insert values and iterate over them to prune */
-	insertValuesList = ExtractInsertValuesList(query, partitionColumn);
+	List *insertValuesList = ExtractInsertValuesList(query, partitionColumn);

 	foreach(insertValuesCell, insertValuesList)
 	{
 		InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell);
-		Const *partitionValueConst = NULL;
 		List *prunedShardIntervalList = NIL;
-		int prunedShardIntervalCount = 0;
-		ShardInterval *targetShard = NULL;

 		if (!IsA(insertValues->partitionValueExpr, Const))
 		{
@@ -2568,7 +2512,7 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 			return NIL;
 		}

-		partitionValueConst = (Const *) insertValues->partitionValueExpr;
+		Const *partitionValueConst = (Const *) insertValues->partitionValueExpr;
 		if (partitionValueConst->constisnull)
 		{
 			ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
@@ -2580,10 +2524,9 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 			DISTRIBUTE_BY_RANGE)
 		{
 			Datum partitionValue = partitionValueConst->constvalue;
-			ShardInterval *shardInterval = NULL;

 			cacheEntry = DistributedTableCacheEntry(distributedTableId);
-			shardInterval = FindShardInterval(partitionValue, cacheEntry);
+			ShardInterval *shardInterval = FindShardInterval(partitionValue, cacheEntry);
 			if (shardInterval != NULL)
 			{
 				prunedShardIntervalList = list_make1(shardInterval);
@@ -2591,7 +2534,6 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 		}
 		else
 		{
-			List *restrictClauseList = NIL;
 			Index tableId = 1;
 			OpExpr *equalityExpr = MakeOpExpression(partitionColumn,
 													BTEqualStrategyNumber);
@@ -2604,13 +2546,13 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 			rightConst->constisnull = partitionValueConst->constisnull;
 			rightConst->constbyval = partitionValueConst->constbyval;

-			restrictClauseList = list_make1(equalityExpr);
+			List *restrictClauseList = list_make1(equalityExpr);

 			prunedShardIntervalList = PruneShards(distributedTableId, tableId,
 												  restrictClauseList, NULL);
 		}

-		prunedShardIntervalCount = list_length(prunedShardIntervalList);
+		int prunedShardIntervalCount = list_length(prunedShardIntervalList);
 		if (prunedShardIntervalCount != 1)
 		{
 			char *partitionKeyString = cacheEntry->partitionKeyString;
@@ -2651,7 +2593,7 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 			return NIL;
 		}

-		targetShard = (ShardInterval *) linitial(prunedShardIntervalList);
+		ShardInterval *targetShard = (ShardInterval *) linitial(prunedShardIntervalList);
 		insertValues->shardId = targetShard->shardId;
 	}

@@ -2768,19 +2710,15 @@ NormalizeMultiRowInsertTargetList(Query *query)
 	{
 		TargetEntry *targetEntry = lfirst(targetEntryCell);
 		Node *targetExprNode = (Node *) targetEntry->expr;
-		Oid targetType = InvalidOid;
-		int32 targetTypmod = -1;
-		Oid targetColl = InvalidOid;
-		Var *syntheticVar = NULL;

 		/* RTE_VALUES comes 2nd, after destination table */
 		Index valuesVarno = 2;

 		targetEntryNo++;

-		targetType = exprType(targetExprNode);
-		targetTypmod = exprTypmod(targetExprNode);
-		targetColl = exprCollation(targetExprNode);
+		Oid targetType = exprType(targetExprNode);
+		int32 targetTypmod = exprTypmod(targetExprNode);
+		Oid targetColl = exprCollation(targetExprNode);

 		valuesRTE->coltypes = lappend_oid(valuesRTE->coltypes, targetType);
 		valuesRTE->coltypmods = lappend_int(valuesRTE->coltypmods, targetTypmod);
@@ -2794,8 +2732,8 @@ NormalizeMultiRowInsertTargetList(Query *query)
 		}

 		/* replace the original expression with a Var referencing values_lists */
-		syntheticVar = makeVar(valuesVarno, targetEntryNo, targetType, targetTypmod,
-							   targetColl, 0);
+		Var *syntheticVar = makeVar(valuesVarno, targetEntryNo, targetType, targetTypmod,
+									targetColl, 0);
 		targetEntry->expr = (Expr *) syntheticVar;
 	}
 }
@@ -2935,11 +2873,10 @@ ExtractInsertValuesList(Query *query, Var *partitionColumn)
 	if (IsA(targetEntry->expr, Var))
 	{
 		Var *partitionVar = (Var *) targetEntry->expr;
-		RangeTblEntry *referencedRTE = NULL;
 		ListCell *valuesListCell = NULL;
 		Index ivIndex = 0;

-		referencedRTE = rt_fetch(partitionVar->varno, query->rtable);
+		RangeTblEntry *referencedRTE = rt_fetch(partitionVar->varno, query->rtable);
 		foreach(valuesListCell, referencedRTE->values_lists)
 		{
 			InsertValues *insertValues = (InsertValues *) palloc(sizeof(InsertValues));
@@ -2980,10 +2917,7 @@ ExtractInsertPartitionKeyValue(Query *query)
 {
 	Oid distributedTableId = ExtractFirstDistributedTableId(query);
 	uint32 rangeTableId = 1;
-	Var *partitionColumn = NULL;
-	TargetEntry *targetEntry = NULL;
 	Const *singlePartitionValueConst = NULL;
-	Node *targetExpression = NULL;

 	char partitionMethod = PartitionMethod(distributedTableId);
 	if (partitionMethod == DISTRIBUTE_BY_NONE)
@@ -2991,15 +2925,16 @@ ExtractInsertPartitionKeyValue(Query *query)
 		return NULL;
 	}

-	partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
-	targetEntry = get_tle_by_resno(query->targetList, partitionColumn->varattno);
+	Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId);
+	TargetEntry *targetEntry = get_tle_by_resno(query->targetList,
+												partitionColumn->varattno);
 	if (targetEntry == NULL)
 	{
 		/* partition column value not specified */
 		return NULL;
 	}

-	targetExpression = strip_implicit_coercions((Node *) targetEntry->expr);
+	Node *targetExpression = strip_implicit_coercions((Node *) targetEntry->expr);

 	/*
 	 * Multi-row INSERTs have a Var in the target list that points to
@@ -3008,10 +2943,9 @@ ExtractInsertPartitionKeyValue(Query *query)
 	if (IsA(targetExpression, Var))
 	{
 		Var *partitionVar = (Var *) targetExpression;
-		RangeTblEntry *referencedRTE = NULL;
 		ListCell *valuesListCell = NULL;

-		referencedRTE = rt_fetch(partitionVar->varno, query->rtable);
+		RangeTblEntry *referencedRTE = rt_fetch(partitionVar->varno, query->rtable);

 		foreach(valuesListCell, referencedRTE->values_lists)
 		{
@@ -3019,7 +2953,6 @@ ExtractInsertPartitionKeyValue(Query *query)
 			Node *partitionValueNode = list_nth(rowValues, partitionVar->varattno - 1);
 			Expr *partitionValueExpr = (Expr *) strip_implicit_coercions(
 				partitionValueNode);
-			Const *partitionValueConst = NULL;

 			if (!IsA(partitionValueExpr, Const))
 			{
@@ -3028,7 +2961,7 @@ ExtractInsertPartitionKeyValue(Query *query)
 				break;
 			}

-			partitionValueConst = (Const *) partitionValueExpr;
+			Const *partitionValueConst = (Const *) partitionValueExpr;

 			if (singlePartitionValueConst == NULL)
 			{
@@ -3098,7 +3031,6 @@ MultiRouterPlannableQuery(Query *query)
 		{
 			/* only hash partitioned tables are supported */
 			Oid distributedTableId = rte->relid;
-			char partitionMethod = 0;

 			if (!IsDistributedTable(distributedTableId))
 			{
@@ -3109,7 +3041,7 @@ MultiRouterPlannableQuery(Query *query)
 									 NULL, NULL);
 			}

-			partitionMethod = PartitionMethod(distributedTableId);
+			char partitionMethod = PartitionMethod(distributedTableId);
 			if (!(partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod ==
 				  DISTRIBUTE_BY_NONE || partitionMethod == DISTRIBUTE_BY_RANGE))
 			{
|
@ -43,9 +43,6 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
|
|||
Plan *plan = &node->plan;
|
||||
int numCols = list_length(distinctList);
|
||||
int keyno = 0;
|
||||
AttrNumber *uniqColIdx;
|
||||
Oid *uniqOperators;
|
||||
Oid *uniqCollations;
|
||||
ListCell *slitem;
|
||||
|
||||
plan->targetlist = lefttree->targetlist;
|
||||
|
@ -58,9 +55,9 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
|
|||
* operators, as wanted by executor
|
||||
*/
|
||||
Assert(numCols > 0);
|
||||
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
|
||||
uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
AttrNumber *uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
|
||||
Oid *uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
Oid *uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
|
||||
foreach(slitem, distinctList)
|
||||
{
|
||||
|
@ -97,8 +94,6 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
|
|||
Plan *plan = &node->plan;
|
||||
int numCols = list_length(distinctList);
|
||||
int keyno = 0;
|
||||
AttrNumber *uniqColIdx;
|
||||
Oid *uniqOperators;
|
||||
ListCell *slitem;
|
||||
|
||||
plan->targetlist = lefttree->targetlist;
|
||||
|
@ -111,8 +106,8 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
|
|||
* operators, as wanted by executor
|
||||
*/
|
||||
Assert(numCols > 0);
|
||||
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
|
||||
uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
AttrNumber *uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
|
||||
Oid *uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
|
||||
|
||||
foreach(slitem, distinctList)
|
||||
{
|
||||
|
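The hunks above also show what the conversion leaves alone: `keyno`, `slitem`, and `singlePartitionValueConst` keep their up-front declarations because they are assigned more than once, or first assigned inside a nested block whose scope would hide them. A sketch of the case that must not be converted, with hypothetical names:

    static int
    FirstPositiveValue(List *valueList)
    {
        ListCell *valueCell = NULL;
        int firstPositive = -1;    /* must stay declared here ... */

        foreach(valueCell, valueList)
        {
            int value = lfirst_int(valueCell);
            if (value > 0)
            {
                /*
                 * ... because moving the declaration here would confine the
                 * variable to this block and break the return below.
                 */
                firstPositive = value;
                break;
            }
        }

        return firstPositive;
    }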
@@ -49,14 +49,10 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
 {
     ColocatedJoinChecker colocatedJoinChecker;

-    RangeTblEntry *anchorRangeTblEntry = NULL;
     Query *anchorSubquery = NULL;
-    PlannerRestrictionContext *anchorPlannerRestrictionContext = NULL;
-    RelationRestrictionContext *anchorRelationRestrictionContext = NULL;
-    List *anchorRestrictionEquivalences = NIL;

     /* we couldn't pick an anchor subquery, no need to continue */
-    anchorRangeTblEntry = AnchorRte(subquery);
+    RangeTblEntry *anchorRangeTblEntry = AnchorRte(subquery);
     if (anchorRangeTblEntry == NULL)
     {
         colocatedJoinChecker.anchorRelationRestrictionList = NIL;

@@ -84,11 +80,11 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
         pg_unreachable();
     }

-    anchorPlannerRestrictionContext =
+    PlannerRestrictionContext *anchorPlannerRestrictionContext =
         FilterPlannerRestrictionForQuery(restrictionContext, anchorSubquery);
-    anchorRelationRestrictionContext =
+    RelationRestrictionContext *anchorRelationRestrictionContext =
         anchorPlannerRestrictionContext->relationRestrictionContext;
-    anchorRestrictionEquivalences =
+    List *anchorRestrictionEquivalences =
         GenerateAllAttributeEquivalences(anchorPlannerRestrictionContext);

     /* fill the non colocated planning context */

@@ -191,9 +187,6 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
     List *filteredRestrictionList =
         filteredPlannerContext->relationRestrictionContext->relationRestrictionList;

-    List *unionedRelationRestrictionList = NULL;
-    RelationRestrictionContext *unionedRelationRestrictionContext = NULL;
-    PlannerRestrictionContext *unionedPlannerRestrictionContext = NULL;

     /*
      * There are no relations in the input subquery, such as a subquery

@@ -213,7 +206,7 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
      * forming this temporary context is to check whether the context contains
      * distribution key equality or not.
      */
-    unionedRelationRestrictionList =
+    List *unionedRelationRestrictionList =
         UnionRelationRestrictionLists(anchorRelationRestrictionList,
                                       filteredRestrictionList);

@@ -224,11 +217,13 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
      * join restrictions, we're already relying on the attributeEquivalances
      * provided by the context.
      */
-    unionedRelationRestrictionContext = palloc0(sizeof(RelationRestrictionContext));
+    RelationRestrictionContext *unionedRelationRestrictionContext = palloc0(
+        sizeof(RelationRestrictionContext));
     unionedRelationRestrictionContext->relationRestrictionList =
         unionedRelationRestrictionList;

-    unionedPlannerRestrictionContext = palloc0(sizeof(PlannerRestrictionContext));
+    PlannerRestrictionContext *unionedPlannerRestrictionContext = palloc0(
+        sizeof(PlannerRestrictionContext));
     unionedPlannerRestrictionContext->relationRestrictionContext =
         unionedRelationRestrictionContext;

@@ -256,14 +251,11 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation)
 {
     Query *subquery = makeNode(Query);
     RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
-    RangeTblEntry *newRangeTableEntry = NULL;
-    Var *targetColumn = NULL;
-    TargetEntry *targetEntry = NULL;

     subquery->commandType = CMD_SELECT;

     /* we copy the input rteRelation to preserve the rteIdentity */
-    newRangeTableEntry = copyObject(rteRelation);
+    RangeTblEntry *newRangeTableEntry = copyObject(rteRelation);
     subquery->rtable = list_make1(newRangeTableEntry);

     /* set the FROM expression to the subquery */

@@ -272,11 +264,12 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation)
     subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);

     /* Need the whole row as a junk var */
-    targetColumn = makeWholeRowVar(newRangeTableEntry, newRangeTableRef->rtindex, 0,
-                                   false);
+    Var *targetColumn = makeWholeRowVar(newRangeTableEntry, newRangeTableRef->rtindex, 0,
+                                        false);

     /* create a dummy target entry */
-    targetEntry = makeTargetEntry((Expr *) targetColumn, 1, "wholerow", true);
+    TargetEntry *targetEntry = makeTargetEntry((Expr *) targetColumn, 1, "wholerow",
+                                               true);

     subquery->targetList = lappend(subquery->targetList, targetEntry);

@@ -292,15 +285,13 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation)
 static List *
 UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList)
 {
-    RelationRestrictionContext *unionedRestrictionContext = NULL;
     List *unionedRelationRestrictionList = NULL;
     ListCell *relationRestrictionCell = NULL;
     Relids rteIdentities = NULL;
-    List *allRestrictionList = NIL;

     /* list_concat destructively modifies the first list, thus copy it */
     firstRelationList = list_copy(firstRelationList);
-    allRestrictionList = list_concat(firstRelationList, secondRelationList);
+    List *allRestrictionList = list_concat(firstRelationList, secondRelationList);

     foreach(relationRestrictionCell, allRestrictionList)
     {

@@ -320,7 +311,8 @@ UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList)
         rteIdentities = bms_add_member(rteIdentities, rteIdentity);
     }

-    unionedRestrictionContext = palloc0(sizeof(RelationRestrictionContext));
+    RelationRestrictionContext *unionedRestrictionContext = palloc0(
+        sizeof(RelationRestrictionContext));
     unionedRestrictionContext->relationRestrictionList = unionedRelationRestrictionList;

     return unionedRelationRestrictionList;
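Prepending the type name can push a line past the style's width limit, so several merged declarations in this file are re-wrapped with the argument continued on the next line (the `palloc0(...)` calls above). A sketch of that reflow, with a hypothetical struct standing in for the planner context types:

    /* hypothetical stand-in for the restriction context structs above */
    typedef struct ExampleRestrictionContext
    {
        List *relationRestrictionList;
    } ExampleRestrictionContext;

    static ExampleRestrictionContext *
    MakeEmptyRestrictionContext(void)
    {
        /*
         * With the type name prepended, the palloc0() call no longer fits
         * on one line, so the indenter breaks after the opening paren,
         * as in the unionedRestrictionContext hunks above.
         */
        ExampleRestrictionContext *unionedContext = palloc0(
            sizeof(ExampleRestrictionContext));

        unionedContext->relationRestrictionList = NIL;
        return unionedContext;
    }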
@@ -107,7 +107,6 @@ bool
 ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery,
                           PlannerRestrictionContext *plannerRestrictionContext)
 {
-    List *qualifierList = NIL;
     StringInfo errorMessage = NULL;

     /*

@@ -183,7 +182,7 @@ ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery,
      * Some unsupported join clauses in logical planner
      * may be supported by subquery pushdown planner.
      */
-    qualifierList = QualifierList(rewrittenQuery->jointree);
+    List *qualifierList = QualifierList(rewrittenQuery->jointree);
     if (DeferErrorIfUnsupportedClause(qualifierList) != NULL)
     {
         return true;

@@ -283,7 +282,6 @@ bool
 WhereOrHavingClauseContainsSubquery(Query *query)
 {
     FromExpr *joinTree = query->jointree;
-    Node *queryQuals = NULL;

     if (FindNodeCheck(query->havingQual, IsNodeSubquery))
     {

@@ -295,7 +293,7 @@ WhereOrHavingClauseContainsSubquery(Query *query)
         return false;
     }

-    queryQuals = joinTree->quals;
+    Node *queryQuals = joinTree->quals;

     return FindNodeCheck(queryQuals, IsNodeSubquery);
 }

@@ -450,15 +448,13 @@ WindowPartitionOnDistributionColumn(Query *query)
     foreach(windowClauseCell, windowClauseList)
     {
         WindowClause *windowClause = lfirst(windowClauseCell);
-        List *groupTargetEntryList = NIL;
-        bool partitionOnDistributionColumn = false;
         List *partitionClauseList = windowClause->partitionClause;
         List *targetEntryList = query->targetList;

-        groupTargetEntryList =
+        List *groupTargetEntryList =
            GroupTargetEntryList(partitionClauseList, targetEntryList);

-        partitionOnDistributionColumn =
+        bool partitionOnDistributionColumn =
            TargetListOnPartitionColumn(query, groupTargetEntryList);

         if (!partitionOnDistributionColumn)

@@ -495,14 +491,13 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree,
                       PlannerRestrictionContext *plannerRestrictionContext)
 {
     MultiNode *multiQueryNode = NULL;
-    DeferredErrorMessage *subqueryPushdownError = NULL;
-    DeferredErrorMessage *unsupportedQueryError = NULL;

     /*
      * This is a generic error check that applies to both subquery pushdown
      * and single table repartition subquery.
      */
-    unsupportedQueryError = DeferErrorIfQueryNotSupported(originalQuery);
+    DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported(
+        originalQuery);
     if (unsupportedQueryError != NULL)
     {
         RaiseDeferredError(unsupportedQueryError, ERROR);

@@ -513,38 +508,35 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree,
      * to create a logical plan, continue with trying the single table
      * repartition subquery planning.
      */
-    subqueryPushdownError = DeferErrorIfUnsupportedSubqueryPushdown(originalQuery,
-                                                                    plannerRestrictionContext);
+    DeferredErrorMessage *subqueryPushdownError = DeferErrorIfUnsupportedSubqueryPushdown(
+        originalQuery,
+        plannerRestrictionContext);
     if (!subqueryPushdownError)
     {
         multiQueryNode = SubqueryPushdownMultiNodeTree(originalQuery);
     }
     else if (subqueryPushdownError)
     {
-        bool singleRelationRepartitionSubquery = false;
-        RangeTblEntry *subqueryRangeTableEntry = NULL;
-        Query *subqueryTree = NULL;
-        DeferredErrorMessage *repartitionQueryError = NULL;
-        List *subqueryEntryList = NULL;

         /*
          * If not eligible for single relation repartition query, we should raise
          * subquery pushdown error.
          */
-        singleRelationRepartitionSubquery =
+        bool singleRelationRepartitionSubquery =
            SingleRelationRepartitionSubquery(originalQuery);
         if (!singleRelationRepartitionSubquery)
         {
             RaiseDeferredErrorInternal(subqueryPushdownError, ERROR);
         }

-        subqueryEntryList = SubqueryEntryList(queryTree);
-        subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList);
+        List *subqueryEntryList = SubqueryEntryList(queryTree);
+        RangeTblEntry *subqueryRangeTableEntry = (RangeTblEntry *) linitial(
+            subqueryEntryList);
         Assert(subqueryRangeTableEntry->rtekind == RTE_SUBQUERY);

-        subqueryTree = subqueryRangeTableEntry->subquery;
+        Query *subqueryTree = subqueryRangeTableEntry->subquery;

-        repartitionQueryError = DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree);
+        DeferredErrorMessage *repartitionQueryError =
+            DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree);
         if (repartitionQueryError)
         {
             RaiseDeferredErrorInternal(repartitionQueryError, ERROR);

@@ -574,7 +566,6 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery,
     bool outerMostQueryHasLimit = false;
     ListCell *subqueryCell = NULL;
     List *subqueryList = NIL;
-    DeferredErrorMessage *error = NULL;

     if (originalQuery->limitCount != NULL)
     {

@@ -610,7 +601,7 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery,
     }

     /* we shouldn't allow reference tables in the FROM clause when the query has sublinks */
-    error = DeferErrorIfFromClauseRecurs(originalQuery);
+    DeferredErrorMessage *error = DeferErrorIfFromClauseRecurs(originalQuery);
     if (error)
     {
         return error;

@@ -666,14 +657,12 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery,
 static DeferredErrorMessage *
 DeferErrorIfFromClauseRecurs(Query *queryTree)
 {
-    RecurringTuplesType recurType = RECURRING_TUPLES_INVALID;

     if (!queryTree->hasSubLinks)
     {
         return NULL;
     }

-    recurType = FromClauseRecurringTupleType(queryTree);
+    RecurringTuplesType recurType = FromClauseRecurringTupleType(queryTree);
     if (recurType == RECURRING_TUPLES_REFERENCE_TABLE)
     {
         return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,

@@ -892,9 +881,9 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
     bool preconditionsSatisfied = true;
     char *errorDetail = NULL;
     StringInfo errorInfo = NULL;
-    DeferredErrorMessage *deferredError = NULL;

-    deferredError = DeferErrorIfUnsupportedTableCombination(subqueryTree);
+    DeferredErrorMessage *deferredError = DeferErrorIfUnsupportedTableCombination(
+        subqueryTree);
     if (deferredError)
     {
         return deferredError;

@@ -1187,9 +1176,8 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)

     if (IsA(leftArg, RangeTblRef))
     {
-        Query *leftArgSubquery = NULL;
         leftArgRTI = ((RangeTblRef *) leftArg)->rtindex;
-        leftArgSubquery = rt_fetch(leftArgRTI, subqueryTree->rtable)->subquery;
+        Query *leftArgSubquery = rt_fetch(leftArgRTI, subqueryTree->rtable)->subquery;
         recurType = FromClauseRecurringTupleType(leftArgSubquery);
         if (recurType != RECURRING_TUPLES_INVALID)
         {

@@ -1199,9 +1187,9 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)

     if (IsA(rightArg, RangeTblRef))
     {
-        Query *rightArgSubquery = NULL;
         rightArgRTI = ((RangeTblRef *) rightArg)->rtindex;
-        rightArgSubquery = rt_fetch(rightArgRTI, subqueryTree->rtable)->subquery;
+        Query *rightArgSubquery = rt_fetch(rightArgRTI,
                                            subqueryTree->rtable)->subquery;
         recurType = FromClauseRecurringTupleType(rightArgSubquery);
         if (recurType != RECURRING_TUPLES_INVALID)
         {

@@ -1251,7 +1239,6 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
 static bool
 ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
 {
-    bool walkerResult = false;
     if (node == NULL)
     {
         return false;

@@ -1264,8 +1251,8 @@ ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
         (*setOperationList) = lappend(*setOperationList, setOperation);
     }

-    walkerResult = expression_tree_walker(node, ExtractSetOperationStatmentWalker,
-                                          setOperationList);
+    bool walkerResult = expression_tree_walker(node, ExtractSetOperationStatmentWalker,
+                                               setOperationList);

     return walkerResult;
 }
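`ExtractSetOperationStatmentWalker` is typical of the walker functions in this file: after the hunk, the only remaining local is created directly from the recursive `expression_tree_walker()` call. A minimal sketch of the shape, using a hypothetical walker in PostgreSQL's walker convention:

    /* counts SubLink nodes in an expression tree; context is the running count */
    static bool
    CountSublinksWalker(Node *node, int *sublinkCount)
    {
        if (node == NULL)
        {
            return false;
        }

        if (IsA(node, SubLink))
        {
            (*sublinkCount)++;
        }

        /* declared at first assignment, exactly as the converted code does */
        bool walkerResult = expression_tree_walker(node, CountSublinksWalker,
                                                   sublinkCount);

        return walkerResult;
    }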
@@ -1522,21 +1509,11 @@ static MultiNode *
 SubqueryPushdownMultiNodeTree(Query *queryTree)
 {
     List *targetEntryList = queryTree->targetList;
-    List *columnList = NIL;
-    List *flattenedExprList = NIL;
-    List *targetColumnList = NIL;
     MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect);
-    MultiTable *subqueryNode = NULL;
-    MultiProject *projectNode = NULL;
-    MultiExtendedOp *extendedOpNode = NULL;
-    MultiNode *currentTopNode = NULL;
-    Query *pushedDownQuery = NULL;
-    List *subqueryTargetEntryList = NIL;
-    List *havingClauseColumnList = NIL;
-    DeferredErrorMessage *unsupportedQueryError = NULL;

     /* verify we can perform distributed planning on this query */
-    unsupportedQueryError = DeferErrorIfQueryNotSupported(queryTree);
+    DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported(
+        queryTree);
     if (unsupportedQueryError != NULL)
     {
         RaiseDeferredError(unsupportedQueryError, ERROR);

@@ -1588,14 +1565,14 @@ SubqueryPushdownMultiNodeTree(Query *queryTree)
      * columnList. Columns mentioned in multiProject node and multiExtendedOp
      * node are indexed with their respective position in columnList.
      */
-    targetColumnList = pull_var_clause_default((Node *) targetEntryList);
-    havingClauseColumnList = pull_var_clause_default(queryTree->havingQual);
-    columnList = list_concat(targetColumnList, havingClauseColumnList);
+    List *targetColumnList = pull_var_clause_default((Node *) targetEntryList);
+    List *havingClauseColumnList = pull_var_clause_default(queryTree->havingQual);
+    List *columnList = list_concat(targetColumnList, havingClauseColumnList);

-    flattenedExprList = FlattenJoinVars(columnList, queryTree);
+    List *flattenedExprList = FlattenJoinVars(columnList, queryTree);

     /* create a target entry for each unique column */
-    subqueryTargetEntryList = CreateSubqueryTargetEntryList(flattenedExprList);
+    List *subqueryTargetEntryList = CreateSubqueryTargetEntryList(flattenedExprList);

     /*
      * Update varno/varattno fields of columns in columnList to

@@ -1605,7 +1582,7 @@ SubqueryPushdownMultiNodeTree(Query *queryTree)
                                             subqueryTargetEntryList);

     /* new query only has target entries, join tree, and rtable*/
-    pushedDownQuery = makeNode(Query);
+    Query *pushedDownQuery = makeNode(Query);
     pushedDownQuery->commandType = queryTree->commandType;
     pushedDownQuery->targetList = subqueryTargetEntryList;
     pushedDownQuery->jointree = copyObject(queryTree->jointree);

@@ -1614,13 +1591,13 @@ SubqueryPushdownMultiNodeTree(Query *queryTree)
     pushedDownQuery->querySource = queryTree->querySource;
     pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;

-    subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery);
+    MultiTable *subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery);

     SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode);
-    currentTopNode = (MultiNode *) subqueryCollectNode;
+    MultiNode *currentTopNode = (MultiNode *) subqueryCollectNode;

     /* build project node for the columns to project */
-    projectNode = MultiProjectNode(targetEntryList);
+    MultiProject *projectNode = MultiProjectNode(targetEntryList);
     SetChild((MultiUnaryNode *) projectNode, currentTopNode);
     currentTopNode = (MultiNode *) projectNode;

@@ -1630,7 +1607,7 @@ SubqueryPushdownMultiNodeTree(Query *queryTree)
      * distinguish between aggregates and expressions; and we address this later
      * in the logical optimizer.
      */
-    extendedOpNode = MultiExtendedOpNode(queryTree);
+    MultiExtendedOp *extendedOpNode = MultiExtendedOpNode(queryTree);

     /*
      * Postgres standard planner converts having qual node to a list of and

@@ -1724,8 +1701,6 @@ FlattenJoinVarsMutator(Node *node, Query *queryTree)
         RangeTblEntry *rte = rt_fetch(column->varno, queryTree->rtable);
         if (rte->rtekind == RTE_JOIN)
         {
-            Node *newColumn = NULL;

             /*
              * if join has an alias, it is copied over join RTE. We should
              * reference this RTE.

@@ -1737,7 +1712,7 @@ FlattenJoinVarsMutator(Node *node, Query *queryTree)

             /* join RTE does not have and alias defined at this level, deeper look is needed */
             Assert(column->varattno > 0);
-            newColumn = (Node *) list_nth(rte->joinaliasvars, column->varattno - 1);
+            Node *newColumn = (Node *) list_nth(rte->joinaliasvars, column->varattno - 1);
             Assert(newColumn != NULL);

             /*

@@ -1894,7 +1869,6 @@ UpdateColumnToMatchingTargetEntry(Var *column, Node *flattenedExpr, List *target
 static MultiTable *
 MultiSubqueryPushdownTable(Query *subquery)
 {
-    MultiTable *subqueryTableNode = NULL;
     StringInfo rteName = makeStringInfo();
     List *columnNamesList = NIL;
     ListCell *targetEntryCell = NULL;

@@ -1907,7 +1881,7 @@ MultiSubqueryPushdownTable(Query *subquery)
         columnNamesList = lappend(columnNamesList, makeString(targetEntry->resname));
     }

-    subqueryTableNode = CitusMakeNode(MultiTable);
+    MultiTable *subqueryTableNode = CitusMakeNode(MultiTable);
     subqueryTableNode->subquery = subquery;
     subqueryTableNode->relationId = SUBQUERY_PUSHDOWN_RELATION_ID;
     subqueryTableNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID;
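`SubqueryPushdownMultiNodeTree()` is among the largest single cleanups in this commit: ten forward declarations vanish and each planner node is declared where it is produced. Declarations after statements are a C99 feature, which the converted code now leans on throughout. A minimal sketch under that assumption (hypothetical function, `CitusMakeNode` as used in the hunks above):

    /* mixed declarations and statements: valid since C99 */
    static MultiNode *
    BuildTrivialPlan(Query *queryTree)
    {
        /* a statement first ... */
        Assert(queryTree != NULL);

        /* ... then a declaration at its first assignment */
        MultiCollect *collectNode = CitusMakeNode(MultiCollect);

        return (MultiNode *) collectNode;
    }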
@@ -189,7 +189,6 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
                                      PlannerRestrictionContext *plannerRestrictionContext)
 {
     RecursivePlanningContext context;
-    DeferredErrorMessage *error = NULL;

     recursivePlanningDepth++;

@@ -217,7 +216,8 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
     context.allDistributionKeysInQueryAreEqual =
         AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext);

-    error = RecursivelyPlanSubqueriesAndCTEs(originalQuery, &context);
+    DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(originalQuery,
+                                                                   &context);
     if (error != NULL)
     {
         recursivePlanningDepth--;

@@ -257,9 +257,7 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
 static DeferredErrorMessage *
 RecursivelyPlanSubqueriesAndCTEs(Query *query, RecursivePlanningContext *context)
 {
-    DeferredErrorMessage *error = NULL;
-
-    error = RecursivelyPlanCTEs(query, context);
+    DeferredErrorMessage *error = RecursivelyPlanCTEs(query, context);
     if (error != NULL)
     {
         return error;

@@ -410,14 +408,12 @@ ContainsSubquery(Query *query)
 static void
 RecursivelyPlanNonColocatedSubqueries(Query *subquery, RecursivePlanningContext *context)
 {
-    ColocatedJoinChecker colocatedJoinChecker;
-
     FromExpr *joinTree = subquery->jointree;
-    PlannerRestrictionContext *restrictionContext = NULL;

     /* create the context for the non colocated subquery planning */
-    restrictionContext = context->plannerRestrictionContext;
-    colocatedJoinChecker = CreateColocatedJoinChecker(subquery, restrictionContext);
+    PlannerRestrictionContext *restrictionContext = context->plannerRestrictionContext;
+    ColocatedJoinChecker colocatedJoinChecker = CreateColocatedJoinChecker(subquery,
+                                                                           restrictionContext);

     /*
      * Although this is a rare case, we weren't able to pick an anchor

@@ -490,7 +486,6 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode,
         int rangeTableIndex = ((RangeTblRef *) joinNode)->rtindex;
         List *rangeTableList = colocatedJoinChecker->subquery->rtable;
         RangeTblEntry *rte = rt_fetch(rangeTableIndex, rangeTableList);
-        Query *subquery = NULL;

         /* we're only interested in subqueries for now */
         if (rte->rtekind != RTE_SUBQUERY)

@@ -502,7 +497,7 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode,
          * If the subquery is not colocated with the anchor subquery,
          * recursively plan it.
          */
-        subquery = rte->subquery;
+        Query *subquery = rte->subquery;
         if (!SubqueryColocated(subquery, colocatedJoinChecker))
         {
             RecursivelyPlanSubquery(subquery, recursivePlanningContext);

@@ -560,7 +555,6 @@ static List *
 SublinkList(Query *originalQuery)
 {
     FromExpr *joinTree = originalQuery->jointree;
-    Node *queryQuals = NULL;
     List *sublinkList = NIL;

     if (!joinTree)

@@ -568,7 +562,7 @@ SublinkList(Query *originalQuery)
         return NIL;
     }

-    queryQuals = joinTree->quals;
+    Node *queryQuals = joinTree->quals;
     ExtractSublinkWalker(queryQuals, &sublinkList);

     return sublinkList;

@@ -610,17 +604,14 @@ ExtractSublinkWalker(Node *node, List **sublinkList)
 static bool
 ShouldRecursivelyPlanAllSubqueriesInWhere(Query *query)
 {
-    FromExpr *joinTree = NULL;
-    Node *whereClause = NULL;
-
-    joinTree = query->jointree;
+    FromExpr *joinTree = query->jointree;
     if (joinTree == NULL)
     {
         /* there is no FROM clause */
         return false;
     }

-    whereClause = joinTree->quals;
+    Node *whereClause = joinTree->quals;
     if (whereClause == NULL)
     {
         /* there is no WHERE clause */

@@ -703,11 +694,7 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext)
         char *cteName = cte->ctename;
         Query *subquery = (Query *) cte->ctequery;
         uint64 planId = planningContext->planId;
-        uint32 subPlanId = 0;
-        char *resultId = NULL;
         List *cteTargetList = NIL;
-        Query *resultQuery = NULL;
-        DistributedSubPlan *subPlan = NULL;
         ListCell *rteCell = NULL;
         int replacedCtesCount = 0;

@@ -729,7 +716,7 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext)
             continue;
         }

-        subPlanId = list_length(planningContext->subPlanList) + 1;
+        uint32 subPlanId = list_length(planningContext->subPlanList) + 1;

         if (IsLoggableLevel(DEBUG1))
         {

@@ -742,11 +729,11 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext)
         }

         /* build a sub plan for the CTE */
-        subPlan = CreateDistributedSubPlan(subPlanId, subquery);
+        DistributedSubPlan *subPlan = CreateDistributedSubPlan(subPlanId, subquery);
         planningContext->subPlanList = lappend(planningContext->subPlanList, subPlan);

         /* build the result_id parameter for the call to read_intermediate_result */
-        resultId = GenerateResultId(planId, subPlanId);
+        char *resultId = GenerateResultId(planId, subPlanId);

         if (subquery->returningList)
         {

@@ -760,8 +747,8 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext)
         }

         /* replace references to the CTE with a subquery that reads results */
-        resultQuery = BuildSubPlanResultQuery(cteTargetList, cte->aliascolnames,
-                                              resultId);
+        Query *resultQuery = BuildSubPlanResultQuery(cteTargetList, cte->aliascolnames,
+                                                     resultId);

         foreach(rteCell, context.cteReferenceList)
         {

@@ -832,7 +819,6 @@ RecursivelyPlanSubqueryWalker(Node *node, RecursivePlanningContext *context)
     if (IsA(node, Query))
     {
         Query *query = (Query *) node;
-        DeferredErrorMessage *error = NULL;

         context->level += 1;

@@ -840,7 +826,7 @@ RecursivelyPlanSubqueryWalker(Node *node, RecursivePlanningContext *context)
          * First, make sure any subqueries and CTEs within this subquery
          * are recursively planned if necessary.
          */
-        error = RecursivelyPlanSubqueriesAndCTEs(query, context);
+        DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(query, context);
         if (error != NULL)
         {
             RaiseDeferredError(error, ERROR);

@@ -934,19 +920,16 @@ static bool
 AllDistributionKeysInSubqueryAreEqual(Query *subquery,
                                       PlannerRestrictionContext *restrictionContext)
 {
-    bool allDistributionKeysInSubqueryAreEqual = false;
-    PlannerRestrictionContext *filteredRestrictionContext = NULL;

     /* we don't support distribution eq. checks for CTEs yet */
     if (subquery->cteList != NIL)
     {
         return false;
     }

-    filteredRestrictionContext =
+    PlannerRestrictionContext *filteredRestrictionContext =
         FilterPlannerRestrictionForQuery(restrictionContext, subquery);

-    allDistributionKeysInSubqueryAreEqual =
+    bool allDistributionKeysInSubqueryAreEqual =
         AllDistributionKeysInQueryAreEqual(subquery, filteredRestrictionContext);
     if (!allDistributionKeysInSubqueryAreEqual)
     {

@@ -965,8 +948,6 @@ AllDistributionKeysInSubqueryAreEqual(Query *subquery,
 static bool
 ShouldRecursivelyPlanSetOperation(Query *query, RecursivePlanningContext *context)
 {
-    PlannerRestrictionContext *filteredRestrictionContext = NULL;
-
     SetOperationStmt *setOperations = (SetOperationStmt *) query->setOperations;
     if (setOperations == NULL)
     {

@@ -1000,7 +981,7 @@ ShouldRecursivelyPlanSetOperation(Query *query, RecursivePlanningContext *contex
         return true;
     }

-    filteredRestrictionContext =
+    PlannerRestrictionContext *filteredRestrictionContext =
         FilterPlannerRestrictionForQuery(context->plannerRestrictionContext, query);
     if (!SafeToPushdownUnionSubquery(filteredRestrictionContext))
     {

@@ -1062,9 +1043,6 @@ RecursivelyPlanSetOperations(Query *query, Node *node,
 static bool
 IsLocalTableRTE(Node *node)
 {
-    RangeTblEntry *rangeTableEntry = NULL;
-    Oid relationId = InvalidOid;

     if (node == NULL)
     {
         return false;

@@ -1075,7 +1053,7 @@ IsLocalTableRTE(Node *node)
         return false;
     }

-    rangeTableEntry = (RangeTblEntry *) node;
+    RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;
     if (rangeTableEntry->rtekind != RTE_RELATION)
     {
         return false;

@@ -1086,7 +1064,7 @@ IsLocalTableRTE(Node *node)
         return false;
     }

-    relationId = rangeTableEntry->relid;
+    Oid relationId = rangeTableEntry->relid;
     if (IsDistributedTable(relationId))
     {
         return false;

@@ -1111,11 +1089,7 @@ IsLocalTableRTE(Node *node)
 static void
 RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningContext)
 {
-    DistributedSubPlan *subPlan = NULL;
     uint64 planId = planningContext->planId;
-    int subPlanId = 0;
-    char *resultId = NULL;
-    Query *resultQuery = NULL;
     Query *debugQuery = NULL;

     if (ContainsReferencesToOuterQuery(subquery))

@@ -1138,19 +1112,19 @@ RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningConte
     /*
      * Create the subplan and append it to the list in the planning context.
      */
-    subPlanId = list_length(planningContext->subPlanList) + 1;
+    int subPlanId = list_length(planningContext->subPlanList) + 1;

-    subPlan = CreateDistributedSubPlan(subPlanId, subquery);
+    DistributedSubPlan *subPlan = CreateDistributedSubPlan(subPlanId, subquery);
     planningContext->subPlanList = lappend(planningContext->subPlanList, subPlan);

     /* build the result_id parameter for the call to read_intermediate_result */
-    resultId = GenerateResultId(planId, subPlanId);
+    char *resultId = GenerateResultId(planId, subPlanId);

     /*
      * BuildSubPlanResultQuery() can optionally use provided column aliases.
      * We do not need to send additional alias list for subqueries.
      */
-    resultQuery = BuildSubPlanResultQuery(subquery->targetList, NIL, resultId);
+    Query *resultQuery = BuildSubPlanResultQuery(subquery->targetList, NIL, resultId);

     if (IsLoggableLevel(DEBUG1))
     {

@@ -1176,7 +1150,6 @@ RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningConte
 static DistributedSubPlan *
 CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery)
 {
-    DistributedSubPlan *subPlan = NULL;
     int cursorOptions = 0;

     if (ContainsReadIntermediateResultFunction((Node *) subPlanQuery))

@@ -1192,7 +1165,7 @@ CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery)
         cursorOptions |= CURSOR_OPT_FORCE_DISTRIBUTED;
     }

-    subPlan = CitusMakeNode(DistributedSubPlan);
+    DistributedSubPlan *subPlan = CitusMakeNode(DistributedSubPlan);
     subPlan->plan = planner(subPlanQuery, cursorOptions, NULL);
     subPlan->subPlanId = subPlanId;

@@ -1310,12 +1283,11 @@ ContainsReferencesToOuterQueryWalker(Node *node, VarLevelsUpWalkerContext *conte
     else if (IsA(node, Query))
     {
         Query *query = (Query *) node;
-        bool found = false;
         int flags = 0;

         context->level += 1;
-        found = query_tree_walker(query, ContainsReferencesToOuterQueryWalker,
-                                  context, flags);
+        bool found = query_tree_walker(query, ContainsReferencesToOuterQueryWalker,
+                                       context, flags);
         context->level -= 1;

         return found;

@@ -1383,19 +1355,16 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
 {
     Query *subquery = makeNode(Query);
     RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
-    RangeTblEntry *newRangeTableEntry = NULL;
     Var *targetColumn = NULL;
     TargetEntry *targetEntry = NULL;
-    RangeTblFunction *rangeTblFunction = NULL;
     AttrNumber targetColumnIndex = 0;
-    TupleDesc tupleDesc = NULL;

-    rangeTblFunction = linitial(rangeTblEntry->functions);
+    RangeTblFunction *rangeTblFunction = linitial(rangeTblEntry->functions);

     subquery->commandType = CMD_SELECT;

     /* copy the input rangeTblEntry to prevent cycles */
-    newRangeTableEntry = copyObject(rangeTblEntry);
+    RangeTblEntry *newRangeTableEntry = copyObject(rangeTblEntry);

     /* set the FROM expression to the subquery */
     subquery->rtable = list_make1(newRangeTableEntry);

@@ -1407,8 +1376,8 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
      * If function return type is not composite or rowtype can't be determined,
      * tupleDesc is set to null here
      */
-    tupleDesc = (TupleDesc) get_expr_result_tupdesc(rangeTblFunction->funcexpr,
-                                                    true);
+    TupleDesc tupleDesc = (TupleDesc) get_expr_result_tupdesc(rangeTblFunction->funcexpr,
+                                                              true);

     /*
      * If tupleDesc is not null, we iterate over all the attributes and

@@ -1460,10 +1429,9 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
     else
     {
         /* create target entries for all columns returned by the function */
-        List *functionColumnNames = NULL;
         ListCell *functionColumnName = NULL;

-        functionColumnNames = rangeTblEntry->eref->colnames;
+        List *functionColumnNames = rangeTblEntry->eref->colnames;
         foreach(functionColumnName, functionColumnNames)
         {
             char *columnName = strVal(lfirst(functionColumnName));

@@ -1574,19 +1542,10 @@ ShouldTransformRTE(RangeTblEntry *rangeTableEntry)
 Query *
 BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resultId)
 {
-    Query *resultQuery = NULL;
-    Const *resultIdConst = NULL;
-    Const *resultFormatConst = NULL;
-    FuncExpr *funcExpr = NULL;
-    Alias *funcAlias = NULL;
     List *funcColNames = NIL;
     List *funcColTypes = NIL;
     List *funcColTypMods = NIL;
     List *funcColCollations = NIL;
-    RangeTblFunction *rangeTableFunction = NULL;
-    RangeTblEntry *rangeTableEntry = NULL;
-    RangeTblRef *rangeTableRef = NULL;
-    FromExpr *joinTree = NULL;
     ListCell *targetEntryCell = NULL;
     List *targetList = NIL;
     int columnNumber = 1;

@@ -1603,8 +1562,6 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
         Oid columnType = exprType(targetExpr);
         Oid columnTypMod = exprTypmod(targetExpr);
         Oid columnCollation = exprCollation(targetExpr);
-        Var *functionColumnVar = NULL;
-        TargetEntry *newTargetEntry = NULL;

         if (targetEntry->resjunk)
         {

@@ -1616,7 +1573,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
         funcColTypMods = lappend_int(funcColTypMods, columnTypMod);
         funcColCollations = lappend_int(funcColCollations, columnCollation);

-        functionColumnVar = makeNode(Var);
+        Var *functionColumnVar = makeNode(Var);
         functionColumnVar->varno = 1;
         functionColumnVar->varattno = columnNumber;
         functionColumnVar->vartype = columnType;

@@ -1627,7 +1584,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
         functionColumnVar->varoattno = columnNumber;
         functionColumnVar->location = -1;

-        newTargetEntry = makeNode(TargetEntry);
+        TargetEntry *newTargetEntry = makeNode(TargetEntry);
         newTargetEntry->expr = (Expr *) functionColumnVar;
         newTargetEntry->resno = columnNumber;

@@ -1659,7 +1616,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
         columnNumber++;
     }

-    resultIdConst = makeNode(Const);
+    Const *resultIdConst = makeNode(Const);
     resultIdConst->consttype = TEXTOID;
     resultIdConst->consttypmod = -1;
     resultIdConst->constlen = -1;

@@ -1674,7 +1631,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
         copyFormatId = TextCopyFormatId();
     }

-    resultFormatConst = makeNode(Const);
+    Const *resultFormatConst = makeNode(Const);
     resultFormatConst->consttype = CitusCopyFormatTypeId();
     resultFormatConst->consttypmod = -1;
     resultFormatConst->constlen = 4;

@@ -1684,7 +1641,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
     resultFormatConst->location = -1;

     /* build the call to read_intermediate_result */
-    funcExpr = makeNode(FuncExpr);
+    FuncExpr *funcExpr = makeNode(FuncExpr);
     funcExpr->funcid = CitusReadIntermediateResultFuncId();
     funcExpr->funcretset = true;
     funcExpr->funcvariadic = false;

@@ -1695,7 +1652,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
     funcExpr->args = list_make2(resultIdConst, resultFormatConst);

     /* build the RTE for the call to read_intermediate_result */
-    rangeTableFunction = makeNode(RangeTblFunction);
+    RangeTblFunction *rangeTableFunction = makeNode(RangeTblFunction);
     rangeTableFunction->funccolcount = list_length(funcColNames);
     rangeTableFunction->funccolnames = funcColNames;
     rangeTableFunction->funccoltypes = funcColTypes;

@@ -1704,25 +1661,25 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu
     rangeTableFunction->funcparams = NULL;
     rangeTableFunction->funcexpr = (Node *) funcExpr;

-    funcAlias = makeNode(Alias);
+    Alias *funcAlias = makeNode(Alias);
     funcAlias->aliasname = "intermediate_result";
     funcAlias->colnames = funcColNames;

-    rangeTableEntry = makeNode(RangeTblEntry);
+    RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry);
     rangeTableEntry->rtekind = RTE_FUNCTION;
     rangeTableEntry->functions = list_make1(rangeTableFunction);
     rangeTableEntry->inFromCl = true;
     rangeTableEntry->eref = funcAlias;

     /* build the join tree using the read_intermediate_result RTE */
-    rangeTableRef = makeNode(RangeTblRef);
+    RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
     rangeTableRef->rtindex = 1;

-    joinTree = makeNode(FromExpr);
+    FromExpr *joinTree = makeNode(FromExpr);
     joinTree->fromlist = list_make1(rangeTableRef);

     /* build the SELECT query */
-    resultQuery = makeNode(Query);
+    Query *resultQuery = makeNode(Query);
     resultQuery->commandType = CMD_SELECT;
     resultQuery->rtable = list_make1(rangeTableEntry);
     resultQuery->jointree = joinTree;
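The `BuildSubPlanResultQuery()` hunks are the densest example: nine forward declarations disappear and every planner node is declared at the `makeNode()` call that creates it. A minimal sketch of that construction style (a trivial RTE reference, not the function above):

    static RangeTblRef *
    MakeFirstRangeTableRef(void)
    {
        /* declare each node where it is built, as the converted code does */
        RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
        rangeTableRef->rtindex = 1;

        return rangeTableRef;
    }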
@@ -160,9 +160,6 @@ bool
 AllDistributionKeysInQueryAreEqual(Query *originalQuery,
                                    PlannerRestrictionContext *plannerRestrictionContext)
 {
-    bool restrictionEquivalenceForPartitionKeys = false;
-    RelationRestrictionContext *restrictionContext = NULL;

     /* we don't support distribution key equality checks for CTEs yet */
     if (originalQuery->cteList != NIL)
     {

@@ -170,13 +167,14 @@ AllDistributionKeysInQueryAreEqual(Query *originalQuery,
     }

     /* we don't support distribution key equality checks for local tables */
-    restrictionContext = plannerRestrictionContext->relationRestrictionContext;
+    RelationRestrictionContext *restrictionContext =
+        plannerRestrictionContext->relationRestrictionContext;
     if (ContextContainsLocalRelation(restrictionContext))
     {
         return false;
     }

-    restrictionEquivalenceForPartitionKeys =
+    bool restrictionEquivalenceForPartitionKeys =
         RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext);
     if (restrictionEquivalenceForPartitionKeys)
     {

@@ -245,9 +243,6 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext
     AttributeEquivalenceClass *attributeEquivalance =
         palloc0(sizeof(AttributeEquivalenceClass));
     ListCell *relationRestrictionCell = NULL;
-    List *relationRestrictionAttributeEquivalenceList = NIL;
-    List *joinRestrictionAttributeEquivalenceList = NIL;
-    List *allAttributeEquivalenceList = NIL;

     attributeEquivalance->equivalenceId = attributeEquivalenceId++;

@@ -338,12 +333,12 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext
     * we determine whether all relations are joined on the partition column
     * by adding the equivalence classes that can be inferred from joins.
     */
-    relationRestrictionAttributeEquivalenceList =
+    List *relationRestrictionAttributeEquivalenceList =
        GenerateAttributeEquivalencesForRelationRestrictions(restrictionContext);
-    joinRestrictionAttributeEquivalenceList =
+    List *joinRestrictionAttributeEquivalenceList =
        GenerateAttributeEquivalencesForJoinRestrictions(joinRestrictionContext);

-    allAttributeEquivalenceList =
+    List *allAttributeEquivalenceList =
        list_concat(relationRestrictionAttributeEquivalenceList,
                    joinRestrictionAttributeEquivalenceList);

@@ -373,8 +368,6 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
     AppendRelInfo *targetAppendRelInfo = NULL;
     ListCell *translatedVarCell = NULL;
     AttrNumber childAttrNumber = 0;
-    Var *relationPartitionKey = NULL;
-    List *translaterVars = NULL;

     *partitionKeyIndex = 0;

@@ -400,13 +393,12 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
         return NULL;
     }

-    relationPartitionKey = DistPartitionKey(relationOid);
+    Var *relationPartitionKey = DistPartitionKey(relationOid);

-    translaterVars = targetAppendRelInfo->translated_vars;
+    List *translaterVars = targetAppendRelInfo->translated_vars;
     foreach(translatedVarCell, translaterVars)
     {
         Node *targetNode = (Node *) lfirst(translatedVarCell);
-        Var *targetVar = NULL;

         childAttrNumber++;

@@ -415,7 +407,7 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
             continue;
         }

-        targetVar = (Var *) lfirst(translatedVarCell);
+        Var *targetVar = (Var *) lfirst(translatedVarCell);
         if (targetVar->varno == relationRteIndex &&
             targetVar->varattno == relationPartitionKey->varattno)
         {

@@ -464,15 +456,13 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
 bool
 RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext *restrictionContext)
 {
-    List *attributeEquivalenceList = NIL;

     /* there is a single distributed relation, no need to continue */
     if (!ContainsMultipleDistributedRelations(restrictionContext))
     {
         return true;
     }

-    attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext);
+    List *attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext);

     return RestrictionEquivalenceForPartitionKeysViaEquivalances(restrictionContext,
                                                                  attributeEquivalenceList);

@@ -554,20 +544,18 @@ GenerateAllAttributeEquivalences(PlannerRestrictionContext *plannerRestrictionCo
     JoinRestrictionContext *joinRestrictionContext =
         plannerRestrictionContext->joinRestrictionContext;

-    List *relationRestrictionAttributeEquivalenceList = NIL;
-    List *joinRestrictionAttributeEquivalenceList = NIL;
-    List *allAttributeEquivalenceList = NIL;

     /* reset the equivalence id counter per call to prevent overflows */
     attributeEquivalenceId = 1;

-    relationRestrictionAttributeEquivalenceList =
+    List *relationRestrictionAttributeEquivalenceList =
        GenerateAttributeEquivalencesForRelationRestrictions(relationRestrictionContext);
-    joinRestrictionAttributeEquivalenceList =
+    List *joinRestrictionAttributeEquivalenceList =
        GenerateAttributeEquivalencesForJoinRestrictions(joinRestrictionContext);

-    allAttributeEquivalenceList = list_concat(relationRestrictionAttributeEquivalenceList,
-                                              joinRestrictionAttributeEquivalenceList);
+    List *allAttributeEquivalenceList = list_concat(
+        relationRestrictionAttributeEquivalenceList,
+        joinRestrictionAttributeEquivalenceList);

     return allAttributeEquivalenceList;
 }

@@ -609,7 +597,6 @@ bool
 EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList,
                                          RelationRestrictionContext *restrictionContext)
 {
-    AttributeEquivalenceClass *commonEquivalenceClass = NULL;
     ListCell *commonEqClassCell = NULL;
     ListCell *relationRestrictionCell = NULL;
     Relids commonRteIdentities = NULL;

@@ -619,8 +606,9 @@ EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList,
     * common equivalence class. The main goal is to test whether this main class
     * contains all partition keys of the existing relations.
     */
-    commonEquivalenceClass = GenerateCommonEquivalence(attributeEquivalenceList,
-                                                       restrictionContext);
+    AttributeEquivalenceClass *commonEquivalenceClass = GenerateCommonEquivalence(
+        attributeEquivalenceList,
+        restrictionContext);

     /* add the rte indexes of relations to a bitmap */
     foreach(commonEqClassCell, commonEquivalenceClass->equivalentAttributes)

@@ -885,13 +873,12 @@ static AttributeEquivalenceClass *
 GenerateCommonEquivalence(List *attributeEquivalenceList,
                           RelationRestrictionContext *relationRestrictionContext)
 {
-    AttributeEquivalenceClass *commonEquivalenceClass = NULL;
-    AttributeEquivalenceClass *firstEquivalenceClass = NULL;
     Bitmapset *addedEquivalenceIds = NULL;
     uint32 equivalenceListSize = list_length(attributeEquivalenceList);
     uint32 equivalenceClassIndex = 0;

-    commonEquivalenceClass = palloc0(sizeof(AttributeEquivalenceClass));
+    AttributeEquivalenceClass *commonEquivalenceClass = palloc0(
+        sizeof(AttributeEquivalenceClass));
     commonEquivalenceClass->equivalenceId = 0;

     /*

@@ -899,7 +886,7 @@ GenerateCommonEquivalence(List *attributeEquivalenceList,
     * table since we always want the input distributed relations to be
     * on the common class.
     */
-    firstEquivalenceClass =
+    AttributeEquivalenceClass *firstEquivalenceClass =
        GenerateEquivalanceClassForRelationRestriction(relationRestrictionContext);

     /* we skip the calculation if there are not enough information */

@@ -915,12 +902,12 @@ GenerateCommonEquivalence(List *attributeEquivalenceList,

     while (equivalenceClassIndex < equivalenceListSize)
     {
-        AttributeEquivalenceClass *currentEquivalenceClass = NULL;
         ListCell *equivalenceMemberCell = NULL;
         bool restartLoop = false;

-        currentEquivalenceClass = list_nth(attributeEquivalenceList,
-                                           equivalenceClassIndex);
+        AttributeEquivalenceClass *currentEquivalenceClass = list_nth(
+            attributeEquivalenceList,
+            equivalenceClassIndex);

         /*
          * This is an optimization. If we already added the same equivalence class,

@@ -1077,22 +1064,14 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext *
     foreach(restrictionInfoList, joinRestriction->joinRestrictInfoList)
     {
         RestrictInfo *rinfo = (RestrictInfo *) lfirst(restrictionInfoList);
-        OpExpr *restrictionOpExpr = NULL;
-        Node *leftNode = NULL;
-        Node *rightNode = NULL;
-        Expr *strippedLeftExpr = NULL;
-        Expr *strippedRightExpr = NULL;
-        Var *leftVar = NULL;
-        Var *rightVar = NULL;
         Expr *restrictionClause = rinfo->clause;
-        AttributeEquivalenceClass *attributeEquivalance = NULL;

         if (!IsA(restrictionClause, OpExpr))
         {
             continue;
         }

-        restrictionOpExpr = (OpExpr *) restrictionClause;
+        OpExpr *restrictionOpExpr = (OpExpr *) restrictionClause;
         if (list_length(restrictionOpExpr->args) != 2)
         {
             continue;

@@ -1102,22 +1081,24 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext *
             continue;
         }

-        leftNode = linitial(restrictionOpExpr->args);
-        rightNode = lsecond(restrictionOpExpr->args);
+        Node *leftNode = linitial(restrictionOpExpr->args);
+        Node *rightNode = lsecond(restrictionOpExpr->args);

         /* we also don't want implicit coercions */
-        strippedLeftExpr = (Expr *) strip_implicit_coercions((Node *) leftNode);
-        strippedRightExpr = (Expr *) strip_implicit_coercions((Node *) rightNode);
+        Expr *strippedLeftExpr = (Expr *) strip_implicit_coercions((Node *) leftNode);
+        Expr *strippedRightExpr = (Expr *) strip_implicit_coercions(
+            (Node *) rightNode);

         if (!(IsA(strippedLeftExpr, Var) && IsA(strippedRightExpr, Var)))
         {
             continue;
         }

-        leftVar = (Var *) strippedLeftExpr;
-        rightVar = (Var *) strippedRightExpr;
+        Var *leftVar = (Var *) strippedLeftExpr;
+        Var *rightVar = (Var *) strippedRightExpr;

-        attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass));
+        AttributeEquivalenceClass *attributeEquivalance = palloc0(
+            sizeof(AttributeEquivalenceClass));
         attributeEquivalance->equivalenceId = attributeEquivalenceId++;

         AddToAttributeEquivalenceClass(&attributeEquivalance,

@@ -1167,8 +1148,6 @@ static void
 AddToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceClass,
                                PlannerInfo *root, Var *varToBeAdded)
 {
-    RangeTblEntry *rangeTableEntry = NULL;

     /* punt if it's a whole-row var rather than a plain column reference */
     if (varToBeAdded->varattno == InvalidAttrNumber)
     {

@@ -1181,7 +1160,7 @@ AddToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceC
         return;
     }

-    rangeTableEntry = root->simple_rte_array[varToBeAdded->varno];
+    RangeTblEntry *rangeTableEntry = root->simple_rte_array[varToBeAdded->varno];
     if (rangeTableEntry->rtekind == RTE_RELATION)
     {
         AddRteRelationToAttributeEquivalenceClass(attributeEquivalanceClass,

@@ -1210,7 +1189,6 @@ AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass
                                           PlannerInfo *root, Var *varToBeAdded)
 {
     RelOptInfo *baseRelOptInfo = find_base_rel(root, varToBeAdded->varno);
-    TargetEntry *subqueryTargetEntry = NULL;
     Query *targetSubquery = GetTargetSubquery(root, rangeTableEntry, varToBeAdded);

     /*

@@ -1229,8 +1207,8 @@ AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass
         return;
     }

-    subqueryTargetEntry = get_tle_by_resno(targetSubquery->targetList,
-                                           varToBeAdded->varattno);
+    TargetEntry *subqueryTargetEntry = get_tle_by_resno(targetSubquery->targetList,
+                                                        varToBeAdded->varattno);

     /* if we fail to find corresponding target entry, do not proceed */
     if (subqueryTargetEntry == NULL || subqueryTargetEntry->resjunk)

@@ -1402,9 +1380,7 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass **
                                           RangeTblEntry *rangeTableEntry,
                                           Var *varToBeAdded)
 {
-    AttributeEquivalenceClassMember *attributeEqMember = NULL;
     Oid relationId = rangeTableEntry->relid;
-    Var *relationPartitionKey = NULL;

     /* we don't consider local tables in the equality on columns */
     if (!IsDistributedTable(relationId))

@@ -1412,7 +1388,7 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass **
         return;
     }

-    relationPartitionKey = DistPartitionKey(relationId);
+    Var *relationPartitionKey = DistPartitionKey(relationId);

     Assert(rangeTableEntry->rtekind == RTE_RELATION);

@@ -1428,7 +1404,8 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass **
         return;
     }

-    attributeEqMember = palloc0(sizeof(AttributeEquivalenceClassMember));
+    AttributeEquivalenceClassMember *attributeEqMember = palloc0(
+        sizeof(AttributeEquivalenceClassMember));

     attributeEqMember->varattno = varToBeAdded->varattno;
     attributeEqMember->varno = varToBeAdded->varno;

@@ -1481,7 +1458,6 @@ static List *
 AddAttributeClassToAttributeClassList(List *attributeEquivalenceList,
                                       AttributeEquivalenceClass *attributeEquivalance)
 {
-    List *equivalentAttributes = NULL;
     ListCell *attributeEquivalanceCell = NULL;

     if (attributeEquivalance == NULL)

@@ -1493,7 +1469,7 @@ AddAttributeClassToAttributeClassList(List *attributeEquivalenceList,
     * Note that in some cases we allow having equivalentAttributes with zero or
     * one elements. For the details, see AddToAttributeEquivalenceClass().
     */
-    equivalentAttributes = attributeEquivalance->equivalentAttributes;
+    List *equivalentAttributes = attributeEquivalance->equivalentAttributes;
     if (list_length(equivalentAttributes) < 2)
     {
         return attributeEquivalenceList;

@@ -1589,15 +1565,10 @@ bool
 ContainsUnionSubquery(Query *queryTree)
 {
     List *rangeTableList = queryTree->rtable;
-    Node *setOperations = queryTree->setOperations;
     List *joinTreeTableIndexList = NIL;
-    Index subqueryRteIndex = 0;
-    uint32 joiningRangeTableCount = 0;
-    RangeTblEntry *rangeTableEntry = NULL;
-    Query *subqueryTree = NULL;

     ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList);
-    joiningRangeTableCount = list_length(joinTreeTableIndexList);
+    uint32 joiningRangeTableCount = list_length(joinTreeTableIndexList);

     /* don't allow joins on top of unions */
     if (joiningRangeTableCount > 1)

@@ -1611,15 +1582,15 @@ ContainsUnionSubquery(Query *queryTree)
         return false;
     }

-    subqueryRteIndex = linitial_int(joinTreeTableIndexList);
-    rangeTableEntry = rt_fetch(subqueryRteIndex, rangeTableList);
+    Index subqueryRteIndex = linitial_int(joinTreeTableIndexList);
+    RangeTblEntry *rangeTableEntry = rt_fetch(subqueryRteIndex, rangeTableList);
     if (rangeTableEntry->rtekind != RTE_SUBQUERY)
     {
         return false;
     }

-    subqueryTree = rangeTableEntry->subquery;
-    setOperations = subqueryTree->setOperations;
+    Query *subqueryTree = rangeTableEntry->subquery;
+    Node *setOperations = subqueryTree->setOperations;
     if (setOperations != NULL)
     {
         SetOperationStmt *setOperationStatement = (SetOperationStmt *) setOperations;
@ -1648,15 +1619,12 @@ ContainsUnionSubquery(Query *queryTree)
|
|||
static Index
|
||||
RelationRestrictionPartitionKeyIndex(RelationRestriction *relationRestriction)
|
||||
{
|
||||
PlannerInfo *relationPlannerRoot = NULL;
|
||||
Query *relationPlannerParseQuery = NULL;
|
||||
List *relationTargetList = NIL;
|
||||
ListCell *targetEntryCell = NULL;
|
||||
Index partitionKeyTargetAttrIndex = 0;
|
||||
|
||||
relationPlannerRoot = relationRestriction->plannerInfo;
|
||||
relationPlannerParseQuery = relationPlannerRoot->parse;
|
||||
relationTargetList = relationPlannerParseQuery->targetList;
|
||||
PlannerInfo *relationPlannerRoot = relationRestriction->plannerInfo;
|
||||
Query *relationPlannerParseQuery = relationPlannerRoot->parse;
|
||||
List *relationTargetList = relationPlannerParseQuery->targetList;
|
||||
|
||||
foreach(targetEntryCell, relationTargetList)
|
||||
{
|
||||
|
@ -1689,12 +1657,11 @@ List *
|
|||
DistributedRelationIdList(Query *query)
|
||||
{
|
||||
List *rangeTableList = NIL;
|
||||
List *tableEntryList = NIL;
|
||||
List *relationIdList = NIL;
|
||||
ListCell *tableEntryCell = NULL;
|
||||
|
||||
ExtractRangeTableRelationWalker((Node *) query, &rangeTableList);
|
||||
tableEntryList = TableEntryList(rangeTableList);
|
||||
List *tableEntryList = TableEntryList(rangeTableList);
|
||||
|
||||
foreach(tableEntryCell, tableEntryList)
|
||||
{
|
||||
|
@ -1724,10 +1691,6 @@ PlannerRestrictionContext *
|
|||
FilterPlannerRestrictionForQuery(PlannerRestrictionContext *plannerRestrictionContext,
|
||||
Query *query)
|
||||
{
|
||||
PlannerRestrictionContext *filteredPlannerRestrictionContext = NULL;
|
||||
int referenceRelationCount = 0;
|
||||
int totalRelationCount = 0;
|
||||
|
||||
Relids queryRteIdentities = QueryRteIdentities(query);
|
||||
|
||||
RelationRestrictionContext *relationRestrictionContext =
|
||||
|
@ -1742,14 +1705,16 @@ FilterPlannerRestrictionForQuery(PlannerRestrictionContext *plannerRestrictionCo
|
|||
FilterJoinRestrictionContext(joinRestrictionContext, queryRteIdentities);
|
||||
|
||||
/* allocate the filtered planner restriction context and set all the fields */
|
||||
filteredPlannerRestrictionContext = palloc0(sizeof(PlannerRestrictionContext));
|
||||
PlannerRestrictionContext *filteredPlannerRestrictionContext = palloc0(
|
||||
sizeof(PlannerRestrictionContext));
|
||||
|
||||
filteredPlannerRestrictionContext->memoryContext =
|
||||
plannerRestrictionContext->memoryContext;
|
||||
|
||||
totalRelationCount = list_length(
|
||||
int totalRelationCount = list_length(
|
||||
filteredRelationRestrictionContext->relationRestrictionList);
|
||||
referenceRelationCount = ReferenceRelationCount(filteredRelationRestrictionContext);
|
||||
int referenceRelationCount = ReferenceRelationCount(
|
||||
filteredRelationRestrictionContext);
|
||||
|
||||
filteredRelationRestrictionContext->allReferenceTables =
|
||||
(totalRelationCount == referenceRelationCount);
|
||||
|
@ -1850,10 +1815,8 @@ static bool
|
|||
RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
|
||||
rangeTableArrayLength, Relids queryRteIdentities)
|
||||
{
|
||||
int rteIndex = 0;
|
||||
|
||||
/* simple_rte_array starts from 1, see plannerInfo struct */
|
||||
for (rteIndex = 1; rteIndex < rangeTableArrayLength; ++rteIndex)
|
||||
for (int rteIndex = 1; rteIndex < rangeTableArrayLength; ++rteIndex)
|
||||
{
|
||||
RangeTblEntry *rangeTableEntry = rangeTableEntries[rteIndex];
|
||||
List *rangeTableRelationList = NULL;
|
||||
|
@ -1883,11 +1846,10 @@ RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
|
|||
foreach(rteRelationCell, rangeTableRelationList)
|
||||
{
|
||||
RangeTblEntry *rteRelation = (RangeTblEntry *) lfirst(rteRelationCell);
|
||||
int rteIdentity = 0;
|
||||
|
||||
Assert(rteRelation->rtekind == RTE_RELATION);
|
||||
|
||||
rteIdentity = GetRTEIdentity(rteRelation);
|
||||
int rteIdentity = GetRTEIdentity(rteRelation);
|
||||
if (bms_is_member(rteIdentity, queryRteIdentities))
|
||||
{
|
||||
return true;
|
||||
|
@ -1916,12 +1878,11 @@ QueryRteIdentities(Query *queryTree)
|
|||
foreach(rangeTableCell, rangeTableList)
|
||||
{
|
||||
RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
|
||||
int rteIdentity = 0;
|
||||
|
||||
/* we're only interested in relations */
|
||||
Assert(rangeTableEntry->rtekind == RTE_RELATION);
|
||||
|
||||
rteIdentity = GetRTEIdentity(rangeTableEntry);
|
||||
int rteIdentity = GetRTEIdentity(rangeTableEntry);
|
||||
|
||||
queryRteIdentities = bms_add_member(queryRteIdentities, rteIdentity);
|
||||
}
|
||||
|
|
|
@@ -308,7 +308,6 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 	foreach(pruneCell, context.pruningInstances)
 	{
 		PruningInstance *prune = (PruningInstance *) lfirst(pruneCell);
-		List *pruneOneList;

 		/*
		 * If this is a partial instance, a fully built one has also been
@@ -358,7 +357,7 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 			}
 		}

-		pruneOneList = PruneOne(cacheEntry, &context, prune);
+		List *pruneOneList = PruneOne(cacheEntry, &context, prune);

 		if (prunedList)
 		{
@@ -643,12 +642,9 @@ AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context,
		equal(strippedLeftOpExpression, context->partitionColumn) &&
		IsA(arrayArgument, Const))
 	{
-		ArrayType *array = NULL;
 		int16 typlen = 0;
 		bool typbyval = false;
 		char typalign = '\0';
-		Oid elementType = 0;
-		ArrayIterator arrayIterator = NULL;
 		Datum arrayElement = 0;
 		Datum inArray = ((Const *) arrayArgument)->constvalue;
 		bool isNull = false;
@@ -659,26 +655,25 @@ AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context,
 			return;
 		}

-		array = DatumGetArrayTypeP(((Const *) arrayArgument)->constvalue);
+		ArrayType *array = DatumGetArrayTypeP(((Const *) arrayArgument)->constvalue);

 		/* get the necessary information from array type to iterate over it */
-		elementType = ARR_ELEMTYPE(array);
+		Oid elementType = ARR_ELEMTYPE(array);
 		get_typlenbyvalalign(elementType,
							 &typlen,
							 &typbyval,
							 &typalign);

 		/* Iterate over the righthand array of expression */
-		arrayIterator = array_create_iterator(array, 0, NULL);
+		ArrayIterator arrayIterator = array_create_iterator(array, 0, NULL);
 		while (array_iterate(arrayIterator, &arrayElement, &isNull))
 		{
-			OpExpr *arrayEqualityOp = NULL;
 			Const *constElement = makeConst(elementType, -1,
											DEFAULT_COLLATION_OID, typlen, arrayElement,
											isNull, typbyval);

 			/* build partcol = arrayelem operator */
-			arrayEqualityOp = makeNode(OpExpr);
+			OpExpr *arrayEqualityOp = makeNode(OpExpr);
 			arrayEqualityOp->opno = arrayOperatorExpression->opno;
 			arrayEqualityOp->opfuncid = arrayOperatorExpression->opfuncid;
 			arrayEqualityOp->inputcollid = arrayOperatorExpression->inputcollid;
@@ -734,7 +729,6 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
									 Var *partitionColumn, Const *constantClause)
 {
 	PruningInstance *prune = context->currentPruningInstance;
-	List *btreeInterpretationList = NULL;
 	ListCell *btreeInterpretationCell = NULL;
 	bool matchedOp = false;
@@ -756,7 +750,7 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 	/* at this point, we'd better be able to pass binary Datums to comparison functions */
 	Assert(IsBinaryCoercible(constantClause->consttype, partitionColumn->vartype));

-	btreeInterpretationList = get_op_btree_interpretation(opClause->opno);
+	List *btreeInterpretationList = get_op_btree_interpretation(opClause->opno);
 	foreach(btreeInterpretationCell, btreeInterpretationList)
 	{
 		OpBtreeInterpretation *btreeInterpretation =
@@ -924,13 +918,12 @@ AddHashRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause,
							 Var *varClause, Const *constantClause)
 {
 	PruningInstance *prune = context->currentPruningInstance;
-	List *btreeInterpretationList = NULL;
 	ListCell *btreeInterpretationCell = NULL;

 	/* be paranoid */
 	Assert(IsBinaryCoercible(constantClause->consttype, INT4OID));

-	btreeInterpretationList =
+	List *btreeInterpretationList =
		get_op_btree_interpretation(opClause->opno);
 	foreach(btreeInterpretationCell, btreeInterpretationList)
 	{
@@ -986,9 +979,8 @@ static List *
 ShardArrayToList(ShardInterval **shardArray, int length)
 {
 	List *shardIntervalList = NIL;
-	int shardIndex;

-	for (shardIndex = 0; shardIndex < length; shardIndex++)
+	for (int shardIndex = 0; shardIndex < length; shardIndex++)
 	{
 		ShardInterval *shardInterval =
			shardArray[shardIndex];
@@ -1068,13 +1060,12 @@ PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context,
	 */
 	if (prune->hashedEqualConsts)
 	{
-		int shardIndex = INVALID_SHARD_INDEX;
 		ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray;

 		Assert(context->partitionMethod == DISTRIBUTE_BY_HASH);

-		shardIndex = FindShardIntervalIndex(prune->hashedEqualConsts->constvalue,
											cacheEntry);
+		int shardIndex = FindShardIntervalIndex(prune->hashedEqualConsts->constvalue,
												cacheEntry);

 		if (shardIndex == INVALID_SHARD_INDEX)
 		{
@@ -1198,14 +1189,12 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 	while (lowerBoundIndex < upperBoundIndex)
 	{
 		int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2);
-		int maxValueComparison = 0;
-		int minValueComparison = 0;

 		/* setup minValue as argument */
 		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue);

 		/* execute cmp(partitionValue, lowerBound) */
-		minValueComparison = PerformCompare(compareFunction);
+		int minValueComparison = PerformCompare(compareFunction);

 		/* and evaluate results */
 		if (minValueComparison < 0)
@@ -1219,7 +1208,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->maxValue);

 		/* execute cmp(partitionValue, upperBound) */
-		maxValueComparison = PerformCompare(compareFunction);
+		int maxValueComparison = PerformCompare(compareFunction);

 		if ((maxValueComparison == 0 && !includeMax) ||
			maxValueComparison > 0)
@@ -1276,14 +1265,12 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 	while (lowerBoundIndex < upperBoundIndex)
 	{
 		int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2);
-		int maxValueComparison = 0;
-		int minValueComparison = 0;

 		/* setup minValue as argument */
 		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue);

 		/* execute cmp(partitionValue, lowerBound) */
-		minValueComparison = PerformCompare(compareFunction);
+		int minValueComparison = PerformCompare(compareFunction);

 		/* and evaluate results */
 		if ((minValueComparison == 0 && !includeMin) ||
@@ -1298,7 +1285,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->maxValue);

 		/* execute cmp(partitionValue, upperBound) */
-		maxValueComparison = PerformCompare(compareFunction);
+		int maxValueComparison = PerformCompare(compareFunction);

 		if (maxValueComparison > 0)
 		{
@@ -1355,7 +1342,6 @@ PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *contex
 	bool upperBoundInclusive = false;
 	int lowerBoundIdx = -1;
 	int upperBoundIdx = -1;
-	int curIdx = 0;
 	FunctionCallInfo compareFunctionCall = (FunctionCallInfo) &
										   context->compareIntervalFunctionCall;
@@ -1442,7 +1428,7 @@ PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *contex
 	/*
	 * Build list of all shards that are in the range of shards (possibly 0).
	 */
-	for (curIdx = lowerBoundIdx; curIdx <= upperBoundIdx; curIdx++)
+	for (int curIdx = lowerBoundIdx; curIdx <= upperBoundIdx; curIdx++)
 	{
 		remainingShardList = lappend(remainingShardList,
									 sortedShardIntervalArray[curIdx]);
@@ -1463,9 +1449,8 @@ ExhaustivePrune(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context,
 	List *remainingShardList = NIL;
 	int shardCount = cacheEntry->shardIntervalArrayLength;
 	ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray;
-	int curIdx = 0;

-	for (curIdx = 0; curIdx < shardCount; curIdx++)
+	for (int curIdx = 0; curIdx < shardCount; curIdx++)
 	{
 		ShardInterval *curInterval = sortedShardIntervalArray[curIdx];

@@ -39,11 +39,6 @@ ProgressMonitorData *
 CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSize,
					  Oid relationId)
 {
-	dsm_segment *dsmSegment = NULL;
-	dsm_handle dsmHandle = 0;
-	ProgressMonitorData *monitor = NULL;
-	Size monitorSize = 0;
-
 	if (stepSize <= 0 || stepCount <= 0)
 	{
 		ereport(ERROR,
@@ -51,8 +46,8 @@ CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSi
						"positive values")));
 	}

-	monitorSize = sizeof(ProgressMonitorData) + stepSize * stepCount;
-	dsmSegment = dsm_create(monitorSize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
+	Size monitorSize = sizeof(ProgressMonitorData) + stepSize * stepCount;
+	dsm_segment *dsmSegment = dsm_create(monitorSize, DSM_CREATE_NULL_IF_MAXSEGMENTS);

 	if (dsmSegment == NULL)
 	{
@@ -62,9 +57,9 @@ CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSi
 		return NULL;
 	}

-	dsmHandle = dsm_segment_handle(dsmSegment);
+	dsm_handle dsmHandle = dsm_segment_handle(dsmSegment);

-	monitor = MonitorDataFromDSMHandle(dsmHandle, &dsmSegment);
+	ProgressMonitorData *monitor = MonitorDataFromDSMHandle(dsmHandle, &dsmSegment);

 	monitor->stepCount = stepCount;
 	monitor->processId = MyProcPid;
@@ -143,42 +138,38 @@ ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegments)
	 */
 	text *commandTypeText = cstring_to_text("VACUUM");
 	Datum commandTypeDatum = PointerGetDatum(commandTypeText);
-	Oid getProgressInfoFunctionOid = InvalidOid;
-	TupleTableSlot *tupleTableSlot = NULL;
-	ReturnSetInfo *progressResultSet = NULL;
 	List *monitorList = NIL;

-	getProgressInfoFunctionOid = FunctionOid("pg_catalog",
											 "pg_stat_get_progress_info",
											 1);
+	Oid getProgressInfoFunctionOid = FunctionOid("pg_catalog",
												 "pg_stat_get_progress_info",
												 1);

-	progressResultSet = FunctionCallGetTupleStore1(pg_stat_get_progress_info,
												   getProgressInfoFunctionOid,
												   commandTypeDatum);
+	ReturnSetInfo *progressResultSet = FunctionCallGetTupleStore1(
		pg_stat_get_progress_info,
		getProgressInfoFunctionOid,
		commandTypeDatum);

-	tupleTableSlot = MakeSingleTupleTableSlotCompat(progressResultSet->setDesc,
													&TTSOpsMinimalTuple);
+	TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(
		progressResultSet->setDesc,
		&TTSOpsMinimalTuple);

 	/* iterate over tuples in tuple store, and send them to destination */
 	for (;;)
 	{
-		bool nextTuple = false;
 		bool isNull = false;
-		Datum magicNumberDatum = 0;
-		uint64 magicNumber = 0;

-		nextTuple = tuplestore_gettupleslot(progressResultSet->setResult,
											true,
											false,
											tupleTableSlot);
+		bool nextTuple = tuplestore_gettupleslot(progressResultSet->setResult,
												 true,
												 false,
												 tupleTableSlot);

 		if (!nextTuple)
 		{
 			break;
 		}

-		magicNumberDatum = slot_getattr(tupleTableSlot, magicNumberIndex, &isNull);
-		magicNumber = DatumGetUInt64(magicNumberDatum);
+		Datum magicNumberDatum = slot_getattr(tupleTableSlot, magicNumberIndex, &isNull);
+		uint64 magicNumber = DatumGetUInt64(magicNumberDatum);

 		if (!isNull && magicNumber == commandTypeMagicNumber)
 		{

@@ -118,7 +118,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
					command->subtype == AT_ValidateConstraint)
				{
					char **constraintName = &(command->name);
-					Oid constraintOid = InvalidOid;
					const bool constraintMissingOk = true;

					if (!OidIsValid(relationId))
@@ -129,9 +128,9 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
													   rvMissingOk);
					}

-					constraintOid = get_relation_constraint_oid(relationId,
																command->name,
																constraintMissingOk);
+					Oid constraintOid = get_relation_constraint_oid(relationId,
																	command->name,
																	constraintMissingOk);
					if (!OidIsValid(constraintOid))
					{
						AppendShardIdToName(constraintName, shardId);
@@ -161,8 +160,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
		case T_ClusterStmt:
		{
			ClusterStmt *clusterStmt = (ClusterStmt *) parseTree;
-			char **relationName = NULL;
-			char **relationSchemaName = NULL;

			/* we do not support clustering the entire database */
			if (clusterStmt->relation == NULL)
@@ -170,8 +167,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
				ereport(ERROR, (errmsg("cannot extend name for multi-relation cluster")));
			}

-			relationName = &(clusterStmt->relation->relname);
-			relationSchemaName = &(clusterStmt->relation->schemaname);
+			char **relationName = &(clusterStmt->relation->relname);
+			char **relationSchemaName = &(clusterStmt->relation->schemaname);

			/* prefix with schema name if it is not added already */
			SetSchemaNameIfNotExist(relationSchemaName, schemaName);
@@ -232,11 +229,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
			if (objectType == OBJECT_TABLE || objectType == OBJECT_INDEX ||
				objectType == OBJECT_FOREIGN_TABLE || objectType == OBJECT_FOREIGN_SERVER)
			{
-				List *relationNameList = NULL;
-				int relationNameListLength = 0;
				Value *relationSchemaNameValue = NULL;
				Value *relationNameValue = NULL;
-				char **relationName = NULL;

				uint32 dropCount = list_length(dropStmt->objects);
				if (dropCount > 1)
@@ -253,8 +247,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
				 * have the correct memory address for the name.
				 */
-				relationNameList = (List *) linitial(dropStmt->objects);
-				relationNameListLength = list_length(relationNameList);
+				List *relationNameList = (List *) linitial(dropStmt->objects);
+				int relationNameListLength = list_length(relationNameList);

				switch (relationNameListLength)
				{
@@ -294,7 +288,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
					relationNameList = lcons(schemaNameValue, relationNameList);
				}

-				relationName = &(relationNameValue->val.str);
+				char **relationName = &(relationNameValue->val.str);
				AppendShardIdToName(relationName, shardId);
			}
			else if (objectType == OBJECT_POLICY)
@@ -418,7 +412,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
			char **oldRelationName = &(renameStmt->relation->relname);
			char **newRelationName = &(renameStmt->newname);
			char **objectSchemaName = &(renameStmt->relation->schemaname);
-			int newRelationNameLength;

			/* prefix with schema name if it is not added already */
			SetSchemaNameIfNotExist(objectSchemaName, schemaName);
@@ -440,7 +433,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
			 *
			 * See also https://github.com/citusdata/citus/issues/1664
			 */
-			newRelationNameLength = strlen(*newRelationName);
+			int newRelationNameLength = strlen(*newRelationName);
			if (newRelationNameLength >= (NAMEDATALEN - 1))
			{
				ereport(ERROR,
@@ -676,10 +669,8 @@ AppendShardIdToName(char **name, uint64 shardId)
	char extendedName[NAMEDATALEN];
	int nameLength = strlen(*name);
	char shardIdAndSeparator[NAMEDATALEN];
-	int shardIdAndSeparatorLength;
	uint32 longNameHash = 0;
	int multiByteClipLength = 0;
-	int neededBytes = 0;

	if (nameLength >= NAMEDATALEN)
	{
@@ -690,7 +681,7 @@ AppendShardIdToName(char **name, uint64 shardId)

	snprintf(shardIdAndSeparator, NAMEDATALEN, "%c" UINT64_FORMAT,
			 SHARD_NAME_SEPARATOR, shardId);
-	shardIdAndSeparatorLength = strlen(shardIdAndSeparator);
+	int shardIdAndSeparatorLength = strlen(shardIdAndSeparator);

	/*
	 * If *name strlen is < (NAMEDATALEN - shardIdAndSeparatorLength),
@@ -740,7 +731,7 @@ AppendShardIdToName(char **name, uint64 shardId)
	}

	(*name) = (char *) repalloc((*name), NAMEDATALEN);
-	neededBytes = snprintf((*name), NAMEDATALEN, "%s", extendedName);
+	int neededBytes = snprintf((*name), NAMEDATALEN, "%s", extendedName);
	if (neededBytes < 0)
	{
		ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY),
@@ -764,10 +755,7 @@ shard_name(PG_FUNCTION_ARGS)
 {
	Oid relationId = PG_GETARG_OID(0);
	int64 shardId = PG_GETARG_INT64(1);
-	char *relationName = NULL;

-	Oid schemaId = InvalidOid;
-	char *schemaName = NULL;
	char *qualifiedName = NULL;

	CheckCitusVersion(ERROR);
@@ -785,7 +773,7 @@ shard_name(PG_FUNCTION_ARGS)
				errmsg("object_name does not reference a valid relation")));
	}

-	relationName = get_rel_name(relationId);
+	char *relationName = get_rel_name(relationId);

	if (relationName == NULL)
	{
@@ -795,8 +783,8 @@ shard_name(PG_FUNCTION_ARGS)

	AppendShardIdToName(&relationName, shardId);

-	schemaId = get_rel_namespace(relationId);
-	schemaName = get_namespace_name(schemaId);
+	Oid schemaId = get_rel_namespace(relationId);
+	char *schemaName = get_namespace_name(schemaId);

	if (strncmp(schemaName, "public", NAMEDATALEN) == 0)
	{

@@ -273,10 +273,9 @@ static void
 ResizeStackToMaximumDepth(void)
 {
 #ifndef WIN32
-	volatile char *stack_resizer = NULL;
	long max_stack_depth_bytes = max_stack_depth * 1024L;

-	stack_resizer = alloca(max_stack_depth_bytes);
+	volatile char *stack_resizer = alloca(max_stack_depth_bytes);

	/*
	 * Different architectures might have different directions while
@@ -345,14 +344,13 @@ StartupCitusBackend(void)
 static void
 CreateRequiredDirectories(void)
 {
-	int dirNo = 0;
	const char *subdirs[] = {
		"pg_foreign_file",
		"pg_foreign_file/cached",
		"base/" PG_JOB_CACHE_DIR
	};

-	for (dirNo = 0; dirNo < lengthof(subdirs); dirNo++)
+	for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++)
	{
		int ret = mkdir(subdirs[dirNo], S_IRWXU);
@@ -1380,15 +1378,12 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
 static void
 NodeConninfoGucAssignHook(const char *newval, void *extra)
 {
-	PQconninfoOption *optionArray = NULL;
-	PQconninfoOption *option = NULL;
-
	if (newval == NULL)
	{
		newval = "";
	}

-	optionArray = PQconninfoParse(newval, NULL);
+	PQconninfoOption *optionArray = PQconninfoParse(newval, NULL);
	if (optionArray == NULL)
	{
		ereport(FATAL, (errmsg("cannot parse node_conninfo value"),
@@ -1398,7 +1393,7 @@ NodeConninfoGucAssignHook(const char *newval, void *extra)

	ResetConnParams();

-	for (option = optionArray; option->keyword != NULL; option++)
+	for (PQconninfoOption *option = optionArray; option->keyword != NULL; option++)
	{
		if (option->val == NULL || option->val[0] == '\0')
		{

@@ -83,7 +83,6 @@ get_colocated_table_array(PG_FUNCTION_ARGS)
 {
	Oid distributedTableId = PG_GETARG_OID(0);

-	ArrayType *colocatedTablesArrayType = NULL;
	List *colocatedTableList = ColocatedTableList(distributedTableId);
	ListCell *colocatedTableCell = NULL;
	int colocatedTableCount = list_length(colocatedTableList);
@@ -100,8 +99,9 @@ get_colocated_table_array(PG_FUNCTION_ARGS)
		colocatedTableIndex++;
	}

-	colocatedTablesArrayType = DatumArrayToArrayType(colocatedTablesDatumArray,
													 colocatedTableCount, arrayTypeId);
+	ArrayType *colocatedTablesArrayType = DatumArrayToArrayType(colocatedTablesDatumArray,
																colocatedTableCount,
																arrayTypeId);

	PG_RETURN_ARRAYTYPE_P(colocatedTablesArrayType);
 }

@@ -31,15 +31,12 @@ Datum
 deparse_test(PG_FUNCTION_ARGS)
 {
	text *queryStringText = PG_GETARG_TEXT_P(0);
-	char *queryStringChar = NULL;
-	Query *query = NULL;
-	const char *deparsedQuery = NULL;

-	queryStringChar = text_to_cstring(queryStringText);
-	query = ParseQueryString(queryStringChar, NULL, 0);
+	char *queryStringChar = text_to_cstring(queryStringText);
+	Query *query = ParseQueryString(queryStringChar, NULL, 0);

	QualifyTreeNode(query->utilityStmt);
-	deparsedQuery = DeparseTreeNode(query->utilityStmt);
+	const char *deparsedQuery = DeparseTreeNode(query->utilityStmt);

	PG_RETURN_TEXT_P(cstring_to_text(deparsedQuery));
 }

@@ -50,10 +50,10 @@ deparse_shard_query_test(PG_FUNCTION_ARGS)
	{
		Node *parsetree = (Node *) lfirst(parseTreeCell);
		ListCell *queryTreeCell = NULL;
-		List *queryTreeList = NIL;

-		queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar,
											   NULL, 0, NULL);
+		List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree,
													 queryStringChar,
													 NULL, 0, NULL);

		foreach(queryTreeCell, queryTreeList)
		{

@@ -40,10 +40,7 @@ Datum
 get_adjacency_list_wait_graph(PG_FUNCTION_ARGS)
 {
	TupleDesc tupleDescriptor = NULL;
-	Tuplestorestate *tupleStore = NULL;

-	WaitGraph *waitGraph = NULL;
-	HTAB *adjacencyList = NULL;
	HASH_SEQ_STATUS status;
	TransactionNode *transactionNode = NULL;
@@ -52,9 +49,9 @@ get_adjacency_list_wait_graph(PG_FUNCTION_ARGS)

	CheckCitusVersion(ERROR);

-	tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
-	waitGraph = BuildGlobalWaitGraph();
-	adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph);
+	Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
+	WaitGraph *waitGraph = BuildGlobalWaitGraph();
+	HTAB *adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph);

	/* iterate on all nodes */
	hash_seq_init(&status, adjacencyList);

@@ -62,17 +62,14 @@ Datum
 load_shard_id_array(PG_FUNCTION_ARGS)
 {
	Oid distributedTableId = PG_GETARG_OID(0);
-	ArrayType *shardIdArrayType = NULL;
	ListCell *shardCell = NULL;
	int shardIdIndex = 0;
	Oid shardIdTypeId = INT8OID;

-	int shardIdCount = -1;
-	Datum *shardIdDatumArray = NULL;
	List *shardList = LoadShardIntervalList(distributedTableId);

-	shardIdCount = list_length(shardList);
-	shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum));
+	int shardIdCount = list_length(shardList);
+	Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum));

	foreach(shardCell, shardList)
	{
@@ -83,8 +80,8 @@ load_shard_id_array(PG_FUNCTION_ARGS)
		shardIdIndex++;
	}

-	shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
											 shardIdTypeId);
+	ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
														shardIdTypeId);

	PG_RETURN_ARRAYTYPE_P(shardIdArrayType);
 }
@@ -103,12 +100,11 @@ load_shard_interval_array(PG_FUNCTION_ARGS)
	Oid expectedType PG_USED_FOR_ASSERTS_ONLY = get_fn_expr_argtype(fcinfo->flinfo, 1);
	ShardInterval *shardInterval = LoadShardInterval(shardId);
	Datum shardIntervalArray[] = { shardInterval->minValue, shardInterval->maxValue };
-	ArrayType *shardIntervalArrayType = NULL;

	Assert(expectedType == shardInterval->valueTypeId);

-	shardIntervalArrayType = DatumArrayToArrayType(shardIntervalArray, 2,
												   shardInterval->valueTypeId);
+	ArrayType *shardIntervalArrayType = DatumArrayToArrayType(shardIntervalArray, 2,
															  shardInterval->valueTypeId);

	PG_RETURN_ARRAYTYPE_P(shardIntervalArrayType);
 }
@@ -126,12 +122,9 @@ load_shard_placement_array(PG_FUNCTION_ARGS)
 {
	int64 shardId = PG_GETARG_INT64(0);
	bool onlyFinalized = PG_GETARG_BOOL(1);
-	ArrayType *placementArrayType = NULL;
	List *placementList = NIL;
	ListCell *placementCell = NULL;
-	int placementCount = -1;
	int placementIndex = 0;
-	Datum *placementDatumArray = NULL;
	Oid placementTypeId = TEXTOID;
	StringInfo placementInfo = makeStringInfo();
@@ -146,8 +139,8 @@ load_shard_placement_array(PG_FUNCTION_ARGS)

	placementList = SortList(placementList, CompareShardPlacementsByWorker);

-	placementCount = list_length(placementList);
-	placementDatumArray = palloc0(placementCount * sizeof(Datum));
+	int placementCount = list_length(placementList);
+	Datum *placementDatumArray = palloc0(placementCount * sizeof(Datum));

	foreach(placementCell, placementList)
	{
@@ -160,8 +153,9 @@ load_shard_placement_array(PG_FUNCTION_ARGS)
		resetStringInfo(placementInfo);
	}

-	placementArrayType = DatumArrayToArrayType(placementDatumArray, placementCount,
											   placementTypeId);
+	ArrayType *placementArrayType = DatumArrayToArrayType(placementDatumArray,
														  placementCount,
														  placementTypeId);

	PG_RETURN_ARRAYTYPE_P(placementArrayType);
 }
@@ -224,14 +218,12 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS)
	StringInfo minInfo = makeStringInfo();
	StringInfo maxInfo = makeStringInfo();
	uint64 newShardId = GetNextShardId();
-	text *maxInfoText = NULL;
-	text *minInfoText = NULL;

	appendStringInfo(minInfo, "%d", INT32_MIN);
	appendStringInfo(maxInfo, "%d", INT32_MAX);

-	minInfoText = cstring_to_text(minInfo->data);
-	maxInfoText = cstring_to_text(maxInfo->data);
+	text *minInfoText = cstring_to_text(minInfo->data);
+	text *maxInfoText = cstring_to_text(maxInfo->data);

	InsertShardRow(distributedTableId, newShardId, SHARD_STORAGE_TABLE, minInfoText,
				   maxInfoText);
@@ -270,10 +262,10 @@ relation_count_in_query(PG_FUNCTION_ARGS)
	{
		Node *parsetree = (Node *) lfirst(parseTreeCell);
		ListCell *queryTreeCell = NULL;
-		List *queryTreeList = NIL;

-		queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar,
											   NULL, 0, NULL);
+		List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree,
													 queryStringChar,
													 NULL, 0, NULL);

		foreach(queryTreeCell, queryTreeList)
		{

@@ -41,17 +41,14 @@ master_metadata_snapshot(PG_FUNCTION_ARGS)
	List *createSnapshotCommands = MetadataCreateCommands();
	List *snapshotCommandList = NIL;
	ListCell *snapshotCommandCell = NULL;
-	int snapshotCommandCount = 0;
-	Datum *snapshotCommandDatumArray = NULL;
-	ArrayType *snapshotCommandArrayType = NULL;
	int snapshotCommandIndex = 0;
	Oid ddlCommandTypeId = TEXTOID;

	snapshotCommandList = list_concat(snapshotCommandList, dropSnapshotCommands);
	snapshotCommandList = list_concat(snapshotCommandList, createSnapshotCommands);

-	snapshotCommandCount = list_length(snapshotCommandList);
-	snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum));
+	int snapshotCommandCount = list_length(snapshotCommandList);
+	Datum *snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum));

	foreach(snapshotCommandCell, snapshotCommandList)
	{
@@ -62,9 +59,9 @@ master_metadata_snapshot(PG_FUNCTION_ARGS)
		snapshotCommandIndex++;
	}

-	snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray,
													 snapshotCommandCount,
													 ddlCommandTypeId);
+	ArrayType *snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray,
																snapshotCommandCount,
																ddlCommandTypeId);

	PG_RETURN_ARRAYTYPE_P(snapshotCommandArrayType);
 }
@@ -78,13 +75,10 @@ Datum
 wait_until_metadata_sync(PG_FUNCTION_ARGS)
 {
	uint32 timeout = PG_GETARG_UINT32(0);
-	int waitResult = 0;

	List *workerList = ActivePrimaryWorkerNodeList(NoLock);
	ListCell *workerCell = NULL;
	bool waitNotifications = false;
-	MultiConnection *connection = NULL;
-	int waitFlags = 0;

	foreach(workerCell, workerList)
	{
@@ -109,13 +103,13 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
		PG_RETURN_VOID();
	}

-	connection = GetNodeConnection(FORCE_NEW_CONNECTION,
								   "localhost", PostPortNumber);
+	MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION,
													"localhost", PostPortNumber);
	ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);

-	waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH;
-	waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn),
								   timeout, 0);
+	int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH;
+	int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn),
									   timeout, 0);
	if (waitResult & WL_POSTMASTER_DEATH)
	{
		ereport(ERROR, (errmsg("postmaster was shut down, exiting")));

@@ -95,8 +95,7 @@ show_progress(PG_FUNCTION_ARGS)
		ProgressMonitorData *monitor = lfirst(monitorCell);
		uint64 *steps = monitor->steps;

-		int stepIndex = 0;
-		for (stepIndex = 0; stepIndex < monitor->stepCount; stepIndex++)
+		for (int stepIndex = 0; stepIndex < monitor->stepCount; stepIndex++)
		{
			uint64 step = steps[stepIndex];

@@ -202,20 +202,16 @@ MakeTextPartitionExpression(Oid distributedTableId, text *value)
 static ArrayType *
 PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList)
 {
-	ArrayType *shardIdArrayType = NULL;
	ListCell *shardCell = NULL;
	int shardIdIndex = 0;
	Oid shardIdTypeId = INT8OID;
	Index tableId = 1;

-	List *shardList = NIL;
-	int shardIdCount = -1;
-	Datum *shardIdDatumArray = NULL;
-
-	shardList = PruneShards(distributedTableId, tableId, whereClauseList, NULL);
+	List *shardList = PruneShards(distributedTableId, tableId, whereClauseList, NULL);

-	shardIdCount = list_length(shardList);
-	shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum));
+	int shardIdCount = list_length(shardList);
+	Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum));

	foreach(shardCell, shardList)
	{
@@ -226,8 +222,8 @@ PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList)
		shardIdIndex++;
	}

-	shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
											 shardIdTypeId);
+	ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
														shardIdTypeId);

	return shardIdArrayType;
 }
@@ -240,8 +236,6 @@ PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList)
 static ArrayType *
 SortedShardIntervalArray(Oid distributedTableId)
 {
-	ArrayType *shardIdArrayType = NULL;
-	int shardIndex = 0;
	Oid shardIdTypeId = INT8OID;

	DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId);
@@ -249,7 +243,7 @@ SortedShardIntervalArray(Oid distributedTableId)
	int shardIdCount = cacheEntry->shardIntervalArrayLength;
	Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum));

-	for (shardIndex = 0; shardIndex < shardIdCount; ++shardIndex)
+	for (int shardIndex = 0; shardIndex < shardIdCount; ++shardIndex)
	{
		ShardInterval *shardId = shardIntervalArray[shardIndex];
		Datum shardIdDatum = Int64GetDatum(shardId->shardId);
@@ -257,8 +251,8 @@ SortedShardIntervalArray(Oid distributedTableId)
		shardIdDatumArray[shardIndex] = shardIdDatum;
	}

-	shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
											 shardIdTypeId);
+	ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount,
														shardIdTypeId);

	return shardIdArrayType;
 }

@@ -136,7 +136,6 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)
	StringInfo workerProcessStringInfo = makeStringInfo();
	MultiConnection *localConnection = GetNodeConnection(0, LOCAL_HOST_NAME,
														 PostPortNumber);
-	Oid pgReloadConfOid = InvalidOid;

	if (!singleConnection)
	{
@@ -160,7 +159,7 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)
	CloseConnection(localConnection);

	/* Call pg_reload_conf UDF to update changed GUCs above on each backend */
-	pgReloadConfOid = FunctionOid("pg_catalog", "pg_reload_conf", 0);
+	Oid pgReloadConfOid = FunctionOid("pg_catalog", "pg_reload_conf", 0);
	OidFunctionCall0(pgReloadConfOid);
@@ -197,21 +196,19 @@ GetRemoteProcessId()
 {
	StringInfo queryStringInfo = makeStringInfo();
	PGresult *result = NULL;
-	int64 rowCount = 0;
-	int64 resultValue = 0;

	appendStringInfo(queryStringInfo, GET_PROCESS_ID);

	ExecuteOptionalRemoteCommand(singleConnection, queryStringInfo->data, &result);

-	rowCount = PQntuples(result);
+	int64 rowCount = PQntuples(result);

	if (rowCount != 1)
	{
		PG_RETURN_VOID();
	}

-	resultValue = ParseIntField(result, 0, 0);
+	int64 resultValue = ParseIntField(result, 0, 0);

	PQclear(result);
	ClearResults(singleConnection, false);

@@ -155,12 +155,10 @@ Datum
 get_current_transaction_id(PG_FUNCTION_ARGS)
 {
	TupleDesc tupleDescriptor = NULL;
-	HeapTuple heapTuple = NULL;

	Datum values[5];
	bool isNulls[5];

-	DistributedTransactionId *distributedTransctionId = NULL;
-
	CheckCitusVersion(ERROR);
@@ -176,7 +174,8 @@ get_current_transaction_id(PG_FUNCTION_ARGS)
		ereport(ERROR, (errmsg("backend is not ready for distributed transactions")));
	}

-	distributedTransctionId = GetCurrentDistributedTransactionId();
+	DistributedTransactionId *distributedTransctionId =
+		GetCurrentDistributedTransactionId();

	memset(values, 0, sizeof(values));
	memset(isNulls, false, sizeof(isNulls));
@@ -198,7 +197,7 @@ get_current_transaction_id(PG_FUNCTION_ARGS)
		isNulls[4] = true;
	}

-	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
+	HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(heapTuple));
 }
@@ -215,7 +214,6 @@ Datum
 get_global_active_transactions(PG_FUNCTION_ARGS)
 {
	TupleDesc tupleDescriptor = NULL;
-	Tuplestorestate *tupleStore = NULL;
	List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
	ListCell *workerNodeCell = NULL;
	List *connectionList = NIL;
@@ -223,7 +221,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
	StringInfo queryToSend = makeStringInfo();

	CheckCitusVersion(ERROR);
-	tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
+	Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);

	appendStringInfo(queryToSend, GET_ACTIVE_TRANSACTION_QUERY);
@@ -236,7 +234,6 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
		char *nodeName = workerNode->workerName;
		int nodePort = workerNode->workerPort;
-		MultiConnection *connection = NULL;
		int connectionFlags = 0;

		if (workerNode->groupId == GetLocalGroupId())
@@ -245,7 +242,8 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
			continue;
		}

-		connection = StartNodeConnection(connectionFlags, nodeName, nodePort);
+		MultiConnection *connection = StartNodeConnection(connectionFlags, nodeName,
														  nodePort);

		connectionList = lappend(connectionList, connection);
	}
@@ -256,9 +254,8 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
	foreach(connectionCell, connectionList)
	{
		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		int querySent = false;

-		querySent = SendRemoteCommand(connection, queryToSend->data);
+		int querySent = SendRemoteCommand(connection, queryToSend->data);
		if (querySent == 0)
		{
			ReportConnectionError(connection, WARNING);
@@ -269,28 +266,24 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
	foreach(connectionCell, connectionList)
	{
		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		PGresult *result = NULL;
		bool raiseInterrupts = true;
		Datum values[ACTIVE_TRANSACTION_COLUMN_COUNT];
		bool isNulls[ACTIVE_TRANSACTION_COLUMN_COUNT];
-		int64 rowIndex = 0;
-		int64 rowCount = 0;
-		int64 colCount = 0;

		if (PQstatus(connection->pgConn) != CONNECTION_OK)
		{
			continue;
		}

-		result = GetRemoteCommandResult(connection, raiseInterrupts);
+		PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
		if (!IsResponseOK(result))
		{
			ReportResultError(connection, result, WARNING);
			continue;
		}

-		rowCount = PQntuples(result);
-		colCount = PQnfields(result);
+		int64 rowCount = PQntuples(result);
+		int64 colCount = PQnfields(result);

		/* Although it is not expected */
		if (colCount != ACTIVE_TRANSACTION_COLUMN_COUNT)
@@ -300,7 +293,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
			continue;
		}

-		for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
+		for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
		{
			memset(values, 0, sizeof(values));
			memset(isNulls, false, sizeof(isNulls));
@@ -334,10 +327,9 @@ Datum
 get_all_active_transactions(PG_FUNCTION_ARGS)
 {
	TupleDesc tupleDescriptor = NULL;
-	Tuplestorestate *tupleStore = NULL;

	CheckCitusVersion(ERROR);
-	tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
+	Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);

	StoreAllActiveTransactions(tupleStore, tupleDescriptor);
@@ -355,7 +347,6 @@ get_all_active_transactions(PG_FUNCTION_ARGS)
 static void
 StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescriptor)
 {
-	int backendIndex = 0;
	Datum values[ACTIVE_TRANSACTION_COLUMN_COUNT];
	bool isNulls[ACTIVE_TRANSACTION_COLUMN_COUNT];
	bool showAllTransactions = superuser();
@@ -377,18 +368,14 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
	/* we're reading all distributed transactions, prevent new backends */
	LockBackendSharedMemory(LW_SHARED);

-	for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
+	for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
	{
		BackendData *currentBackend =
			&backendManagementShmemData->backends[backendIndex];
-		bool coordinatorOriginatedQuery = false;

		/* to work on data after releasing the spinlock to protect against errors */
-		Oid databaseId = InvalidOid;
-		int backendPid = -1;
		int initiatorNodeIdentifier = -1;
		uint64 transactionNumber = 0;
-		TimestampTz transactionIdTimestamp = 0;

		SpinLockAcquire(&currentBackend->mutex);
@@ -409,8 +396,8 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
			continue;
		}

-		databaseId = currentBackend->databaseId;
-		backendPid = ProcGlobal->allProcs[backendIndex].pid;
+		Oid databaseId = currentBackend->databaseId;
+		int backendPid = ProcGlobal->allProcs[backendIndex].pid;
		initiatorNodeIdentifier = currentBackend->citusBackend.initiatorNodeIdentifier;

		/*
@@ -421,10 +408,11 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
		 * field with the same name. The reason is that it also covers backends that are not
		 * inside a distributed transaction.
		 */
-		coordinatorOriginatedQuery = currentBackend->citusBackend.transactionOriginator;
+		bool coordinatorOriginatedQuery =
+			currentBackend->citusBackend.transactionOriginator;

		transactionNumber = currentBackend->transactionId.transactionNumber;
-		transactionIdTimestamp = currentBackend->transactionId.timestamp;
+		TimestampTz transactionIdTimestamp = currentBackend->transactionId.timestamp;

		SpinLockRelease(&currentBackend->mutex);
@@ -489,8 +477,6 @@ BackendManagementShmemInit(void)

	if (!alreadyInitialized)
	{
-		int backendIndex = 0;
-		int totalProcs = 0;
		char *trancheName = "Backend Management Tranche";

		NamedLWLockTranche *namedLockTranche =
@@ -518,8 +504,8 @@ BackendManagementShmemInit(void)
		 * We also initiate initiatorNodeIdentifier to -1, which can never be
		 * used as a node id.
		 */
-		totalProcs = TotalProcCount();
-		for (backendIndex = 0; backendIndex < totalProcs; ++backendIndex)
+		int totalProcs = TotalProcCount();
+		for (int backendIndex = 0; backendIndex < totalProcs; ++backendIndex)
		{
			BackendData *backendData =
				&backendManagementShmemData->backends[backendIndex];
@@ -809,7 +795,6 @@ CurrentDistributedTransactionNumber(void)
 void
 GetBackendDataForProc(PGPROC *proc, BackendData *result)
 {
-	BackendData *backendData = NULL;
	int pgprocno = proc->pgprocno;

	if (proc->lockGroupLeader != NULL)
@@ -817,7 +802,7 @@ GetBackendDataForProc(PGPROC *proc, BackendData *result)
		pgprocno = proc->lockGroupLeader->pgprocno;
	}

-	backendData = &backendManagementShmemData->backends[pgprocno];
+	BackendData *backendData = &backendManagementShmemData->backends[pgprocno];

	SpinLockAcquire(&backendData->mutex);
@@ -903,14 +888,12 @@ List *
 ActiveDistributedTransactionNumbers(void)
 {
	List *activeTransactionNumberList = NIL;
-	int curBackend = 0;

	/* build list of starting procs */
-	for (curBackend = 0; curBackend < MaxBackends; curBackend++)
+	for (int curBackend = 0; curBackend < MaxBackends; curBackend++)
	{
		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
		BackendData currentBackendData;
-		uint64 *transactionNumber = NULL;

		if (currentProc->pid == 0)
		{
@@ -932,7 +915,7 @@ ActiveDistributedTransactionNumbers(void)
			continue;
		}

-		transactionNumber = (uint64 *) palloc0(sizeof(uint64));
+		uint64 *transactionNumber = (uint64 *) palloc0(sizeof(uint64));
		*transactionNumber = currentBackendData.transactionId.transactionNumber;

		activeTransactionNumberList = lappend(activeTransactionNumberList,

@@ -269,11 +269,9 @@ PG_FUNCTION_INFO_V1(citus_worker_stat_activity);
 Datum
 citus_dist_stat_activity(PG_FUNCTION_ARGS)
 {
-	List *citusDistStatStatements = NIL;
-
	CheckCitusVersion(ERROR);

-	citusDistStatStatements = CitusStatActivity(CITUS_DIST_STAT_ACTIVITY_QUERY);
+	List *citusDistStatStatements = CitusStatActivity(CITUS_DIST_STAT_ACTIVITY_QUERY);

	ReturnCitusDistStats(citusDistStatStatements, fcinfo);
@@ -289,11 +287,9 @@ citus_dist_stat_activity(PG_FUNCTION_ARGS)
 Datum
 citus_worker_stat_activity(PG_FUNCTION_ARGS)
 {
-	List *citusWorkerStatStatements = NIL;
-
	CheckCitusVersion(ERROR);

-	citusWorkerStatStatements = CitusStatActivity(CITUS_WORKER_STAT_ACTIVITY_QUERY);
+	List *citusWorkerStatStatements = CitusStatActivity(CITUS_WORKER_STAT_ACTIVITY_QUERY);

	ReturnCitusDistStats(citusWorkerStatStatements, fcinfo);
@@ -315,11 +311,8 @@ citus_worker_stat_activity(PG_FUNCTION_ARGS)
 static List *
 CitusStatActivity(const char *statQuery)
 {
-	List *citusStatsList = NIL;
-
	List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
	ListCell *workerNodeCell = NULL;
-	char *nodeUser = NULL;
	List *connectionList = NIL;
	ListCell *connectionCell = NULL;
@@ -329,14 +322,14 @@ CitusStatActivity(const char *statQuery)
	 * the authentication for self-connection via any user who calls the citus
	 * stat activity functions.
	 */
-	citusStatsList = GetLocalNodeCitusDistStat(statQuery);
+	List *citusStatsList = GetLocalNodeCitusDistStat(statQuery);

	/*
	 * We prefer to connect with the current user to the remote nodes. This will
	 * ensure that we have the same privilage restrictions that pg_stat_activity
	 * enforces.
	 */
-	nodeUser = CurrentUserName();
+	char *nodeUser = CurrentUserName();

	/* open connections in parallel */
	foreach(workerNodeCell, workerNodeList)
@@ -344,7 +337,6 @@ CitusStatActivity(const char *statQuery)
		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
		char *nodeName = workerNode->workerName;
		int nodePort = workerNode->workerPort;
-		MultiConnection *connection = NULL;
		int connectionFlags = 0;

		if (workerNode->groupId == GetLocalGroupId())
@@ -353,8 +345,9 @@ CitusStatActivity(const char *statQuery)
			continue;
		}

-		connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
													 nodeUser, NULL);
+		MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
																	  nodeName, nodePort,
																	  nodeUser, NULL);

		connectionList = lappend(connectionList, connection);
	}
@@ -365,9 +358,8 @@ CitusStatActivity(const char *statQuery)
	foreach(connectionCell, connectionList)
	{
		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		int querySent = false;

-		querySent = SendRemoteCommand(connection, statQuery);
+		int querySent = SendRemoteCommand(connection, statQuery);
		if (querySent == 0)
		{
			ReportConnectionError(connection, WARNING);
@@ -378,21 +370,17 @@ CitusStatActivity(const char *statQuery)
	foreach(connectionCell, connectionList)
	{
		MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-		PGresult *result = NULL;
		bool raiseInterrupts = true;
-		int64 rowIndex = 0;
-		int64 rowCount = 0;
-		int64 colCount = 0;

-		result = GetRemoteCommandResult(connection, raiseInterrupts);
+		PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
		if (!IsResponseOK(result))
		{
			ReportResultError(connection, result, WARNING);
			continue;
		}

-		rowCount = PQntuples(result);
-		colCount = PQnfields(result);
+		int64 rowCount = PQntuples(result);
+		int64 colCount = PQnfields(result);

		if (colCount != CITUS_DIST_STAT_ACTIVITY_QUERY_COLS)
		{
@@ -405,7 +393,7 @@ CitusStatActivity(const char *statQuery)
			continue;
		}

-		for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
+		for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
		{
			CitusDistStat *citusDistStat = ParseCitusDistStat(result, rowIndex);

@@ -436,9 +424,7 @@ GetLocalNodeCitusDistStat(const char *statQuery)
 {
	List *citusStatsList = NIL;

-	List *workerNodeList = NIL;
	ListCell *workerNodeCell = NULL;
-	int localGroupId = -1;

	if (IsCoordinator())
	{
@@ -452,10 +438,10 @@ GetLocalNodeCitusDistStat(const char *statQuery)
		return citusStatsList;
	}

-	localGroupId = GetLocalGroupId();
+	int localGroupId = GetLocalGroupId();

	/* get the current worker's node stats */
-	workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
+	List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock);
	foreach(workerNodeCell, workerNodeList)
	{
		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
@@ -488,10 +474,9 @@ static CitusDistStat *
 ParseCitusDistStat(PGresult *result, int64 rowIndex)
 {
	CitusDistStat *citusDistStat = (CitusDistStat *) palloc0(sizeof(CitusDistStat));
-	int initiator_node_identifier = 0;

-	initiator_node_identifier =
+	int initiator_node_identifier =
		PQgetisnull(result, rowIndex, 0) ? -1 : ParseIntField(result, rowIndex, 0);

	ReplaceInitiatorNodeIdentifier(initiator_node_identifier, citusDistStat);
@@ -591,14 +576,11 @@ static List *
 LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port)
 {
	List *localNodeCitusDistStatList = NIL;
-	int spiConnectionResult = 0;
-	int spiQueryResult = 0;
	bool readOnly = true;
	uint32 rowIndex = 0;

	MemoryContext upperContext = CurrentMemoryContext, oldContext = NULL;

-	spiConnectionResult = SPI_connect();
+	int spiConnectionResult = SPI_connect();
	if (spiConnectionResult != SPI_OK_CONNECT)
	{
		ereport(WARNING, (errmsg("could not connect to SPI manager to get "
@@ -609,7 +591,7 @@ LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port)
		return NIL;
	}

-	spiQueryResult = SPI_execute(statQuery, readOnly, 0);
+	int spiQueryResult = SPI_execute(statQuery, readOnly, 0);
	if (spiQueryResult != SPI_OK_SELECT)
	{
		ereport(WARNING, (errmsg("execution was not successful while trying to get "
@@ -629,15 +611,13 @@ LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port)
	 */
	oldContext = MemoryContextSwitchTo(upperContext);
|
||||
|
||||
for (rowIndex = 0; rowIndex < SPI_processed; rowIndex++)
|
||||
for (uint32 rowIndex = 0; rowIndex < SPI_processed; rowIndex++)
|
||||
{
|
||||
HeapTuple row = NULL;
|
||||
TupleDesc rowDescriptor = SPI_tuptable->tupdesc;
|
||||
CitusDistStat *citusDistStat = NULL;
|
||||
|
||||
/* we use pointers from the tuple, so copy it before processing */
|
||||
row = SPI_copytuple(SPI_tuptable->vals[rowIndex]);
|
||||
citusDistStat = HeapTupleToCitusDistStat(row, rowDescriptor);
|
||||
HeapTuple row = SPI_copytuple(SPI_tuptable->vals[rowIndex]);
|
||||
CitusDistStat *citusDistStat = HeapTupleToCitusDistStat(row, rowDescriptor);
|
||||
|
||||
/*
|
||||
* Add the query_host_name and query_host_port which denote where
|
||||
|
@ -670,9 +650,8 @@ static CitusDistStat *
|
|||
HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor)
|
||||
{
|
||||
CitusDistStat *citusDistStat = (CitusDistStat *) palloc0(sizeof(CitusDistStat));
|
||||
int initiator_node_identifier = 0;
|
||||
|
||||
initiator_node_identifier = ParseIntFieldFromHeapTuple(result, rowDescriptor, 1);
|
||||
int initiator_node_identifier = ParseIntFieldFromHeapTuple(result, rowDescriptor, 1);
|
||||
|
||||
ReplaceInitiatorNodeIdentifier(initiator_node_identifier, citusDistStat);
|
||||
|
||||
|
@ -721,10 +700,9 @@ HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor)
|
|||
static int64
|
||||
ParseIntFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
return 0;
|
||||
|
@ -741,10 +719,9 @@ ParseIntFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
|||
static text *
|
||||
ParseTextFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
return NULL;
|
||||
|
@ -761,10 +738,9 @@ ParseTextFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
|||
static Name
|
||||
ParseNameFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
return NULL;
|
||||
|
@ -781,10 +757,9 @@ ParseNameFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
|||
static inet *
|
||||
ParseInetFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
return NULL;
|
||||
|
@ -801,10 +776,9 @@ ParseInetFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
|||
static TimestampTz
|
||||
ParseTimestampTzFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
return DT_NOBEGIN;
|
||||
|
@ -821,10 +795,9 @@ ParseTimestampTzFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIn
|
|||
static TransactionId
|
||||
ParseXIDFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
||||
{
|
||||
Datum resultDatum;
|
||||
bool isNull = false;
|
||||
|
||||
resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull);
|
||||
if (isNull)
|
||||
{
|
||||
/*
|
||||
|
@ -845,18 +818,14 @@ ParseXIDFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex)
|
|||
static text *
|
||||
ParseTextField(PGresult *result, int rowIndex, int colIndex)
|
||||
{
|
||||
char *resultString = NULL;
|
||||
Datum resultStringDatum = 0;
|
||||
Datum textDatum = 0;
|
||||
|
||||
if (PQgetisnull(result, rowIndex, colIndex))
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
resultStringDatum = CStringGetDatum(resultString);
|
||||
textDatum = DirectFunctionCall1(textin, resultStringDatum);
|
||||
char *resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
Datum resultStringDatum = CStringGetDatum(resultString);
|
||||
Datum textDatum = DirectFunctionCall1(textin, resultStringDatum);
|
||||
|
||||
return (text *) DatumGetPointer(textDatum);
|
||||
}
|
||||
|
@ -869,8 +838,6 @@ ParseTextField(PGresult *result, int rowIndex, int colIndex)
|
|||
static Name
|
||||
ParseNameField(PGresult *result, int rowIndex, int colIndex)
|
||||
{
|
||||
char *resultString = NULL;
|
||||
Datum resultStringDatum = 0;
|
||||
Datum nameDatum = 0;
|
||||
|
||||
if (PQgetisnull(result, rowIndex, colIndex))
|
||||
|
@ -878,8 +845,8 @@ ParseNameField(PGresult *result, int rowIndex, int colIndex)
|
|||
return (Name) nameDatum;
|
||||
}
|
||||
|
||||
resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
resultStringDatum = CStringGetDatum(resultString);
|
||||
char *resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
Datum resultStringDatum = CStringGetDatum(resultString);
|
||||
nameDatum = DirectFunctionCall1(namein, resultStringDatum);
|
||||
|
||||
return (Name) DatumGetPointer(nameDatum);
|
||||
|
@ -893,18 +860,14 @@ ParseNameField(PGresult *result, int rowIndex, int colIndex)
|
|||
static inet *
|
||||
ParseInetField(PGresult *result, int rowIndex, int colIndex)
|
||||
{
|
||||
char *resultString = NULL;
|
||||
Datum resultStringDatum = 0;
|
||||
Datum inetDatum = 0;
|
||||
|
||||
if (PQgetisnull(result, rowIndex, colIndex))
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
resultStringDatum = CStringGetDatum(resultString);
|
||||
inetDatum = DirectFunctionCall1(inet_in, resultStringDatum);
|
||||
char *resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
Datum resultStringDatum = CStringGetDatum(resultString);
|
||||
Datum inetDatum = DirectFunctionCall1(inet_in, resultStringDatum);
|
||||
|
||||
return DatumGetInetP(inetDatum);
|
||||
}
|
||||
|
@ -917,10 +880,6 @@ ParseInetField(PGresult *result, int rowIndex, int colIndex)
|
|||
static TransactionId
|
||||
ParseXIDField(PGresult *result, int rowIndex, int colIndex)
|
||||
{
|
||||
char *resultString = NULL;
|
||||
Datum resultStringDatum = 0;
|
||||
Datum XIDDatum = 0;
|
||||
|
||||
if (PQgetisnull(result, rowIndex, colIndex))
|
||||
{
|
||||
/*
|
||||
|
@ -930,9 +889,9 @@ ParseXIDField(PGresult *result, int rowIndex, int colIndex)
|
|||
return PG_UINT32_MAX;
|
||||
}
|
||||
|
||||
resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
resultStringDatum = CStringGetDatum(resultString);
|
||||
XIDDatum = DirectFunctionCall1(xidin, resultStringDatum);
|
||||
char *resultString = PQgetvalue(result, rowIndex, colIndex);
|
||||
Datum resultStringDatum = CStringGetDatum(resultString);
|
||||
Datum XIDDatum = DirectFunctionCall1(xidin, resultStringDatum);
|
||||
|
||||
return DatumGetTransactionId(XIDDatum);
|
||||
}
|
||||
|
|
|
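Every hunk above follows the same mechanical rewrite: a variable that was declared at the top of its block and assigned later is now declared at its first assignment, and the now-redundant up-front declaration is dropped. A minimal before/after sketch of the pattern; CountResultRows is a hypothetical helper, only PQntuples is real libpq:

    #include <libpq-fe.h>

    /* before: declaration separated from first assignment */
    int
    CountResultRows(PGresult *result)
    {
        int rowCount = 0;

        rowCount = PQntuples(result);
        return rowCount;
    }

    /* after: declaration merged into the first assignment, which also
     * makes accidental use-before-assignment impossible */
    int
    CountResultRows(PGresult *result)
    {
        int rowCount = PQntuples(result);
        return rowCount;
    }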
@@ -103,11 +103,8 @@ check_distributed_deadlocks(PG_FUNCTION_ARGS)
 bool
 CheckForDistributedDeadlocks(void)
 {
-    WaitGraph *waitGraph = NULL;
-    HTAB *adjacencyLists = NULL;
     HASH_SEQ_STATUS status;
     TransactionNode *transactionNode = NULL;
-    int edgeCount = 0;
     int localGroupId = GetLocalGroupId();
     List *workerNodeList = ActiveReadableNodeList();

@@ -122,10 +119,10 @@ CheckForDistributedDeadlocks(void)
         return false;
     }

-    waitGraph = BuildGlobalWaitGraph();
-    adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph);
+    WaitGraph *waitGraph = BuildGlobalWaitGraph();
+    HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph);

-    edgeCount = waitGraph->edgeCount;
+    int edgeCount = waitGraph->edgeCount;

     /*
      * We iterate on transaction nodes and search for deadlocks where the

@@ -134,7 +131,6 @@ CheckForDistributedDeadlocks(void)
     hash_seq_init(&status, adjacencyLists);
     while ((transactionNode = (TransactionNode *) hash_seq_search(&status)) != 0)
     {
-        bool deadlockFound = false;
         List *deadlockPath = NIL;

         /*

@@ -151,9 +147,9 @@ CheckForDistributedDeadlocks(void)

         ResetVisitedFields(adjacencyLists);

-        deadlockFound = CheckDeadlockForTransactionNode(transactionNode,
-                                                        maxStackDepth,
-                                                        &deadlockPath);
+        bool deadlockFound = CheckDeadlockForTransactionNode(transactionNode,
+                                                             maxStackDepth,
+                                                             &deadlockPath);
         if (deadlockFound)
         {
             TransactionNode *youngestAliveTransaction = NULL;

@@ -184,8 +180,6 @@ CheckForDistributedDeadlocks(void)
                     (TransactionNode *) lfirst(participantTransactionCell);
                 bool transactionAssociatedWithProc =
                     AssociateDistributedTransactionWithBackendProc(currentNode);
-                TimestampTz youngestTimestamp = 0;
-                TimestampTz currentTimestamp = 0;

                 LogTransactionNode(currentNode);

@@ -201,8 +195,9 @@ CheckForDistributedDeadlocks(void)
                     continue;
                 }

-                youngestTimestamp = youngestAliveTransaction->transactionId.timestamp;
-                currentTimestamp = currentNode->transactionId.timestamp;
+                TimestampTz youngestTimestamp =
+                    youngestAliveTransaction->transactionId.timestamp;
+                TimestampTz currentTimestamp = currentNode->transactionId.timestamp;
                 if (timestamptz_cmp_internal(currentTimestamp, youngestTimestamp) == 1)
                 {
                     youngestAliveTransaction = currentNode;

@@ -258,7 +253,6 @@ CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode,
     /* traverse the graph and search for the deadlocks */
     while (toBeVisitedNodes != NIL)
     {
-        int currentStackDepth;
         QueuedTransactionNode *queuedTransactionNode =
             (QueuedTransactionNode *) linitial(toBeVisitedNodes);
         TransactionNode *currentTransactionNode = queuedTransactionNode->transactionNode;

@@ -284,7 +278,7 @@ CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode,
         currentTransactionNode->transactionVisited = true;

         /* set the stack's corresponding element with the current node */
-        currentStackDepth = queuedTransactionNode->currentStackDepth;
+        int currentStackDepth = queuedTransactionNode->currentStackDepth;
         Assert(currentStackDepth < maxStackDepth);
         transactionNodeStack[currentStackDepth] = currentTransactionNode;

@@ -335,11 +329,10 @@ BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode,
                       List **deadlockPath)
 {
     int deadlockStackDepth = cycledTransactionNode->currentStackDepth;
-    int stackIndex = 0;

     *deadlockPath = NIL;

-    for (stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++)
+    for (int stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++)
     {
         *deadlockPath = lappend(*deadlockPath, transactionNodeStack[stackIndex]);
     }

@@ -380,13 +373,10 @@ ResetVisitedFields(HTAB *adjacencyList)
 static bool
 AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
 {
-    int backendIndex = 0;
-
-    for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
+    for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
     {
         PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
         BackendData currentBackendData;
-        DistributedTransactionId *currentTransactionId = NULL;

         /* we're not interested in processes that are not active or waiting on a lock */
         if (currentProc->pid <= 0)

@@ -402,7 +392,8 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
             continue;
         }

-        currentTransactionId = &currentBackendData.transactionId;
+        DistributedTransactionId *currentTransactionId =
+            &currentBackendData.transactionId;

         if (currentTransactionId->transactionNumber !=
             transactionNode->transactionId.transactionNumber)

@@ -455,9 +446,6 @@ extern HTAB *
 BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph)
 {
     HASHCTL info;
-    uint32 hashFlags = 0;
-    HTAB *adjacencyList = NULL;
-    int edgeIndex = 0;
     int edgeCount = waitGraph->edgeCount;

     memset(&info, 0, sizeof(info));

@@ -466,15 +454,14 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph)
     info.hash = DistributedTransactionIdHash;
     info.match = DistributedTransactionIdCompare;
     info.hcxt = CurrentMemoryContext;
-    hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);
+    uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);

-    adjacencyList = hash_create("distributed deadlock detection", 64, &info, hashFlags);
+    HTAB *adjacencyList = hash_create("distributed deadlock detection", 64, &info,
+                                      hashFlags);

-    for (edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++)
+    for (int edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++)
     {
         WaitEdge *edge = &waitGraph->edges[edgeIndex];
-        TransactionNode *waitingTransaction = NULL;
-        TransactionNode *blockingTransaction = NULL;
         bool transactionOriginator = false;

         DistributedTransactionId waitingId = {

@@ -491,9 +478,9 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph)
             edge->blockingTransactionStamp
         };

-        waitingTransaction =
+        TransactionNode *waitingTransaction =
             GetOrCreateTransactionNode(adjacencyList, &waitingId);
-        blockingTransaction =
+        TransactionNode *blockingTransaction =
             GetOrCreateTransactionNode(adjacencyList, &blockingId);

         waitingTransaction->waitsFor = lappend(waitingTransaction->waitsFor,

@@ -512,11 +499,12 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph)
 static TransactionNode *
 GetOrCreateTransactionNode(HTAB *adjacencyList, DistributedTransactionId *transactionId)
 {
-    TransactionNode *transactionNode = NULL;
     bool found = false;

-    transactionNode = (TransactionNode *) hash_search(adjacencyList, transactionId,
-                                                      HASH_ENTER, &found);
+    TransactionNode *transactionNode = (TransactionNode *) hash_search(adjacencyList,
+                                                                       transactionId,
+                                                                       HASH_ENTER,
+                                                                       &found);
     if (!found)
     {
         transactionNode->waitsFor = NIL;

@@ -535,9 +523,8 @@ static uint32
 DistributedTransactionIdHash(const void *key, Size keysize)
 {
     DistributedTransactionId *entry = (DistributedTransactionId *) key;
-    uint32 hash = 0;

-    hash = hash_uint32(entry->initiatorNodeIdentifier);
+    uint32 hash = hash_uint32(entry->initiatorNodeIdentifier);
     hash = hash_combine(hash, hash_any((unsigned char *) &entry->transactionNumber,
                                        sizeof(int64)));
     hash = hash_combine(hash, hash_any((unsigned char *) &entry->timestamp,

@@ -601,14 +588,12 @@ DistributedTransactionIdCompare(const void *a, const void *b, Size keysize)
 static void
 LogCancellingBackend(TransactionNode *transactionNode)
 {
-    StringInfo logMessage = NULL;
-
     if (!LogDistributedDeadlockDetection)
     {
         return;
     }

-    logMessage = makeStringInfo();
+    StringInfo logMessage = makeStringInfo();

     appendStringInfo(logMessage, "Cancelling the following backend "
                                  "to resolve distributed deadlock "

@@ -627,16 +612,13 @@ LogCancellingBackend(TransactionNode *transactionNode)
 static void
 LogTransactionNode(TransactionNode *transactionNode)
 {
-    StringInfo logMessage = NULL;
-    DistributedTransactionId *transactionId = NULL;
-
     if (!LogDistributedDeadlockDetection)
     {
         return;
     }

-    logMessage = makeStringInfo();
-    transactionId = &(transactionNode->transactionId);
+    StringInfo logMessage = makeStringInfo();
+    DistributedTransactionId *transactionId = &(transactionNode->transactionId);

     appendStringInfo(logMessage,
                      "[DistributedTransactionId: (%d, " UINT64_FORMAT ", %s)] = ",
@@ -73,9 +73,7 @@ PG_FUNCTION_INFO_V1(dump_global_wait_edges);
 Datum
 dump_global_wait_edges(PG_FUNCTION_ARGS)
 {
-    WaitGraph *waitGraph = NULL;
-
-    waitGraph = BuildGlobalWaitGraph();
+    WaitGraph *waitGraph = BuildGlobalWaitGraph();

     ReturnWaitGraph(waitGraph, fcinfo);

@@ -106,7 +104,6 @@ BuildGlobalWaitGraph(void)
         WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
         char *nodeName = workerNode->workerName;
         int nodePort = workerNode->workerPort;
-        MultiConnection *connection = NULL;
         int connectionFlags = 0;

         if (workerNode->groupId == localNodeId)

@@ -115,8 +112,9 @@ BuildGlobalWaitGraph(void)
             continue;
         }

-        connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort,
-                                                     nodeUser, NULL);
+        MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
+                                                                      nodeName, nodePort,
+                                                                      nodeUser, NULL);

         connectionList = lappend(connectionList, connection);
     }

@@ -127,10 +125,9 @@ BuildGlobalWaitGraph(void)
     foreach(connectionCell, connectionList)
     {
         MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-        int querySent = false;
         const char *command = "SELECT * FROM dump_local_wait_edges()";

-        querySent = SendRemoteCommand(connection, command);
+        int querySent = SendRemoteCommand(connection, command);
         if (querySent == 0)
         {
             ReportConnectionError(connection, WARNING);

@@ -141,21 +138,17 @@ BuildGlobalWaitGraph(void)
     foreach(connectionCell, connectionList)
     {
         MultiConnection *connection = (MultiConnection *) lfirst(connectionCell);
-        PGresult *result = NULL;
         bool raiseInterrupts = true;
-        int64 rowIndex = 0;
-        int64 rowCount = 0;
-        int64 colCount = 0;

-        result = GetRemoteCommandResult(connection, raiseInterrupts);
+        PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
         if (!IsResponseOK(result))
        {
             ReportResultError(connection, result, WARNING);
             continue;
         }

-        rowCount = PQntuples(result);
-        colCount = PQnfields(result);
+        int64 rowCount = PQntuples(result);
+        int64 colCount = PQnfields(result);

         if (colCount != 9)
         {

@@ -164,7 +157,7 @@ BuildGlobalWaitGraph(void)
             continue;
         }

-        for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
+        for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
         {
             AddWaitEdgeFromResult(waitGraph, result, rowIndex);
         }

@@ -205,14 +198,12 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex)
 int64
 ParseIntField(PGresult *result, int rowIndex, int colIndex)
 {
-    char *resultString = NULL;
-
     if (PQgetisnull(result, rowIndex, colIndex))
     {
         return 0;
     }

-    resultString = PQgetvalue(result, rowIndex, colIndex);
+    char *resultString = PQgetvalue(result, rowIndex, colIndex);

     return pg_strtouint64(resultString, NULL, 10);
 }

@@ -225,14 +216,12 @@ ParseIntField(PGresult *result, int rowIndex, int colIndex)
 bool
 ParseBoolField(PGresult *result, int rowIndex, int colIndex)
 {
-    char *resultString = NULL;
-
     if (PQgetisnull(result, rowIndex, colIndex))
     {
         return false;
     }

-    resultString = PQgetvalue(result, rowIndex, colIndex);
+    char *resultString = PQgetvalue(result, rowIndex, colIndex);
     if (strlen(resultString) != 1)
     {
         return false;

@@ -249,18 +238,14 @@ ParseBoolField(PGresult *result, int rowIndex, int colIndex)
 TimestampTz
 ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex)
 {
-    char *resultString = NULL;
-    Datum resultStringDatum = 0;
-    Datum timestampDatum = 0;
-
     if (PQgetisnull(result, rowIndex, colIndex))
     {
         return DT_NOBEGIN;
     }

-    resultString = PQgetvalue(result, rowIndex, colIndex);
-    resultStringDatum = CStringGetDatum(resultString);
-    timestampDatum = DirectFunctionCall3(timestamptz_in, resultStringDatum, 0, -1);
+    char *resultString = PQgetvalue(result, rowIndex, colIndex);
+    Datum resultStringDatum = CStringGetDatum(resultString);
+    Datum timestampDatum = DirectFunctionCall3(timestamptz_in, resultStringDatum, 0, -1);

     return DatumGetTimestampTz(timestampDatum);
 }

@@ -286,7 +271,6 @@ dump_local_wait_edges(PG_FUNCTION_ARGS)
 static void
 ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo)
 {
-    size_t curEdgeNum = 0;
     TupleDesc tupleDesc;
     Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDesc);

@@ -302,7 +286,7 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo)
      * 07: blocking_transaction_stamp
      * 08: blocking_transaction_waiting
      */
-    for (curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++)
+    for (size_t curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++)
     {
         Datum values[9];
         bool nulls[9];

@@ -353,8 +337,6 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo)
 static WaitGraph *
 BuildLocalWaitGraph(void)
 {
-    WaitGraph *waitGraph = NULL;
-    int curBackend = 0;
     PROCStack remaining;
     int totalProcs = TotalProcCount();

@@ -364,7 +346,7 @@ BuildLocalWaitGraph(void)
      * more than enough space to build the list of wait edges without a single
      * allocation.
      */
-    waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
+    WaitGraph *waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph));
     waitGraph->localNodeId = GetLocalGroupId();
     waitGraph->allocatedSize = totalProcs * 3;
     waitGraph->edgeCount = 0;

@@ -384,7 +366,7 @@ BuildLocalWaitGraph(void)
      */

     /* build list of starting procs */
-    for (curBackend = 0; curBackend < totalProcs; curBackend++)
+    for (int curBackend = 0; curBackend < totalProcs; curBackend++)
     {
         PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
         BackendData currentBackendData;

@@ -476,24 +458,20 @@ BuildLocalWaitGraph(void)
 static bool
 IsProcessWaitingForSafeOperations(PGPROC *proc)
 {
-    PROCLOCK *waitProcLock = NULL;
-    LOCK *waitLock = NULL;
-    PGXACT *pgxact = NULL;
-
     if (proc->waitStatus != STATUS_WAITING)
     {
         return false;
     }

     /* get the transaction that the backend associated with */
-    pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
+    PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
     if (pgxact->vacuumFlags & PROC_IS_AUTOVACUUM)
     {
         return true;
     }

-    waitProcLock = proc->waitProcLock;
-    waitLock = waitProcLock->tag.myLock;
+    PROCLOCK *waitProcLock = proc->waitProcLock;
+    LOCK *waitLock = waitProcLock->tag.myLock;

     return waitLock->tag.locktag_type == LOCKTAG_RELATION_EXTEND ||
            waitLock->tag.locktag_type == LOCKTAG_PAGE ||

@@ -511,11 +489,9 @@ IsProcessWaitingForSafeOperations(PGPROC *proc)
 static void
 LockLockData(void)
 {
-    int partitionNum = 0;
-
     LockBackendSharedMemory(LW_SHARED);

-    for (partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++)
+    for (int partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++)
     {
         LWLockAcquire(LockHashPartitionLockByIndex(partitionNum), LW_SHARED);
     }

@@ -533,9 +509,7 @@ LockLockData(void)
 static void
 UnlockLockData(void)
 {
-    int partitionNum = 0;
-
-    for (partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--)
+    for (int partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--)
     {
         LWLockRelease(LockHashPartitionLockByIndex(partitionNum));
     }
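The loop-counter hunks above apply the same idea to induction variables: the index moves out of the block-level declarations and into the for statement itself (C99 style), shrinking its scope to the loop body. A self-contained sketch; the array contents are made up for illustration:

    #include <stdio.h>

    int
    main(void)
    {
        int edges[] = { 3, 1, 4, 1, 5 };
        int edgeCount = sizeof(edges) / sizeof(edges[0]);

        /* before the rewrite this would have been:
         *     int edgeIndex = 0;
         *     for (edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++)
         */
        for (int edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++)
        {
            printf("%d\n", edges[edgeIndex]);
        }
        return 0;
    }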
@@ -133,14 +133,13 @@ void
 AllocateRelationAccessHash(void)
 {
     HASHCTL info;
-    uint32 hashFlags = 0;

     memset(&info, 0, sizeof(info));
     info.keysize = sizeof(RelationAccessHashKey);
     info.entrysize = sizeof(RelationAccessHashEntry);
     info.hash = tag_hash;
     info.hcxt = ConnectionContext;
-    hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+    uint32 hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

     RelationAccessHash = hash_create("citus connection cache (relationid)",
                                      8, &info, hashFlags);

@@ -244,12 +243,12 @@ static void
 RecordPlacementAccessToCache(Oid relationId, ShardPlacementAccessType accessType)
 {
     RelationAccessHashKey hashKey;
-    RelationAccessHashEntry *hashEntry;
     bool found = false;

     hashKey.relationId = relationId;

-    hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_ENTER, &found);
+    RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey,
+                                                     HASH_ENTER, &found);
     if (!found)
     {
         hashEntry->relationAccessMode = 0;

@@ -270,8 +269,6 @@ RecordPlacementAccessToCache(Oid relationId, ShardPlacementAccessType accessType
 void
 RecordParallelRelationAccessForTaskList(List *taskList)
 {
-    Task *firstTask = NULL;
-
     if (MultiShardConnectionType == SEQUENTIAL_CONNECTION)
     {
         /* sequential mode prevents parallel access */

@@ -288,7 +285,7 @@ RecordParallelRelationAccessForTaskList(List *taskList)
      * Since all the tasks in a task list is expected to operate on the same
      * distributed table(s), we only need to process the first task.
      */
-    firstTask = linitial(taskList);
+    Task *firstTask = linitial(taskList);

     if (firstTask->taskType == SQL_TASK)
     {

@@ -328,7 +325,6 @@ RecordParallelRelationAccessForTaskList(List *taskList)
 static void
 RecordRelationParallelSelectAccessForTask(Task *task)
 {
-    List *relationShardList = NIL;
     ListCell *relationShardCell = NULL;
     Oid lastRelationId = InvalidOid;

@@ -338,7 +334,7 @@ RecordRelationParallelSelectAccessForTask(Task *task)
         return;
     }

-    relationShardList = task->relationShardList;
+    List *relationShardList = task->relationShardList;

     foreach(relationShardCell, relationShardList)
     {

@@ -528,13 +524,12 @@ RecordParallelRelationAccessToCache(Oid relationId,
                                     ShardPlacementAccessType placementAccess)
 {
     RelationAccessHashKey hashKey;
-    RelationAccessHashEntry *hashEntry;
     bool found = false;
-    int parallelRelationAccessBit = 0;

     hashKey.relationId = relationId;

-    hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_ENTER, &found);
+    RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey,
+                                                     HASH_ENTER, &found);
     if (!found)
     {
         hashEntry->relationAccessMode = 0;

@@ -544,7 +539,7 @@ RecordParallelRelationAccessToCache(Oid relationId,
     hashEntry->relationAccessMode |= (1 << (placementAccess));

     /* set the bit representing access mode */
-    parallelRelationAccessBit = placementAccess + PARALLEL_MODE_FLAG_OFFSET;
+    int parallelRelationAccessBit = placementAccess + PARALLEL_MODE_FLAG_OFFSET;
     hashEntry->relationAccessMode |= (1 << parallelRelationAccessBit);
 }

@@ -557,7 +552,6 @@ bool
 ParallelQueryExecutedInTransaction(void)
 {
     HASH_SEQ_STATUS status;
-    RelationAccessHashEntry *hashEntry;

     if (!ShouldRecordRelationAccess() || RelationAccessHash == NULL)
     {

@@ -566,7 +560,8 @@ ParallelQueryExecutedInTransaction(void)

     hash_seq_init(&status, RelationAccessHash);

-    hashEntry = (RelationAccessHashEntry *) hash_seq_search(&status);
+    RelationAccessHashEntry *hashEntry = (RelationAccessHashEntry *) hash_seq_search(
+        &status);
     while (hashEntry != NULL)
     {
         int relationAccessMode = hashEntry->relationAccessMode;

@@ -621,8 +616,6 @@ static RelationAccessMode
 GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType)
 {
     RelationAccessHashKey hashKey;
-    RelationAccessHashEntry *hashEntry;
-    int relationAcessMode = 0;
     bool found = false;
     int parallelRelationAccessBit = accessType + PARALLEL_MODE_FLAG_OFFSET;

@@ -634,7 +627,8 @@ GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType)

     hashKey.relationId = relationId;

-    hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_FIND, &found);
+    RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey,
+                                                     HASH_FIND, &found);
     if (!found)
     {
         /* relation not accessed at all */

@@ -642,7 +636,7 @@ GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType)
     }


-    relationAcessMode = hashEntry->relationAccessMode;
+    int relationAcessMode = hashEntry->relationAccessMode;
     if (!(relationAcessMode & (1 << accessType)))
     {
         /* relation not accessed with the given access type */

@@ -692,7 +686,6 @@ ShouldRecordRelationAccess()
 static void
 CheckConflictingRelationAccesses(Oid relationId, ShardPlacementAccessType accessType)
 {
-    DistTableCacheEntry *cacheEntry = NULL;
     Oid conflictingReferencingRelationId = InvalidOid;
     ShardPlacementAccessType conflictingAccessType = PLACEMENT_ACCESS_SELECT;

@@ -701,7 +694,7 @@ CheckConflictingRelationAccesses(Oid relationId, ShardPlacementAccessType access
         return;
     }

-    cacheEntry = DistributedTableCacheEntry(relationId);
+    DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);

     if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE &&
           cacheEntry->referencingRelationsViaForeignKey != NIL))

@@ -791,7 +784,6 @@ static void
 CheckConflictingParallelRelationAccesses(Oid relationId, ShardPlacementAccessType
                                          accessType)
 {
-    DistTableCacheEntry *cacheEntry = NULL;
     Oid conflictingReferencingRelationId = InvalidOid;
     ShardPlacementAccessType conflictingAccessType = PLACEMENT_ACCESS_SELECT;

@@ -800,7 +792,7 @@ CheckConflictingParallelRelationAccesses(Oid relationId, ShardPlacementAccessTyp
         return;
     }

-    cacheEntry = DistributedTableCacheEntry(relationId);
+    DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId);
     if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH &&
           cacheEntry->referencedRelationsViaForeignKey != NIL))
     {

@@ -877,9 +869,6 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess
     foreach(referencedRelationCell, cacheEntry->referencedRelationsViaForeignKey)
     {
         Oid referencedRelation = lfirst_oid(referencedRelationCell);
-        RelationAccessMode selectMode = RELATION_NOT_ACCESSED;
-        RelationAccessMode dmlMode = RELATION_NOT_ACCESSED;
-        RelationAccessMode ddlMode = RELATION_NOT_ACCESSED;

         /* we're only interested in foreign keys to reference tables */
         if (PartitionMethod(referencedRelation) != DISTRIBUTE_BY_NONE)

@@ -891,7 +880,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess
          * A select on a reference table could conflict with a DDL
          * on a distributed table.
          */
-        selectMode = GetRelationSelectAccessMode(referencedRelation);
+        RelationAccessMode selectMode = GetRelationSelectAccessMode(referencedRelation);
         if (placementAccess == PLACEMENT_ACCESS_DDL &&
             selectMode != RELATION_NOT_ACCESSED)
         {

@@ -905,7 +894,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess
          * Both DML and DDL operations on a reference table conflicts with
          * any parallel operation on distributed tables.
          */
-        dmlMode = GetRelationDMLAccessMode(referencedRelation);
+        RelationAccessMode dmlMode = GetRelationDMLAccessMode(referencedRelation);
         if (dmlMode != RELATION_NOT_ACCESSED)
         {
             *conflictingRelationId = referencedRelation;

@@ -914,7 +903,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess
             return true;
         }

-        ddlMode = GetRelationDDLAccessMode(referencedRelation);
+        RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencedRelation);
         if (ddlMode != RELATION_NOT_ACCESSED)
         {
             *conflictingRelationId = referencedRelation;

@@ -985,7 +974,6 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces
     }
     else if (placementAccess == PLACEMENT_ACCESS_DML)
     {
-        RelationAccessMode ddlMode = RELATION_NOT_ACCESSED;
         RelationAccessMode dmlMode = GetRelationDMLAccessMode(referencingRelation);

         if (dmlMode == RELATION_PARALLEL_ACCESSED)

@@ -994,7 +982,7 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces
             *conflictingAccessMode = PLACEMENT_ACCESS_DML;
         }

-        ddlMode = GetRelationDDLAccessMode(referencingRelation);
+        RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencingRelation);
         if (ddlMode == RELATION_PARALLEL_ACCESSED)
         {
             /* SELECT on a distributed table conflicts with DDL / TRUNCATE */

@@ -1004,25 +992,22 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces
     }
     else if (placementAccess == PLACEMENT_ACCESS_DDL)
     {
-        RelationAccessMode selectMode = RELATION_NOT_ACCESSED;
-        RelationAccessMode ddlMode = RELATION_NOT_ACCESSED;
-        RelationAccessMode dmlMode = RELATION_NOT_ACCESSED;

-        selectMode = GetRelationSelectAccessMode(referencingRelation);
+        RelationAccessMode selectMode = GetRelationSelectAccessMode(
+            referencingRelation);
         if (selectMode == RELATION_PARALLEL_ACCESSED)
         {
             holdsConflictingLocks = true;
             *conflictingAccessMode = PLACEMENT_ACCESS_SELECT;
         }

-        dmlMode = GetRelationDMLAccessMode(referencingRelation);
+        RelationAccessMode dmlMode = GetRelationDMLAccessMode(referencingRelation);
         if (dmlMode == RELATION_PARALLEL_ACCESSED)
         {
             holdsConflictingLocks = true;
             *conflictingAccessMode = PLACEMENT_ACCESS_DML;
         }

-        ddlMode = GetRelationDDLAccessMode(referencingRelation);
+        RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencingRelation);
         if (ddlMode == RELATION_PARALLEL_ACCESSED)
         {
             holdsConflictingLocks = true;
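The relation-access functions above record which access types a relation has seen by setting bits in a single integer: bit accessType for any access, and bit accessType + PARALLEL_MODE_FLAG_OFFSET for parallel access. A self-contained sketch of that bookkeeping; the enum values and the offset here are illustrative stand-ins, not the ones defined in the Citus headers:

    #include <stdio.h>

    /* illustrative access types, mirroring the SELECT/DML/DDL split above */
    typedef enum { ACCESS_SELECT = 0, ACCESS_DML = 1, ACCESS_DDL = 2 } AccessType;

    #define PARALLEL_MODE_FLAG_OFFSET 3 /* assumed offset, for illustration only */

    int
    main(void)
    {
        int accessMode = 0;

        /* record a plain DML access and a parallel DML access */
        accessMode |= (1 << ACCESS_DML);
        accessMode |= (1 << (ACCESS_DML + PARALLEL_MODE_FLAG_OFFSET));

        /* query it back the same way the lookup side does */
        if (accessMode & (1 << ACCESS_DML))
        {
            printf("relation was accessed with DML\n");
        }
        return 0;
    }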
@@ -59,10 +59,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection)
 {
     RemoteTransaction *transaction = &connection->remoteTransaction;
     StringInfo beginAndSetDistributedTransactionId = makeStringInfo();
-    DistributedTransactionId *distributedTransactionId = NULL;
     ListCell *subIdCell = NULL;
-    List *activeSubXacts = NIL;
-    const char *timestamp = NULL;

     Assert(transaction->transactionState == REMOTE_TRANS_INVALID);

@@ -84,8 +81,9 @@ StartRemoteTransactionBegin(struct MultiConnection *connection)
      * and send both in one step. The reason is purely performance, we don't want
      * seperate roundtrips for these two statements.
      */
-    distributedTransactionId = GetCurrentDistributedTransactionId();
-    timestamp = timestamptz_to_str(distributedTransactionId->timestamp);
+    DistributedTransactionId *distributedTransactionId =
+        GetCurrentDistributedTransactionId();
+    const char *timestamp = timestamptz_to_str(distributedTransactionId->timestamp);
     appendStringInfo(beginAndSetDistributedTransactionId,
                      "SELECT assign_distributed_transaction_id(%d, " UINT64_FORMAT
                      ", '%s');",

@@ -94,7 +92,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection)
                      timestamp);

     /* append context for in-progress SAVEPOINTs for this transaction */
-    activeSubXacts = ActiveSubXactContexts();
+    List *activeSubXacts = ActiveSubXactContexts();
     transaction->lastSuccessfulSubXact = TopSubTransactionId;
     transaction->lastQueuedSubXact = TopSubTransactionId;
     foreach(subIdCell, activeSubXacts)

@@ -139,12 +137,11 @@ void
 FinishRemoteTransactionBegin(struct MultiConnection *connection)
 {
     RemoteTransaction *transaction = &connection->remoteTransaction;
-    bool clearSuccessful = true;
     bool raiseErrors = true;

     Assert(transaction->transactionState == REMOTE_TRANS_STARTING);

-    clearSuccessful = ClearResults(connection, raiseErrors);
+    bool clearSuccessful = ClearResults(connection, raiseErrors);
     if (clearSuccessful)
     {
         transaction->transactionState = REMOTE_TRANS_STARTED;

@@ -276,7 +273,6 @@ void
 FinishRemoteTransactionCommit(MultiConnection *connection)
 {
     RemoteTransaction *transaction = &connection->remoteTransaction;
-    PGresult *result = NULL;
     const bool raiseErrors = false;
     const bool isCommit = true;

@@ -284,7 +280,7 @@ FinishRemoteTransactionCommit(MultiConnection *connection)
            transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING ||
            transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING);

-    result = GetRemoteCommandResult(connection, raiseErrors);
+    PGresult *result = GetRemoteCommandResult(connection, raiseErrors);

     if (!IsResponseOK(result))
     {

@@ -476,7 +472,6 @@ StartRemoteTransactionPrepare(struct MultiConnection *connection)
     RemoteTransaction *transaction = &connection->remoteTransaction;
     StringInfoData command;
     const bool raiseErrors = true;
-    WorkerNode *workerNode = NULL;

     /* can't prepare a nonexistant transaction */
     Assert(transaction->transactionState != REMOTE_TRANS_INVALID);

@@ -490,7 +485,7 @@ StartRemoteTransactionPrepare(struct MultiConnection *connection)
     Assign2PCIdentifier(connection);

     /* log transactions to workers in pg_dist_transaction */
-    workerNode = FindWorkerNode(connection->hostname, connection->port);
+    WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port);
     if (workerNode != NULL)
     {
         LogTransactionRecord(workerNode->groupId, transaction->preparedName);

@@ -520,12 +515,11 @@ void
 FinishRemoteTransactionPrepare(struct MultiConnection *connection)
 {
     RemoteTransaction *transaction = &connection->remoteTransaction;
-    PGresult *result = NULL;
     const bool raiseErrors = true;

     Assert(transaction->transactionState == REMOTE_TRANS_PREPARING);

-    result = GetRemoteCommandResult(connection, raiseErrors);
+    PGresult *result = GetRemoteCommandResult(connection, raiseErrors);

     if (!IsResponseOK(result))
     {

@@ -596,7 +590,6 @@ void
 RemoteTransactionsBeginIfNecessary(List *connectionList)
 {
     ListCell *connectionCell = NULL;
-    bool raiseInterrupts = true;

     /*
      * Don't do anything if not in a coordinated transaction. That allows the

@@ -630,7 +623,7 @@ RemoteTransactionsBeginIfNecessary(List *connectionList)
         StartRemoteTransactionBegin(connection);
     }

-    raiseInterrupts = true;
+    bool raiseInterrupts = true;
     WaitForAllConnections(connectionList, raiseInterrupts);

     /* get result of all the BEGINs */

@@ -798,7 +791,6 @@ void
 CoordinatedRemoteTransactionsPrepare(void)
 {
     dlist_iter iter;
-    bool raiseInterrupts = false;
     List *connectionList = NIL;

     /* issue PREPARE TRANSACTION; to all relevant remote nodes */

@@ -822,7 +814,7 @@ CoordinatedRemoteTransactionsPrepare(void)
         connectionList = lappend(connectionList, connection);
     }

-    raiseInterrupts = true;
+    bool raiseInterrupts = true;
     WaitForAllConnections(connectionList, raiseInterrupts);

     /* Wait for result */

@@ -857,7 +849,6 @@ CoordinatedRemoteTransactionsCommit(void)
 {
     dlist_iter iter;
     List *connectionList = NIL;
-    bool raiseInterrupts = false;

     /*
      * Issue appropriate transaction commands to remote nodes. If everything

@@ -885,7 +876,7 @@ CoordinatedRemoteTransactionsCommit(void)
         connectionList = lappend(connectionList, connection);
     }

-    raiseInterrupts = false;
+    bool raiseInterrupts = false;
     WaitForAllConnections(connectionList, raiseInterrupts);

     /* wait for the replies to the commands to come in */

@@ -921,7 +912,6 @@ CoordinatedRemoteTransactionsAbort(void)
 {
     dlist_iter iter;
     List *connectionList = NIL;
-    bool raiseInterrupts = false;

     /* asynchronously send ROLLBACK [PREPARED] */
     dlist_foreach(iter, &InProgressTransactions)

@@ -942,7 +932,7 @@ CoordinatedRemoteTransactionsAbort(void)
         connectionList = lappend(connectionList, connection);
     }

-    raiseInterrupts = false;
+    bool raiseInterrupts = false;
     WaitForAllConnections(connectionList, raiseInterrupts);

     /* and wait for the results */
@@ -65,11 +65,9 @@ static bool RecoverPreparedTransactionOnWorker(MultiConnection *connection,
 Datum
 recover_prepared_transactions(PG_FUNCTION_ARGS)
 {
-    int recoveredTransactionCount = 0;
-
     CheckCitusVersion(ERROR);

-    recoveredTransactionCount = RecoverTwoPhaseCommits();
+    int recoveredTransactionCount = RecoverTwoPhaseCommits();

     PG_RETURN_INT32(recoveredTransactionCount);
 }

@@ -83,9 +81,6 @@ recover_prepared_transactions(PG_FUNCTION_ARGS)
 void
 LogTransactionRecord(int32 groupId, char *transactionName)
 {
-    Relation pgDistTransaction = NULL;
-    TupleDesc tupleDescriptor = NULL;
-    HeapTuple heapTuple = NULL;
     Datum values[Natts_pg_dist_transaction];
     bool isNulls[Natts_pg_dist_transaction];

@@ -97,10 +92,10 @@ LogTransactionRecord(int32 groupId, char *transactionName)
     values[Anum_pg_dist_transaction_gid - 1] = CStringGetTextDatum(transactionName);

     /* open transaction relation and insert new tuple */
-    pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock);
+    Relation pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock);

-    tupleDescriptor = RelationGetDescr(pgDistTransaction);
-    heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
+    TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction);
+    HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

     CatalogTupleInsert(pgDistTransaction, heapTuple);

@@ -118,11 +113,10 @@ LogTransactionRecord(int32 groupId, char *transactionName)
 int
 RecoverTwoPhaseCommits(void)
 {
-    List *workerList = NIL;
     ListCell *workerNodeCell = NULL;
     int recoveredTransactionCount = 0;

-    workerList = ActivePrimaryNodeList(NoLock);
+    List *workerList = ActivePrimaryNodeList(NoLock);

     foreach(workerNodeCell, workerList)
     {

@@ -148,26 +142,14 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
     char *nodeName = workerNode->workerName;
     int nodePort = workerNode->workerPort;

-    List *activeTransactionNumberList = NIL;
-    HTAB *activeTransactionNumberSet = NULL;
-
-    List *pendingTransactionList = NIL;
-    HTAB *pendingTransactionSet = NULL;
-    List *recheckTransactionList = NIL;
-    HTAB *recheckTransactionSet = NULL;

-    Relation pgDistTransaction = NULL;
-    SysScanDesc scanDescriptor = NULL;
     ScanKeyData scanKey[1];
     int scanKeyCount = 1;
     bool indexOK = true;
     HeapTuple heapTuple = NULL;
-    TupleDesc tupleDescriptor = NULL;

     HASH_SEQ_STATUS status;

-    MemoryContext localContext = NULL;
-    MemoryContext oldContext = NULL;
     bool recoveryFailed = false;

     int connectionFlags = 0;

@@ -180,17 +162,18 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
         return 0;
     }

-    localContext = AllocSetContextCreateExtended(CurrentMemoryContext,
-                                                 "RecoverWorkerTransactions",
-                                                 ALLOCSET_DEFAULT_MINSIZE,
-                                                 ALLOCSET_DEFAULT_INITSIZE,
-                                                 ALLOCSET_DEFAULT_MAXSIZE);
+    MemoryContext localContext = AllocSetContextCreateExtended(CurrentMemoryContext,
+                                                               "RecoverWorkerTransactions",
+                                                               ALLOCSET_DEFAULT_MINSIZE,
+                                                               ALLOCSET_DEFAULT_INITSIZE,
+                                                               ALLOCSET_DEFAULT_MAXSIZE);

-    oldContext = MemoryContextSwitchTo(localContext);
+    MemoryContext oldContext = MemoryContextSwitchTo(localContext);

     /* take table lock first to avoid running concurrently */
-    pgDistTransaction = heap_open(DistTransactionRelationId(), ShareUpdateExclusiveLock);
-    tupleDescriptor = RelationGetDescr(pgDistTransaction);
+    Relation pgDistTransaction = heap_open(DistTransactionRelationId(),
+                                           ShareUpdateExclusiveLock);
+    TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction);

     /*
      * We're going to check the list of prepared transactions on the worker,

@@ -225,31 +208,33 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
      */

     /* find stale prepared transactions on the remote node */
-    pendingTransactionList = PendingWorkerTransactionList(connection);
-    pendingTransactionSet = ListToHashSet(pendingTransactionList, NAMEDATALEN, true);
+    List *pendingTransactionList = PendingWorkerTransactionList(connection);
+    HTAB *pendingTransactionSet = ListToHashSet(pendingTransactionList, NAMEDATALEN,
+                                                true);

     /* find in-progress distributed transactions */
-    activeTransactionNumberList = ActiveDistributedTransactionNumbers();
-    activeTransactionNumberSet = ListToHashSet(activeTransactionNumberList,
-                                               sizeof(uint64), false);
+    List *activeTransactionNumberList = ActiveDistributedTransactionNumbers();
+    HTAB *activeTransactionNumberSet = ListToHashSet(activeTransactionNumberList,
+                                                     sizeof(uint64), false);

     /* scan through all recovery records of the current worker */
     ScanKeyInit(&scanKey[0], Anum_pg_dist_transaction_groupid,
                 BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId));

     /* get a snapshot of pg_dist_transaction */
-    scanDescriptor = systable_beginscan(pgDistTransaction,
-                                        DistTransactionGroupIndexId(), indexOK,
-                                        NULL, scanKeyCount, scanKey);
+    SysScanDesc scanDescriptor = systable_beginscan(pgDistTransaction,
+                                                    DistTransactionGroupIndexId(),
+                                                    indexOK,
+                                                    NULL, scanKeyCount, scanKey);

     /* find stale prepared transactions on the remote node */
-    recheckTransactionList = PendingWorkerTransactionList(connection);
-    recheckTransactionSet = ListToHashSet(recheckTransactionList, NAMEDATALEN, true);
+    List *recheckTransactionList = PendingWorkerTransactionList(connection);
+    HTAB *recheckTransactionSet = ListToHashSet(recheckTransactionList, NAMEDATALEN,
+                                                true);

     while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
     {
         bool isNull = false;
-        bool isTransactionInProgress = false;
         bool foundPreparedTransactionBeforeCommit = false;
         bool foundPreparedTransactionAfterCommit = false;

@@ -258,8 +243,8 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
                                                      tupleDescriptor, &isNull);
         char *transactionName = TextDatumGetCString(transactionNameDatum);

-        isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet,
-                                                          transactionName);
+        bool isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet,
+                                                               transactionName);
         if (isTransactionInProgress)
         {
             /*

@@ -375,17 +360,15 @@ RecoverWorkerTransactions(WorkerNode *workerNode)

     while ((pendingTransactionName = hash_seq_search(&status)) != NULL)
     {
-        bool isTransactionInProgress = false;
-        bool shouldCommit = false;

-        isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet,
-                                                          pendingTransactionName);
+        bool isTransactionInProgress = IsTransactionInProgress(
+            activeTransactionNumberSet,
+            pendingTransactionName);
         if (isTransactionInProgress)
         {
             continue;
         }

-        shouldCommit = false;
+        bool shouldCommit = false;
         abortSucceeded = RecoverPreparedTransactionOnWorker(connection,
                                                             pendingTransactionName,
                                                             shouldCommit);

@@ -415,10 +398,6 @@ PendingWorkerTransactionList(MultiConnection *connection)
 {
     StringInfo command = makeStringInfo();
     bool raiseInterrupts = true;
-    int querySent = 0;
-    PGresult *result = NULL;
-    int rowCount = 0;
-    int rowIndex = 0;
     List *transactionNames = NIL;
     int coordinatorId = GetLocalGroupId();

@@ -426,21 +405,21 @@ PendingWorkerTransactionList(MultiConnection *connection)
                     "WHERE gid LIKE 'citus\\_%d\\_%%'",
                     coordinatorId);

-    querySent = SendRemoteCommand(connection, command->data);
+    int querySent = SendRemoteCommand(connection, command->data);
     if (querySent == 0)
     {
         ReportConnectionError(connection, ERROR);
     }

-    result = GetRemoteCommandResult(connection, raiseInterrupts);
+    PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
     if (!IsResponseOK(result))
     {
         ReportResultError(connection, result, ERROR);
     }

-    rowCount = PQntuples(result);
+    int rowCount = PQntuples(result);

-    for (rowIndex = 0; rowIndex < rowCount; rowIndex++)
+    for (int rowIndex = 0; rowIndex < rowCount; rowIndex++)
     {
         const int columnIndex = 0;
         char *transactionName = PQgetvalue(result, rowIndex, columnIndex);

@@ -468,11 +447,12 @@ IsTransactionInProgress(HTAB *activeTransactionNumberSet, char *preparedTransact
     int procId = 0;
     uint32 connectionNumber = 0;
     uint64 transactionNumber = 0;
-    bool isValidName = false;
     bool isTransactionInProgress = false;

-    isValidName = ParsePreparedTransactionName(preparedTransactionName, &groupId, &procId,
-                                               &transactionNumber, &connectionNumber);
+    bool isValidName = ParsePreparedTransactionName(preparedTransactionName, &groupId,
+                                                    &procId,
+                                                    &transactionNumber,
+                                                    &connectionNumber);
     if (isValidName)
     {
         hash_search(activeTransactionNumberSet, &transactionNumber, HASH_FIND,

@@ -493,7 +473,6 @@ RecoverPreparedTransactionOnWorker(MultiConnection *connection, char *transactio
 {
     StringInfo command = makeStringInfo();
     PGresult *result = NULL;
-    int executeCommand = 0;
     bool raiseInterrupts = false;

     if (shouldCommit)

@@ -509,7 +488,7 @@ RecoverPreparedTransactionOnWorker(MultiConnection *connection, char *transactio
                     quote_literal_cstr(transactionName));
     }

-    executeCommand = ExecuteOptionalRemoteCommand(connection, command->data, &result);
+    int executeCommand = ExecuteOptionalRemoteCommand(connection, command->data, &result);
     if (executeCommand == QUERY_SEND_FAILED)
     {
         return false;
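One detail worth noticing in RecoverWorkerTransactions above: heapTuple keeps its up-front declaration because its first assignment happens inside a while condition rather than as a plain statement, so there is no assignment line the declaration could be merged into. A generic, self-contained C sketch of the same shape:

    #include <stdio.h>

    int
    main(void)
    {
        /* the declaration must stay separate: the first assignment lives
         * inside the loop condition, and hoisting it into the declaration
         * (e.g. `int ch = getchar();`) would consume a character early */
        int ch = 0;

        while ((ch = getchar()) != EOF)
        {
            putchar(ch);
        }
        return 0;
    }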
Some files were not shown because too many files have changed in this diff.