diff --git a/.circleci/config.yml b/.circleci/config.yml index 87a84b4b8..270e6c6ba 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,6 +22,12 @@ jobs: - run: name: 'Check Style' command: citus_indent --check + - run: + name: 'Remove useless declarations' + command: ci/remove_useless_declarations.sh + - run: + name: 'Check if changed' + command: git diff --cached --exit-code check-sql-snapshots: docker: - image: 'citus/extbuilder:latest' diff --git a/ci/remove_useless_declarations.sh b/ci/remove_useless_declarations.sh new file mode 100755 index 000000000..0e77eff24 --- /dev/null +++ b/ci/remove_useless_declarations.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -eu + +files=$(find src -iname '*.c' | git check-attr --stdin citus-style | grep -v ': unset$' | sed 's/: citus-style: set$//') +while true; do + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = *[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t)?(?=\b(?P=variable)\b))(?<=\n\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t$+{type}$+{variable} =/sg' $files + # The following are simply the same regex, but repeated for different tab sizes + # (this is needed because variable sized backtracking is not supported in perl) + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = *[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t$+{type}$+{variable} =/sg' $files + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t\t\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = *[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t$+{type}$+{variable} =/sg' $files + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t\t\t\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = 
*[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t$+{type}$+{variable} =/sg' $files + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t\t\t\t\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = *[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t\t$+{type}$+{variable} =/sg' $files + # shellcheck disable=SC2086 + perl -i -p0e 's/\n\t\t\t\t\t\t(?!return )(?P(\w+ )+\**)(?>(?P\w+)( = *[\w>\s\n-]*?)?;\n(?P(?>(?P\/\*.*?\*\/|"(?>\\"|.)*?"|[^#]))*?)(\t\t\t\t\t\t)?(?=\b(?P=variable)\b))(?<=\n\t\t\t\t\t\t)(?P=variable) =(?![^;]*?[^>_]\b(?P=variable)\b[^_])/\n$+{code_between}\t\t\t\t\t\t$+{type}$+{variable} =/sg' $files + git diff --quiet && break; + git add .; +done diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index 69cf088ad..745e30105 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -47,11 +47,11 @@ static bool CallFuncExprRemotely(CallStmt *callStmt, bool CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest) { - DistObjectCacheEntry *procedure = NULL; FuncExpr *funcExpr = callStmt->funcexpr; Oid functionId = funcExpr->funcid; - procedure = LookupDistObjectCacheEntry(ProcedureRelationId, functionId, 0); + DistObjectCacheEntry *procedure = LookupDistObjectCacheEntry(ProcedureRelationId, + functionId, 0); if (procedure == NULL || !procedure->isDistributed) { return false; @@ -68,25 +68,13 @@ static bool CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, FuncExpr *funcExpr, DestReceiver *dest) { - Oid colocatedRelationId = InvalidOid; - Node *partitionValueNode = NULL; - Const *partitionValue = NULL; - Datum partitionValueDatum = 0; - ShardInterval *shardInterval = NULL; 
- List *placementList = NIL; - DistTableCacheEntry *distTable = NULL; - Var *partitionColumn = NULL; - ShardPlacement *placement = NULL; - WorkerNode *workerNode = NULL; - StringInfo callCommand = NULL; - if (IsMultiStatementTransaction()) { ereport(DEBUG1, (errmsg("cannot push down CALL in multi-statement transaction"))); return false; } - colocatedRelationId = ColocatedTableId(procedure->colocationId); + Oid colocatedRelationId = ColocatedTableId(procedure->colocationId); if (colocatedRelationId == InvalidOid) { ereport(DEBUG1, (errmsg("stored procedure does not have co-located tables"))); @@ -107,8 +95,8 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, return false; } - distTable = DistributedTableCacheEntry(colocatedRelationId); - partitionColumn = distTable->partitionColumn; + DistTableCacheEntry *distTable = DistributedTableCacheEntry(colocatedRelationId); + Var *partitionColumn = distTable->partitionColumn; if (partitionColumn == NULL) { /* This can happen if colocated with a reference table. Punt for now. 
*/ @@ -117,17 +105,17 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, return false; } - partitionValueNode = (Node *) list_nth(funcExpr->args, - procedure->distributionArgIndex); + Node *partitionValueNode = (Node *) list_nth(funcExpr->args, + procedure->distributionArgIndex); partitionValueNode = strip_implicit_coercions(partitionValueNode); if (!IsA(partitionValueNode, Const)) { ereport(DEBUG1, (errmsg("distribution argument value must be a constant"))); return false; } - partitionValue = (Const *) partitionValueNode; + Const *partitionValue = (Const *) partitionValueNode; - partitionValueDatum = partitionValue->constvalue; + Datum partitionValueDatum = partitionValue->constvalue; if (partitionValue->consttype != partitionColumn->vartype) { CopyCoercionData coercionData; @@ -138,14 +126,14 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, partitionValueDatum = CoerceColumnValue(partitionValueDatum, &coercionData); } - shardInterval = FindShardInterval(partitionValueDatum, distTable); + ShardInterval *shardInterval = FindShardInterval(partitionValueDatum, distTable); if (shardInterval == NULL) { ereport(DEBUG1, (errmsg("cannot push down call, failed to find shard interval"))); return false; } - placementList = FinalizedShardPlacementList(shardInterval->shardId); + List *placementList = FinalizedShardPlacementList(shardInterval->shardId); if (list_length(placementList) != 1) { /* punt on this for now */ @@ -154,8 +142,8 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, return false; } - placement = (ShardPlacement *) linitial(placementList); - workerNode = FindWorkerNode(placement->nodeName, placement->nodePort); + ShardPlacement *placement = (ShardPlacement *) linitial(placementList); + WorkerNode *workerNode = FindWorkerNode(placement->nodeName, placement->nodePort); if (workerNode == NULL || !workerNode->hasMetadata || !workerNode->metadataSynced) { ereport(DEBUG1, (errmsg("there 
is no worker node with metadata"))); @@ -165,7 +153,7 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure, ereport(DEBUG1, (errmsg("pushing down the procedure"))); /* build remote command with fully qualified names */ - callCommand = makeStringInfo(); + StringInfo callCommand = makeStringInfo(); appendStringInfo(callCommand, "CALL %s", pg_get_rule_expr((Node *) funcExpr)); { diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index c2d15f6db..a7def3c48 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -28,11 +28,10 @@ PlanClusterStmt(ClusterStmt *clusterStmt, const char *clusterCommand) } else { - Oid relationId = InvalidOid; bool missingOK = false; - relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock, - missingOK); + Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock, + missingOK); if (OidIsValid(relationId)) { diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index d879ecde8..fa9620acf 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -126,14 +126,10 @@ master_create_distributed_table(PG_FUNCTION_ARGS) text *distributionColumnText = PG_GETARG_TEXT_P(1); Oid distributionMethodOid = PG_GETARG_OID(2); - char *distributionColumnName = NULL; - Var *distributionColumn = NULL; - char distributionMethod = 0; char *colocateWithTableName = NULL; bool viaDeprecatedAPI = true; ObjectAddress tableAddress = { 0 }; - Relation relation = NULL; CheckCitusVersion(ERROR); EnsureCoordinator(); @@ -153,7 +149,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS) * sense of this table until we've committed, and we don't want multiple * backends manipulating this relation. 
*/ - relation = try_relation_open(relationId, ExclusiveLock); + Relation relation = try_relation_open(relationId, ExclusiveLock); if (relation == NULL) { @@ -168,10 +164,10 @@ master_create_distributed_table(PG_FUNCTION_ARGS) */ EnsureRelationKindSupported(relationId); - distributionColumnName = text_to_cstring(distributionColumnText); - distributionColumn = BuildDistributionKeyFromColumnName(relation, - distributionColumnName); - distributionMethod = LookupDistributionMethod(distributionMethodOid); + char *distributionColumnName = text_to_cstring(distributionColumnText); + Var *distributionColumn = BuildDistributionKeyFromColumnName(relation, + distributionColumnName); + char distributionMethod = LookupDistributionMethod(distributionMethodOid); CreateDistributedTable(relationId, distributionColumn, distributionMethod, colocateWithTableName, viaDeprecatedAPI); @@ -190,28 +186,18 @@ master_create_distributed_table(PG_FUNCTION_ARGS) Datum create_distributed_table(PG_FUNCTION_ARGS) { - Oid relationId = InvalidOid; - text *distributionColumnText = NULL; - Oid distributionMethodOid = InvalidOid; - text *colocateWithTableNameText = NULL; ObjectAddress tableAddress = { 0 }; - Relation relation = NULL; - char *distributionColumnName = NULL; - Var *distributionColumn = NULL; - char distributionMethod = 0; - - char *colocateWithTableName = NULL; bool viaDeprecatedAPI = false; CheckCitusVersion(ERROR); EnsureCoordinator(); - relationId = PG_GETARG_OID(0); - distributionColumnText = PG_GETARG_TEXT_P(1); - distributionMethodOid = PG_GETARG_OID(2); - colocateWithTableNameText = PG_GETARG_TEXT_P(3); + Oid relationId = PG_GETARG_OID(0); + text *distributionColumnText = PG_GETARG_TEXT_P(1); + Oid distributionMethodOid = PG_GETARG_OID(2); + text *colocateWithTableNameText = PG_GETARG_TEXT_P(3); EnsureTableOwner(relationId); @@ -229,7 +215,7 @@ create_distributed_table(PG_FUNCTION_ARGS) * sense of this table until we've committed, and we don't want multiple * backends manipulating 
this relation. */ - relation = try_relation_open(relationId, ExclusiveLock); + Relation relation = try_relation_open(relationId, ExclusiveLock); if (relation == NULL) { @@ -244,12 +230,12 @@ create_distributed_table(PG_FUNCTION_ARGS) */ EnsureRelationKindSupported(relationId); - distributionColumnName = text_to_cstring(distributionColumnText); - distributionColumn = BuildDistributionKeyFromColumnName(relation, - distributionColumnName); - distributionMethod = LookupDistributionMethod(distributionMethodOid); + char *distributionColumnName = text_to_cstring(distributionColumnText); + Var *distributionColumn = BuildDistributionKeyFromColumnName(relation, + distributionColumnName); + char distributionMethod = LookupDistributionMethod(distributionMethodOid); - colocateWithTableName = text_to_cstring(colocateWithTableNameText); + char *colocateWithTableName = text_to_cstring(colocateWithTableNameText); CreateDistributedTable(relationId, distributionColumn, distributionMethod, colocateWithTableName, viaDeprecatedAPI); @@ -270,10 +256,7 @@ create_reference_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); - Relation relation = NULL; char *colocateWithTableName = NULL; - List *workerNodeList = NIL; - int workerCount = 0; Var *distributionColumn = NULL; ObjectAddress tableAddress = { 0 }; @@ -297,7 +280,7 @@ create_reference_table(PG_FUNCTION_ARGS) * sense of this table until we've committed, and we don't want multiple * backends manipulating this relation. 
*/ - relation = relation_open(relationId, ExclusiveLock); + Relation relation = relation_open(relationId, ExclusiveLock); /* * We should do this check here since the codes in the following lines rely @@ -306,8 +289,8 @@ create_reference_table(PG_FUNCTION_ARGS) */ EnsureRelationKindSupported(relationId); - workerNodeList = ActivePrimaryNodeList(ShareLock); - workerCount = list_length(workerNodeList); + List *workerNodeList = ActivePrimaryNodeList(ShareLock); + int workerCount = list_length(workerNodeList); /* if there are no workers, error out */ if (workerCount == 0) @@ -344,27 +327,24 @@ void CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod, char *colocateWithTableName, bool viaDeprecatedAPI) { - char replicationModel = REPLICATION_MODEL_INVALID; - uint32 colocationId = INVALID_COLOCATION_ID; - Oid colocatedTableId = InvalidOid; - bool localTableEmpty = false; - - replicationModel = AppropriateReplicationModel(distributionMethod, viaDeprecatedAPI); + char replicationModel = AppropriateReplicationModel(distributionMethod, + viaDeprecatedAPI); /* * ColocationIdForNewTable assumes caller acquires lock on relationId. In our case, * our caller already acquired lock on relationId. 
*/ - colocationId = ColocationIdForNewTable(relationId, distributionColumn, - distributionMethod, replicationModel, - colocateWithTableName, viaDeprecatedAPI); + uint32 colocationId = ColocationIdForNewTable(relationId, distributionColumn, + distributionMethod, replicationModel, + colocateWithTableName, + viaDeprecatedAPI); EnsureRelationCanBeDistributed(relationId, distributionColumn, distributionMethod, colocationId, replicationModel, viaDeprecatedAPI); /* we need to calculate these variables before creating distributed metadata */ - localTableEmpty = LocalTableEmpty(relationId); - colocatedTableId = ColocatedTableId(colocationId); + bool localTableEmpty = LocalTableEmpty(relationId); + Oid colocatedTableId = ColocatedTableId(colocationId); /* create an entry for distributed table in pg_dist_partition */ InsertIntoPgDistPartition(relationId, distributionMethod, distributionColumn, @@ -642,9 +622,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn, char distributionMethod, uint32 colocationId, char replicationModel, bool viaDeprecatedAPI) { - Relation relation = NULL; - TupleDesc relationDesc = NULL; - char *relationName = NULL; Oid parentRelationId = InvalidOid; EnsureTableNotDistributed(relationId); @@ -652,9 +629,9 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn, EnsureReplicationSettings(InvalidOid, replicationModel); /* we assume callers took necessary locks */ - relation = relation_open(relationId, NoLock); - relationDesc = RelationGetDescr(relation); - relationName = RelationGetRelationName(relation); + Relation relation = relation_open(relationId, NoLock); + TupleDesc relationDesc = RelationGetDescr(relation); + char *relationName = RelationGetRelationName(relation); if (!RelationUsesHeapAccessMethodOrNone(relation)) { @@ -805,7 +782,6 @@ EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel, char sourceDistributionMethod = sourceTableEntry->partitionMethod; char sourceReplicationModel = 
sourceTableEntry->replicationModel; Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId); - Oid sourceDistributionColumnType = InvalidOid; if (sourceDistributionMethod != DISTRIBUTE_BY_HASH) { @@ -826,7 +802,7 @@ EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel, sourceRelationName, relationName))); } - sourceDistributionColumnType = sourceDistributionColumn->vartype; + Oid sourceDistributionColumnType = sourceDistributionColumn->vartype; if (sourceDistributionColumnType != distributionColumnType) { char *relationName = get_rel_name(relationId); @@ -898,9 +874,8 @@ static void EnsureTableNotDistributed(Oid relationId) { char *relationName = get_rel_name(relationId); - bool isDistributedTable = false; - isDistributedTable = IsDistributedTable(relationId); + bool isDistributedTable = IsDistributedTable(relationId); if (isDistributedTable) { @@ -949,20 +924,18 @@ EnsureReplicationSettings(Oid relationId, char replicationModel) static char LookupDistributionMethod(Oid distributionMethodOid) { - HeapTuple enumTuple = NULL; - Form_pg_enum enumForm = NULL; char distributionMethod = 0; - const char *enumLabel = NULL; - enumTuple = SearchSysCache1(ENUMOID, ObjectIdGetDatum(distributionMethodOid)); + HeapTuple enumTuple = SearchSysCache1(ENUMOID, ObjectIdGetDatum( + distributionMethodOid)); if (!HeapTupleIsValid(enumTuple)) { ereport(ERROR, (errmsg("invalid internal value for enum: %u", distributionMethodOid))); } - enumForm = (Form_pg_enum) GETSTRUCT(enumTuple); - enumLabel = NameStr(enumForm->enumlabel); + Form_pg_enum enumForm = (Form_pg_enum) GETSTRUCT(enumTuple); + const char *enumLabel = NameStr(enumForm->enumlabel); if (strncmp(enumLabel, "append", NAMEDATALEN) == 0) { @@ -997,9 +970,6 @@ static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId, int16 supportFunctionNumber) { - Oid operatorFamilyId = InvalidOid; - Oid supportFunctionOid = InvalidOid; - Oid operatorClassInputType = InvalidOid; Oid columnOid = 
partitionColumn->vartype; Oid operatorClassId = GetDefaultOpClass(columnOid, accessMethodId); @@ -1014,11 +984,11 @@ SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId, " class defined."))); } - operatorFamilyId = get_opclass_family(operatorClassId); - operatorClassInputType = get_opclass_input_type(operatorClassId); - supportFunctionOid = get_opfamily_proc(operatorFamilyId, operatorClassInputType, - operatorClassInputType, - supportFunctionNumber); + Oid operatorFamilyId = get_opclass_family(operatorClassId); + Oid operatorClassInputType = get_opclass_input_type(operatorClassId); + Oid supportFunctionOid = get_opfamily_proc(operatorFamilyId, operatorClassInputType, + operatorClassInputType, + supportFunctionNumber); return supportFunctionOid; } @@ -1037,13 +1007,8 @@ LocalTableEmpty(Oid tableId) char *tableName = get_rel_name(tableId); char *tableQualifiedName = quote_qualified_identifier(schemaName, tableName); - int spiConnectionResult = 0; - int spiQueryResult = 0; StringInfo selectExistQueryString = makeStringInfo(); - HeapTuple tuple = NULL; - Datum hasDataDatum = 0; - bool localTableEmpty = false; bool columnNull = false; bool readOnly = true; @@ -1052,7 +1017,7 @@ LocalTableEmpty(Oid tableId) AssertArg(!IsDistributedTable(tableId)); - spiConnectionResult = SPI_connect(); + int spiConnectionResult = SPI_connect(); if (spiConnectionResult != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); @@ -1060,7 +1025,7 @@ LocalTableEmpty(Oid tableId) appendStringInfo(selectExistQueryString, SELECT_EXIST_QUERY, tableQualifiedName); - spiQueryResult = SPI_execute(selectExistQueryString->data, readOnly, 0); + int spiQueryResult = SPI_execute(selectExistQueryString->data, readOnly, 0); if (spiQueryResult != SPI_OK_SELECT) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", @@ -1070,9 +1035,10 @@ LocalTableEmpty(Oid tableId) /* we expect that SELECT EXISTS query will return single value in a single row */ 
Assert(SPI_processed == 1); - tuple = SPI_tuptable->vals[rowId]; - hasDataDatum = SPI_getbinval(tuple, SPI_tuptable->tupdesc, attributeId, &columnNull); - localTableEmpty = !DatumGetBool(hasDataDatum); + HeapTuple tuple = SPI_tuptable->vals[rowId]; + Datum hasDataDatum = SPI_getbinval(tuple, SPI_tuptable->tupdesc, attributeId, + &columnNull); + bool localTableEmpty = !DatumGetBool(hasDataDatum); SPI_finish(); @@ -1145,13 +1111,12 @@ CanUseExclusiveConnections(Oid relationId, bool localTableEmpty) void CreateTruncateTrigger(Oid relationId) { - CreateTrigStmt *trigger = NULL; StringInfo triggerName = makeStringInfo(); bool internal = true; appendStringInfo(triggerName, "truncate_trigger"); - trigger = makeNode(CreateTrigStmt); + CreateTrigStmt *trigger = makeNode(CreateTrigStmt); trigger->trigname = triggerName->data; trigger->relation = NULL; trigger->funcname = SystemFuncName("citus_truncate_trigger"); @@ -1232,9 +1197,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId) HeapScanDesc scan = NULL; #endif HeapTuple tuple = NULL; - ExprContext *econtext = NULL; MemoryContext oldContext = NULL; - TupleTableSlot *slot = NULL; uint64 rowsCopied = 0; /* take an ExclusiveLock to block all operations except SELECT */ @@ -1264,7 +1227,8 @@ CopyLocalDataIntoShards(Oid distributedRelationId) /* get the table columns */ tupleDescriptor = RelationGetDescr(distributedRelation); - slot = MakeSingleTupleTableSlotCompat(tupleDescriptor, &TTSOpsHeapTuple); + TupleTableSlot *slot = MakeSingleTupleTableSlotCompat(tupleDescriptor, + &TTSOpsHeapTuple); columnNameList = TupleDescColumnNameList(tupleDescriptor); /* determine the partition column in the tuple descriptor */ @@ -1276,7 +1240,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId) /* initialise per-tuple memory context */ estate = CreateExecutorState(); - econtext = GetPerTupleExprContext(estate); + ExprContext *econtext = GetPerTupleExprContext(estate); econtext->ecxt_scantuple = slot; copyDest = @@ -1362,9 +1326,8 @@ 
static List * TupleDescColumnNameList(TupleDesc tupleDescriptor) { List *columnNameList = NIL; - int columnIndex = 0; - for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) + for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) { Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex); char *columnName = NameStr(currentColumn->attname); @@ -1392,9 +1355,7 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor) static bool RelationUsesIdentityColumns(TupleDesc relationDesc) { - int attributeIndex = 0; - - for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++) + for (int attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++) { Form_pg_attribute attributeForm = TupleDescAttr(relationDesc, attributeIndex); diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index aea39efd5..8794a425e 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -50,7 +50,6 @@ void EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target) { /* local variables to work with dependencies */ - List *dependencies = NIL; List *dependenciesWithCommands = NIL; ListCell *dependencyCell = NULL; @@ -58,13 +57,12 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target) List *ddlCommands = NULL; /* local variables to work with worker nodes */ - List *workerNodeList = NULL; ListCell *workerNodeCell = NULL; /* * collect all dependencies in creation order and get their ddl commands */ - dependencies = GetDependenciesForObject(target); + List *dependencies = GetDependenciesForObject(target); foreach(dependencyCell, dependencies) { ObjectAddress *dependency = (ObjectAddress *) lfirst(dependencyCell); @@ -94,7 +92,7 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target) * either get it now, or get it in master_add_node after this transaction finishes 
and * the pg_dist_object record becomes visible. */ - workerNodeList = ActivePrimaryWorkerNodeList(RowShareLock); + List *workerNodeList = ActivePrimaryWorkerNodeList(RowShareLock); /* * right after we acquired the lock we mark our objects as distributed, these changes @@ -216,13 +214,12 @@ void ReplicateAllDependenciesToNode(const char *nodeName, int nodePort) { ListCell *dependencyCell = NULL; - List *dependencies = NIL; List *ddlCommands = NIL; /* * collect all dependencies in creation order and get their ddl commands */ - dependencies = GetDistributedObjectAddressList(); + List *dependencies = GetDistributedObjectAddressList(); /* * Depending on changes in the environment, such as the enable_object_propagation guc diff --git a/src/backend/distributed/commands/drop_distributed_table.c b/src/backend/distributed/commands/drop_distributed_table.c index c35738c8c..736bc060a 100644 --- a/src/backend/distributed/commands/drop_distributed_table.c +++ b/src/backend/distributed/commands/drop_distributed_table.c @@ -126,8 +126,6 @@ static void MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName, char *tableName) { - char *deleteDistributionCommand = NULL; - /* * The SQL_DROP trigger calls this function even for tables that are * not distributed. In that case, silently ignore. 
This is not very @@ -147,6 +145,6 @@ MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName } /* drop the distributed table metadata on the workers */ - deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName); + char *deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName); SendCommandToWorkers(WORKERS_WITH_METADATA, deleteDistributionCommand); } diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index ada8ebb59..619c5845d 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -82,8 +82,6 @@ ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parseTree) static char * ExtractNewExtensionVersion(Node *parseTree) { - Value *newVersionValue = NULL; - List *optionsList = NIL; if (IsA(parseTree, CreateExtensionStmt)) @@ -100,7 +98,7 @@ ExtractNewExtensionVersion(Node *parseTree) Assert(false); } - newVersionValue = GetExtensionOption(optionsList, "new_version"); + Value *newVersionValue = GetExtensionOption(optionsList, "new_version"); /* return target string safely */ if (newVersionValue) @@ -126,9 +124,6 @@ ExtractNewExtensionVersion(Node *parseTree) List * PlanCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const char *queryString) { - List *commands = NIL; - const char *createExtensionStmtSql = NULL; - if (!ShouldPropagateExtensionCommand((Node *) createExtensionStmt)) { return NIL; @@ -168,15 +163,15 @@ PlanCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const char *qu */ AddSchemaFieldIfMissing(createExtensionStmt); - createExtensionStmtSql = DeparseTreeNode((Node *) createExtensionStmt); + const char *createExtensionStmtSql = DeparseTreeNode((Node *) createExtensionStmt); /* * To prevent recursive propagation in mx architecture, we disable ddl * propagation before sending the command to workers. 
*/ - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createExtensionStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createExtensionStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -229,8 +224,6 @@ void ProcessCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const char *queryString) { - const ObjectAddress *extensionAddress = NULL; - if (!ShouldPropagateExtensionCommand((Node *) createExtensionStmt)) { return; @@ -246,7 +239,8 @@ ProcessCreateExtensionStmt(CreateExtensionStmt *createExtensionStmt, const return; } - extensionAddress = GetObjectAddressFromParseTree((Node *) createExtensionStmt, false); + const ObjectAddress *extensionAddress = GetObjectAddressFromParseTree( + (Node *) createExtensionStmt, false); EnsureDependenciesExistsOnAllNodes(extensionAddress); @@ -267,11 +261,6 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString) { List *allDroppedExtensions = dropStmt->objects; - List *distributedExtensions = NIL; - List *distributedExtensionAddresses = NIL; - - List *commands = NIL; - const char *deparsedStmt = NULL; ListCell *addressCell = NULL; @@ -281,7 +270,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString) } /* get distributed extensions to be dropped in worker nodes as well */ - distributedExtensions = FilterDistributedExtensions(allDroppedExtensions); + List *distributedExtensions = FilterDistributedExtensions(allDroppedExtensions); if (list_length(distributedExtensions) <= 0) { @@ -308,7 +297,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString) */ EnsureSequentialModeForExtensionDDL(); - distributedExtensionAddresses = ExtensionNameListToObjectAddressList( + List *distributedExtensionAddresses = ExtensionNameListToObjectAddressList( distributedExtensions); /* unmark each distributed extension */ @@ -326,7 +315,7 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString) * 
its execution. */ dropStmt->objects = distributedExtensions; - deparsedStmt = DeparseTreeNode((Node *) dropStmt); + const char *deparsedStmt = DeparseTreeNode((Node *) dropStmt); dropStmt->objects = allDroppedExtensions; @@ -334,9 +323,9 @@ PlanDropExtensionStmt(DropStmt *dropStmt, const char *queryString) * To prevent recursive propagation in mx architecture, we disable ddl * propagation before sending the command to workers. */ - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) deparsedStmt, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) deparsedStmt, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -425,9 +414,6 @@ List * PlanAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const char *queryString) { - const char *alterExtensionStmtSql = NULL; - List *commands = NIL; - if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt)) { return NIL; @@ -451,15 +437,15 @@ PlanAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const */ EnsureSequentialModeForExtensionDDL(); - alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt); + const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt); /* * To prevent recursive propagation in mx architecture, we disable ddl * propagation before sending the command to workers. 
*/ - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterExtensionStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterExtensionStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -474,9 +460,8 @@ void ProcessAlterExtensionSchemaStmt(AlterObjectSchemaStmt *alterExtensionStmt, const char *queryString) { - const ObjectAddress *extensionAddress = NULL; - - extensionAddress = GetObjectAddressFromParseTree((Node *) alterExtensionStmt, false); + const ObjectAddress *extensionAddress = GetObjectAddressFromParseTree( + (Node *) alterExtensionStmt, false); if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt)) { @@ -495,9 +480,6 @@ List * PlanAlterExtensionUpdateStmt(AlterExtensionStmt *alterExtensionStmt, const char *queryString) { - const char *alterExtensionStmtSql = NULL; - List *commands = NIL; - if (!ShouldPropagateExtensionCommand((Node *) alterExtensionStmt)) { return NIL; @@ -522,15 +504,15 @@ PlanAlterExtensionUpdateStmt(AlterExtensionStmt *alterExtensionStmt, const */ EnsureSequentialModeForExtensionDDL(); - alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt); + const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt); /* * To prevent recursive propagation in mx architecture, we disable ddl * propagation before sending the command to workers. 
*/ - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterExtensionStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterExtensionStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -711,18 +693,13 @@ IsAlterExtensionSetSchemaCitus(Node *parseTree) List * CreateExtensionDDLCommand(const ObjectAddress *extensionAddress) { - List *ddlCommands = NIL; - const char *ddlCommand = NULL; - - Node *stmt = NULL; - /* generate a statement for creation of the extension in "if not exists" construct */ - stmt = RecreateExtensionStmt(extensionAddress->objectId); + Node *stmt = RecreateExtensionStmt(extensionAddress->objectId); /* capture ddl command for the create statement */ - ddlCommand = DeparseTreeNode(stmt); + const char *ddlCommand = DeparseTreeNode(stmt); - ddlCommands = list_make1((void *) ddlCommand); + List *ddlCommands = list_make1((void *) ddlCommand); return ddlCommands; } @@ -747,26 +724,22 @@ RecreateExtensionStmt(Oid extensionOid) } /* schema DefElement related variables */ - Oid extensionSchemaOid = InvalidOid; - char *extensionSchemaName = NULL; - Node *schemaNameArg = NULL; /* set location to -1 as it is unknown */ int location = -1; - DefElem *schemaDefElement = NULL; /* set extension name and if_not_exists fields */ createExtensionStmt->extname = extensionName; createExtensionStmt->if_not_exists = true; /* get schema name that extension was created on */ - extensionSchemaOid = get_extension_schema(extensionOid); - extensionSchemaName = get_namespace_name(extensionSchemaOid); + Oid extensionSchemaOid = get_extension_schema(extensionOid); + char *extensionSchemaName = get_namespace_name(extensionSchemaOid); /* make DefEleme for extensionSchemaName */ - schemaNameArg = (Node *) makeString(extensionSchemaName); + Node *schemaNameArg = (Node *) makeString(extensionSchemaName); - schemaDefElement = makeDefElem("schema", schemaNameArg, location); + DefElem 
*schemaDefElement = makeDefElem("schema", schemaNameArg, location); /* append the schema name DefElem finally */ createExtensionStmt->options = lappend(createExtensionStmt->options, @@ -784,15 +757,11 @@ ObjectAddress * AlterExtensionSchemaStmtObjectAddress(AlterObjectSchemaStmt *alterExtensionSchemaStmt, bool missing_ok) { - ObjectAddress *extensionAddress = NULL; - Oid extensionOid = InvalidOid; - const char *extensionName = NULL; - Assert(alterExtensionSchemaStmt->objectType == OBJECT_EXTENSION); - extensionName = strVal(alterExtensionSchemaStmt->object); + const char *extensionName = strVal(alterExtensionSchemaStmt->object); - extensionOid = get_extension_oid(extensionName, missing_ok); + Oid extensionOid = get_extension_oid(extensionName, missing_ok); if (extensionOid == InvalidOid) { @@ -801,7 +770,7 @@ AlterExtensionSchemaStmtObjectAddress(AlterObjectSchemaStmt *alterExtensionSchem extensionName))); } - extensionAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*extensionAddress, ExtensionRelationId, extensionOid); return extensionAddress; @@ -816,13 +785,9 @@ ObjectAddress * AlterExtensionUpdateStmtObjectAddress(AlterExtensionStmt *alterExtensionStmt, bool missing_ok) { - ObjectAddress *extensionAddress = NULL; - Oid extensionOid = InvalidOid; - const char *extensionName = NULL; + const char *extensionName = alterExtensionStmt->extname; - extensionName = alterExtensionStmt->extname; - - extensionOid = get_extension_oid(extensionName, missing_ok); + Oid extensionOid = get_extension_oid(extensionName, missing_ok); if (extensionOid == InvalidOid) { @@ -831,7 +796,7 @@ AlterExtensionUpdateStmtObjectAddress(AlterExtensionStmt *alterExtensionStmt, extensionName))); } - extensionAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*extensionAddress, ExtensionRelationId, extensionOid); return extensionAddress; diff 
--git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 523566bc7..6d6abe07c 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -49,25 +49,21 @@ static void ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple, bool ConstraintIsAForeignKeyToReferenceTable(char *constraintName, Oid relationId) { - Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; bool foreignKeyToReferenceTable = false; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(CONSTRAINT_FOREIGN)); - scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { - Oid referencedTableId = InvalidOid; Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); char *tupleConstraintName = (constraintForm->conname).data; @@ -78,7 +74,7 @@ ConstraintIsAForeignKeyToReferenceTable(char *constraintName, Oid relationId) continue; } - referencedTableId = constraintForm->confrelid; + Oid referencedTableId = constraintForm->confrelid; Assert(IsDistributedTable(referencedTableId)); @@ -122,11 +118,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis Var *referencingDistKey, uint32 referencingColocationId) { - Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; 
Oid referencingTableId = relation->rd_id; Oid referencedTableId = InvalidOid; @@ -145,26 +138,22 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis referencingNotReplicated = (ShardReplicationFactor == 1); } - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relation->rd_id); - scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId, - true, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, + ConstraintRelidTypidNameIndexId, + true, NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); - bool referencedIsDistributed = false; char referencedDistMethod = 0; Var *referencedDistKey = NULL; - bool referencingIsReferenceTable = false; - bool referencedIsReferenceTable = false; int referencingAttrIndex = -1; int referencedAttrIndex = -1; - bool referencingColumnsIncludeDistKey = false; - bool foreignConstraintOnDistKey = false; if (constraintForm->contype != CONSTRAINT_FOREIGN) { @@ -175,7 +164,7 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis referencedTableId = constraintForm->confrelid; selfReferencingTable = (referencingTableId == referencedTableId); - referencedIsDistributed = IsDistributedTable(referencedTableId); + bool referencedIsDistributed = IsDistributedTable(referencedTableId); if (!referencedIsDistributed && !selfReferencingTable) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), @@ -199,8 +188,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis referencedColocationId = referencingColocationId; } - 
referencingIsReferenceTable = (referencingDistMethod == DISTRIBUTE_BY_NONE); - referencedIsReferenceTable = (referencedDistMethod == DISTRIBUTE_BY_NONE); + bool referencingIsReferenceTable = (referencingDistMethod == DISTRIBUTE_BY_NONE); + bool referencedIsReferenceTable = (referencedDistMethod == DISTRIBUTE_BY_NONE); /* @@ -250,8 +239,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis referencedDistKey, &referencingAttrIndex, &referencedAttrIndex); - referencingColumnsIncludeDistKey = (referencingAttrIndex != -1); - foreignConstraintOnDistKey = + bool referencingColumnsIncludeDistKey = (referencingAttrIndex != -1); + bool foreignConstraintOnDistKey = (referencingColumnsIncludeDistKey && referencingAttrIndex == referencedAttrIndex); @@ -353,14 +342,11 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple, int *referencingAttrIndex, int *referencedAttrIndex) { - Datum referencingColumnsDatum = 0; Datum *referencingColumnArray = NULL; int referencingColumnCount = 0; - Datum referencedColumnsDatum = 0; Datum *referencedColumnArray = NULL; int referencedColumnCount = 0; bool isNull = false; - int attrIdx = 0; *referencedAttrIndex = -1; *referencedAttrIndex = -1; @@ -371,10 +357,10 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple, * attributes together because partition column must be at the same place in both * referencing and referenced side of the foreign key constraint. 
*/ - referencingColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple, - Anum_pg_constraint_conkey, &isNull); - referencedColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple, - Anum_pg_constraint_confkey, &isNull); + Datum referencingColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple, + Anum_pg_constraint_conkey, &isNull); + Datum referencedColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple, + Anum_pg_constraint_confkey, &isNull); deconstruct_array(DatumGetArrayTypeP(referencingColumnsDatum), INT2OID, 2, true, 's', &referencingColumnArray, NULL, &referencingColumnCount); @@ -383,7 +369,7 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple, Assert(referencingColumnCount == referencedColumnCount); - for (attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx) + for (int attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx) { AttrNumber referencingAttrNo = DatumGetInt16(referencingColumnArray[attrIdx]); AttrNumber referencedAttrNo = DatumGetInt16(referencedColumnArray[attrIdx]); @@ -412,31 +398,26 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple, bool ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId) { - Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; bool foreignKeyToReferenceTableIncludesGivenColumn = false; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(CONSTRAINT_FOREIGN)); - scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); 
while (HeapTupleIsValid(heapTuple)) { - Oid referencedTableId = InvalidOid; - Oid referencingTableId = InvalidOid; int pgConstraintKey = 0; Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); - referencedTableId = constraintForm->confrelid; - referencingTableId = constraintForm->conrelid; + Oid referencedTableId = constraintForm->confrelid; + Oid referencingTableId = constraintForm->conrelid; if (referencedTableId == relationId) { @@ -493,11 +474,8 @@ GetTableForeignConstraintCommands(Oid relationId) { List *tableForeignConstraints = NIL; - Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be @@ -510,14 +488,15 @@ GetTableForeignConstraintCommands(Oid relationId) PushOverrideSearchPath(overridePath); /* open system catalog and scan all constraints that belong to this table */ - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); - scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId, - true, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, + ConstraintRelidTypidNameIndexId, + true, NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); @@ -556,24 +535,21 @@ GetTableForeignConstraintCommands(Oid relationId) bool HasForeignKeyToReferenceTable(Oid relationId) { - Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; bool 
hasForeignKeyToReferenceTable = false; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); - scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId, - true, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, + ConstraintRelidTypidNameIndexId, + true, NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { - Oid referencedTableId = InvalidOid; Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); if (constraintForm->contype != CONSTRAINT_FOREIGN) @@ -582,7 +558,7 @@ HasForeignKeyToReferenceTable(Oid relationId) continue; } - referencedTableId = constraintForm->confrelid; + Oid referencedTableId = constraintForm->confrelid; if (!IsDistributedTable(referencedTableId)) { @@ -615,22 +591,20 @@ HasForeignKeyToReferenceTable(Oid relationId) bool TableReferenced(Oid relationId) { - Relation pgConstraint = NULL; - HeapTuple heapTuple = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; Oid scanIndexId = InvalidOid; bool useIndex = false; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_confrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); - scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, + NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while 
(HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); @@ -661,17 +635,15 @@ static bool HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId, int pgConstraintKey, char *columnName) { - Datum columnsDatum = 0; Datum *columnArray = NULL; int columnCount = 0; - int attrIdx = 0; bool isNull = false; - columnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, pgConstraintKey, &isNull); + Datum columnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, pgConstraintKey, &isNull); deconstruct_array(DatumGetArrayTypeP(columnsDatum), INT2OID, 2, true, 's', &columnArray, NULL, &columnCount); - for (attrIdx = 0; attrIdx < columnCount; ++attrIdx) + for (int attrIdx = 0; attrIdx < columnCount; ++attrIdx) { AttrNumber attrNo = DatumGetInt16(columnArray[attrIdx]); @@ -696,22 +668,20 @@ HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId, bool TableReferencing(Oid relationId) { - Relation pgConstraint = NULL; - HeapTuple heapTuple = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; Oid scanIndexId = InvalidOid; bool useIndex = false; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); - scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, + NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); @@ -741,20 +711,17 @@ TableReferencing(Oid relationId) bool ConstraintIsAForeignKey(char *constraintNameInput, Oid relationId) { - 
Relation pgConstraint = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(CONSTRAINT_FOREIGN)); - scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index cbebb132c..c920bab17 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -99,8 +99,6 @@ create_distributed_function(PG_FUNCTION_ARGS) text *colocateWithText = NULL; /* optional */ StringInfoData ddlCommand = { 0 }; - const char *createFunctionSQL = NULL; - const char *alterFunctionOwnerSQL = NULL; ObjectAddress functionAddress = { 0 }; int distributionArgumentIndex = -1; @@ -159,8 +157,8 @@ create_distributed_function(PG_FUNCTION_ARGS) EnsureDependenciesExistsOnAllNodes(&functionAddress); - createFunctionSQL = GetFunctionDDLCommand(funcOid, true); - alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); + const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); + const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); initStringInfo(&ddlCommand); appendStringInfo(&ddlCommand, "%s;%s", createFunctionSQL, alterFunctionOwnerSQL); SendCommandToWorkersAsUser(ALL_WORKERS, CurrentUserName(), ddlCommand.data); @@ -221,13 +219,10 @@ 
create_distributed_function(PG_FUNCTION_ARGS) List * CreateFunctionDDLCommandsIdempotent(const ObjectAddress *functionAddress) { - char *ddlCommand = NULL; - char *alterFunctionOwnerSQL = NULL; - Assert(functionAddress->classId == ProcedureRelationId); - ddlCommand = GetFunctionDDLCommand(functionAddress->objectId, true); - alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(functionAddress->objectId); + char *ddlCommand = GetFunctionDDLCommand(functionAddress->objectId, true); + char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(functionAddress->objectId); return list_make2(ddlCommand, alterFunctionOwnerSQL); } @@ -243,23 +238,20 @@ GetDistributionArgIndex(Oid functionOid, char *distributionArgumentName, { int distributionArgumentIndex = -1; - int numberOfArgs = 0; - int argIndex = 0; Oid *argTypes = NULL; char **argNames = NULL; char *argModes = NULL; - HeapTuple proctup = NULL; *distributionArgumentOid = InvalidOid; - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid)); + HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid)); if (!HeapTupleIsValid(proctup)) { elog(ERROR, "cache lookup failed for function %u", functionOid); } - numberOfArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); + int numberOfArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); if (argumentStartsWith(distributionArgumentName, "$")) { @@ -301,7 +293,7 @@ GetDistributionArgIndex(Oid functionOid, char *distributionArgumentName, * So, loop over the arguments and try to find the argument name that matches * the parameter that user provided. */ - for (argIndex = 0; argIndex < numberOfArgs; ++argIndex) + for (int argIndex = 0; argIndex < numberOfArgs; ++argIndex) { char *argNameOnIndex = argNames != NULL ? 
argNames[argIndex] : NULL; @@ -352,8 +344,6 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName, if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) == 0) { - Oid colocatedTableId = InvalidOid; - /* check for default colocation group */ colocationId = ColocationId(ShardCount, ShardReplicationFactor, distributionArgumentOid); @@ -369,7 +359,7 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName, "option to create_distributed_function()"))); } - colocatedTableId = ColocatedTableId(colocationId); + Oid colocatedTableId = ColocatedTableId(colocationId); if (colocatedTableId != InvalidOid) { EnsureFunctionCanBeColocatedWithTable(functionOid, distributionArgumentOid, @@ -415,7 +405,6 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp char sourceDistributionMethod = sourceTableEntry->partitionMethod; char sourceReplicationModel = sourceTableEntry->replicationModel; Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId); - Oid sourceDistributionColumnType = InvalidOid; if (sourceDistributionMethod != DISTRIBUTE_BY_HASH) { @@ -447,13 +436,12 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp * If the types are the same, we're good. If not, we still check if there * is any coercion path between the types. 
*/ - sourceDistributionColumnType = sourceDistributionColumn->vartype; + Oid sourceDistributionColumnType = sourceDistributionColumn->vartype; if (sourceDistributionColumnType != distributionColumnType) { Oid coercionFuncId = InvalidOid; - CoercionPathType coercionType = COERCION_PATH_NONE; - coercionType = + CoercionPathType coercionType = find_coercion_pathway(distributionColumnType, sourceDistributionColumnType, COERCION_EXPLICIT, &coercionFuncId); @@ -483,17 +471,13 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, { const bool indexOK = true; - Relation pgDistObjectRel = NULL; - TupleDesc tupleDescriptor = NULL; ScanKeyData scanKey[3]; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_object]; bool isnull[Natts_pg_dist_object]; bool replace[Natts_pg_dist_object]; - pgDistObjectRel = heap_open(DistObjectRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistObjectRel); + Relation pgDistObjectRel = heap_open(DistObjectRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel); /* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */ ScanKeyInit(&scanKey[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ, @@ -503,11 +487,12 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, ScanKeyInit(&scanKey[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(distAddress->objectSubId)); - scanDescriptor = systable_beginscan(pgDistObjectRel, DistObjectPrimaryKeyIndexId(), - indexOK, - NULL, 3, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistObjectRel, + DistObjectPrimaryKeyIndexId(), + indexOK, + NULL, 3, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for node \"%d,%d,%d\" " @@ -609,17 +594,10 @@ 
GetFunctionAlterOwnerCommand(const RegProcedure funcOid) char *kindString = "FUNCTION"; Oid procOwner = InvalidOid; - char *functionSignature = NULL; - char *functionOwner = NULL; - - OverrideSearchPath *overridePath = NULL; - Datum functionSignatureDatum = 0; if (HeapTupleIsValid(proctup)) { - Form_pg_proc procform; - - procform = (Form_pg_proc) GETSTRUCT(proctup); + Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); procOwner = procform->proowner; @@ -644,7 +622,7 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid) * schema-prefixed. pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ - overridePath = GetOverrideSearchPath(CurrentMemoryContext); + OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; @@ -654,16 +632,16 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid) * If the function exists we want to use pg_get_function_identity_arguments to * serialize its canonical arguments */ - functionSignatureDatum = + Datum functionSignatureDatum = DirectFunctionCall1(regprocedureout, ObjectIdGetDatum(funcOid)); /* revert back to original search_path */ PopOverrideSearchPath(); /* regprocedureout returns cstring */ - functionSignature = DatumGetCString(functionSignatureDatum); + char *functionSignature = DatumGetCString(functionSignatureDatum); - functionOwner = GetUserNameFromId(procOwner, false); + char *functionOwner = GetUserNameFromId(procOwner, false); appendStringInfo(alterCommand, "ALTER %s %s OWNER TO %s;", kindString, @@ -686,12 +664,8 @@ static char * GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) { StringInfoData buf = { 0 }; - HeapTuple proctup = NULL; - Form_pg_proc proc = NULL; HeapTuple aggtup = NULL; Form_pg_aggregate agg = NULL; - const char *name = NULL; - const char *nsp = NULL; int numargs = 0; int i = 0; Oid *argtypes = NULL; @@ -701,20 +675,20 
@@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) int argsprinted = 0; int inputargno = 0; - proctup = SearchSysCache1(PROCOID, funcOid); + HeapTuple proctup = SearchSysCache1(PROCOID, funcOid); if (!HeapTupleIsValid(proctup)) { elog(ERROR, "cache lookup failed for %d", funcOid); } - proc = (Form_pg_proc) GETSTRUCT(proctup); + Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(proctup); Assert(proc->prokind == PROKIND_AGGREGATE); initStringInfo(&buf); - name = NameStr(proc->proname); - nsp = get_namespace_name(proc->pronamespace); + const char *name = NameStr(proc->proname); + const char *nsp = get_namespace_name(proc->pronamespace); #if PG_VERSION_NUM >= 120000 if (useCreateOrReplace) @@ -1112,8 +1086,6 @@ TriggerSyncMetadataToPrimaryNodes(void) static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt) { - const ObjectAddress *address = NULL; - if (creating_extension) { /* @@ -1144,7 +1116,7 @@ ShouldPropagateCreateFunction(CreateFunctionStmt *stmt) * Even though its a replace we should accept an non-existing function, it will just * not be distributed */ - address = GetObjectAddressFromParseTree((Node *) stmt, true); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, true); if (!IsObjectDistributed(address)) { /* do not propagate alter function for non-distributed functions */ @@ -1231,21 +1203,18 @@ PlanCreateFunctionStmt(CreateFunctionStmt *stmt, const char *queryString) List * ProcessCreateFunctionStmt(CreateFunctionStmt *stmt, const char *queryString) { - const ObjectAddress *address = NULL; - List *commands = NIL; - if (!ShouldPropagateCreateFunction(stmt)) { return NIL; } - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); EnsureDependenciesExistsOnAllNodes(address); - commands = list_make4(DISABLE_DDL_PROPAGATION, - GetFunctionDDLCommand(address->objectId, true), - 
GetFunctionAlterOwnerCommand(address->objectId), - ENABLE_DDL_PROPAGATION); + List *commands = list_make4(DISABLE_DDL_PROPAGATION, + GetFunctionDDLCommand(address->objectId, true), + GetFunctionAlterOwnerCommand(address->objectId), + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1260,7 +1229,6 @@ ObjectAddress * CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok) { ObjectType objectType = OBJECT_FUNCTION; - ObjectWithArgs *objectWithArgs = NULL; ListCell *parameterCell = NULL; if (stmt->is_procedure) @@ -1268,7 +1236,7 @@ CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok) objectType = OBJECT_PROCEDURE; } - objectWithArgs = makeNode(ObjectWithArgs); + ObjectWithArgs *objectWithArgs = makeNode(ObjectWithArgs); objectWithArgs->objname = stmt->funcname; foreach(parameterCell, stmt->parameters) @@ -1292,12 +1260,11 @@ CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok) ObjectAddress * DefineAggregateStmtObjectAddress(DefineStmt *stmt, bool missing_ok) { - ObjectWithArgs *objectWithArgs = NULL; ListCell *parameterCell = NULL; Assert(stmt->kind == OBJECT_AGGREGATE); - objectWithArgs = makeNode(ObjectWithArgs); + ObjectWithArgs *objectWithArgs = makeNode(ObjectWithArgs); objectWithArgs->objname = stmt->defnames; foreach(parameterCell, linitial(stmt->args)) @@ -1318,13 +1285,9 @@ DefineAggregateStmtObjectAddress(DefineStmt *stmt, bool missing_ok) List * PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString) { - const char *sql = NULL; - const ObjectAddress *address = NULL; - List *commands = NIL; - AssertObjectTypeIsFunctional(stmt->objtype); - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); if (!ShouldPropagateAlterFunction(address)) { return NIL; @@ -1334,11 +1297,11 @@ PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString) 
ErrorIfUnsupportedAlterFunctionStmt(stmt); EnsureSequentialModeForFunctionDDL(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1355,13 +1318,9 @@ PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString) List * PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString) { - const char *sql = NULL; - const ObjectAddress *address = NULL; - List *commands = NIL; - AssertObjectTypeIsFunctional(stmt->renameType); - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); if (!ShouldPropagateAlterFunction(address)) { return NIL; @@ -1370,11 +1329,11 @@ PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString) EnsureCoordinator(); EnsureSequentialModeForFunctionDDL(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1389,13 +1348,9 @@ PlanRenameFunctionStmt(RenameStmt *stmt, const char *queryString) List * PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) { - const char *sql = NULL; - const ObjectAddress *address = NULL; - List *commands = NIL; - AssertObjectTypeIsFunctional(stmt->objectType); - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); if (!ShouldPropagateAlterFunction(address)) { return 
NIL; @@ -1404,11 +1359,11 @@ PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString EnsureCoordinator(); EnsureSequentialModeForFunctionDDL(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1424,13 +1379,9 @@ PlanAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString List * PlanAlterFunctionOwnerStmt(AlterOwnerStmt *stmt, const char *queryString) { - const ObjectAddress *address = NULL; - const char *sql = NULL; - List *commands = NULL; - AssertObjectTypeIsFunctional(stmt->objectType); - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); if (!ShouldPropagateAlterFunction(address)) { return NIL; @@ -1439,11 +1390,11 @@ PlanAlterFunctionOwnerStmt(AlterOwnerStmt *stmt, const char *queryString) EnsureCoordinator(); EnsureSequentialModeForFunctionDDL(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1465,10 +1416,7 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString) List *distributedObjectWithArgsList = NIL; List *distributedFunctionAddresses = NIL; ListCell *addressCell = NULL; - const char *dropStmtSql = NULL; - List *commands = NULL; ListCell *objectWithArgsListCell = NULL; - DropStmt *stmtCopy = NULL; AssertObjectTypeIsFunctional(stmt->removeType); @@ -1502,11 +1450,9 @@ 
PlanDropFunctionStmt(DropStmt *stmt, const char *queryString) */ foreach(objectWithArgsListCell, deletingObjectWithArgsList) { - ObjectWithArgs *func = NULL; - ObjectAddress *address = NULL; - - func = castNode(ObjectWithArgs, lfirst(objectWithArgsListCell)); - address = FunctionToObjectAddress(stmt->removeType, func, stmt->missing_ok); + ObjectWithArgs *func = castNode(ObjectWithArgs, lfirst(objectWithArgsListCell)); + ObjectAddress *address = FunctionToObjectAddress(stmt->removeType, func, + stmt->missing_ok); if (!IsObjectDistributed(address)) { @@ -1543,13 +1489,13 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString) * Swap the list of objects before deparsing and restore the old list after. This * ensures we only have distributed functions in the deparsed drop statement. */ - stmtCopy = copyObject(stmt); + DropStmt *stmtCopy = copyObject(stmt); stmtCopy->objects = distributedObjectWithArgsList; - dropStmtSql = DeparseTreeNode((Node *) stmtCopy); + const char *dropStmtSql = DeparseTreeNode((Node *) stmtCopy); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -1569,9 +1515,6 @@ PlanDropFunctionStmt(DropStmt *stmt, const char *queryString) List * PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryString) { - const ObjectAddress *address = NULL; - const char *functionName = NULL; - AssertObjectTypeIsFunctional(stmt->objectType); if (creating_extension) @@ -1591,7 +1534,7 @@ PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryStri return NIL; } - address = GetObjectAddressFromParseTree((Node *) stmt, true); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, true); if (!IsObjectDistributed(address)) { return NIL; @@ -1603,7 +1546,7 @@ 
PlanAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt, const char *queryStri * workers */ - functionName = getObjectIdentity(address); + const char *functionName = getObjectIdentity(address); ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an " "extension"), errdetail("Function \"%s\" is already distributed. Functions from " @@ -1635,11 +1578,9 @@ AlterFunctionDependsStmtObjectAddress(AlterObjectDependsStmt *stmt, bool missing void ProcessAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) { - const ObjectAddress *address = NULL; - AssertObjectTypeIsFunctional(stmt->objectType); - address = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *address = GetObjectAddressFromParseTree((Node *) stmt, false); if (!ShouldPropagateAlterFunction(address)) { return; @@ -1698,16 +1639,11 @@ AlterFunctionOwnerObjectAddress(AlterOwnerStmt *stmt, bool missing_ok) ObjectAddress * AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok) { - ObjectWithArgs *objectWithArgs = NULL; - Oid funcOid = InvalidOid; - List *names = NIL; - ObjectAddress *address = NULL; - AssertObjectTypeIsFunctional(stmt->objectType); - objectWithArgs = castNode(ObjectWithArgs, stmt->object); - funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true); - names = objectWithArgs->objname; + ObjectWithArgs *objectWithArgs = castNode(ObjectWithArgs, stmt->object); + Oid funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true); + List *names = objectWithArgs->objname; if (funcOid == InvalidOid) { @@ -1744,7 +1680,7 @@ AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_o } } - address = palloc0(sizeof(ObjectAddress)); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, ProcedureRelationId, funcOid); return address; @@ -1766,7 +1702,6 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address) address->objectId))); 
char *baseName = get_func_name(address->objectId); int baseLength = strlen(baseName); - int numargs = 0; Oid *argtypes = NULL; char **argnames = NULL; char *argmodes = NULL; @@ -1777,15 +1712,13 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address) elog(ERROR, "citus cache lookup failed."); } - numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes); + int numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes); ReleaseSysCache(proctup); while (true) { int suffixLength = snprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)", count); - List *newProcName = NIL; - FuncCandidateList clist = NULL; /* trim the base name at the end to leave space for the suffix and trailing \0 */ baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1); @@ -1795,10 +1728,11 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address) strncpy(newName, baseName, baseLength); strncpy(newName + baseLength, suffix, suffixLength); - newProcName = list_make2(namespace, makeString(newName)); + List *newProcName = list_make2(namespace, makeString(newName)); /* don't need to rename if the input arguments don't match */ - clist = FuncnameGetCandidates(newProcName, numargs, NIL, false, false, true); + FuncCandidateList clist = FuncnameGetCandidates(newProcName, numargs, NIL, false, + false, true); for (; clist; clist = clist->next) { if (memcmp(clist->args, argtypes, sizeof(Oid) * numargs) == 0) @@ -1828,8 +1762,6 @@ ObjectWithArgsFromOid(Oid funcOid) Oid *argTypes = NULL; char **argNames = NULL; char *argModes = NULL; - int numargs = 0; - int i = 0; HeapTuple proctup = SearchSysCache1(PROCOID, funcOid); if (!HeapTupleIsValid(proctup)) @@ -1837,14 +1769,14 @@ ObjectWithArgsFromOid(Oid funcOid) elog(ERROR, "citus cache lookup failed."); } - numargs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); + int numargs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); objectWithArgs->objname = list_make2( 
makeString(get_namespace_name(get_func_namespace(funcOid))), makeString(get_func_name(funcOid)) ); - for (i = 0; i < numargs; i++) + for (int i = 0; i < numargs; i++) { if (argModes == NULL || argModes[i] != PROARGMODE_OUT || argModes[i] != PROARGMODE_TABLE) @@ -1870,13 +1802,10 @@ static ObjectAddress * FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs, bool missing_ok) { - Oid funcOid = InvalidOid; - ObjectAddress *address = NULL; - AssertObjectTypeIsFunctional(objectType); - funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + Oid funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, ProcedureRelationId, funcOid); return address; diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 161e33307..e217fb5c6 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -115,9 +115,6 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) */ if (createIndexStatement->relation != NULL) { - Relation relation = NULL; - Oid relationId = InvalidOid; - bool isDistributedRelation = false; LOCKMODE lockmode = ShareLock; MemoryContext relationContext = NULL; @@ -137,10 +134,10 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) * checked permissions, and will only fail when executing the actual * index statements. 
*/ - relation = heap_openrv(createIndexStatement->relation, lockmode); - relationId = RelationGetRelid(relation); + Relation relation = heap_openrv(createIndexStatement->relation, lockmode); + Oid relationId = RelationGetRelid(relation); - isDistributedRelation = IsDistributedTable(relationId); + bool isDistributedRelation = IsDistributedTable(relationId); if (createIndexStatement->relation->schemaname == NULL) { @@ -163,15 +160,13 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) if (isDistributedRelation) { - Oid namespaceId = InvalidOid; - Oid indexRelationId = InvalidOid; char *indexName = createIndexStatement->idxname; char *namespaceName = createIndexStatement->relation->schemaname; ErrorIfUnsupportedIndexStmt(createIndexStatement); - namespaceId = get_namespace_oid(namespaceName, false); - indexRelationId = get_relname_relid(indexName, namespaceId); + Oid namespaceId = get_namespace_oid(namespaceName, false); + Oid indexRelationId = get_relname_relid(indexName, namespaceId); /* if index does not exist, send the command to workers */ if (!OidIsValid(indexRelationId)) @@ -319,9 +314,6 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) /* check if any of the indexes being dropped belong to a distributed table */ foreach(dropObjectCell, dropIndexStatement->objects) { - Oid indexId = InvalidOid; - Oid relationId = InvalidOid; - bool isDistributedRelation = false; struct DropRelationCallbackState state; uint32 rvrFlags = RVR_MISSING_OK; LOCKMODE lockmode = AccessExclusiveLock; @@ -349,9 +341,9 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) state.heapOid = InvalidOid; state.concurrent = dropIndexStatement->concurrent; - indexId = RangeVarGetRelidExtended(rangeVar, lockmode, rvrFlags, - RangeVarCallbackForDropIndex, - (void *) &state); + Oid indexId = RangeVarGetRelidExtended(rangeVar, lockmode, rvrFlags, + RangeVarCallbackForDropIndex, + (void *) &state); /* * If the 
index does not exist, we don't do anything here, and allow @@ -362,8 +354,8 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) continue; } - relationId = IndexGetRelation(indexId, false); - isDistributedRelation = IsDistributedTable(relationId); + Oid relationId = IndexGetRelation(indexId, false); + bool isDistributedRelation = IsDistributedTable(relationId); if (isDistributedRelation) { distributedIndexId = indexId; @@ -400,13 +392,6 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) void PostProcessIndexStmt(IndexStmt *indexStmt) { - Relation relation = NULL; - Oid indexRelationId = InvalidOid; - Relation indexRelation = NULL; - Relation pg_index = NULL; - HeapTuple indexTuple = NULL; - Form_pg_index indexForm = NULL; - /* we are only processing CONCURRENT index statements */ if (!indexStmt->concurrent) { @@ -424,10 +409,10 @@ PostProcessIndexStmt(IndexStmt *indexStmt) StartTransactionCommand(); /* get the affected relation and index */ - relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock); - indexRelationId = get_relname_relid(indexStmt->idxname, - RelationGetNamespace(relation)); - indexRelation = index_open(indexRelationId, RowExclusiveLock); + Relation relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock); + Oid indexRelationId = get_relname_relid(indexStmt->idxname, + RelationGetNamespace(relation)); + Relation indexRelation = index_open(indexRelationId, RowExclusiveLock); /* close relations but retain locks */ heap_close(relation, NoLock); @@ -441,13 +426,14 @@ PostProcessIndexStmt(IndexStmt *indexStmt) StartTransactionCommand(); /* now, update index's validity in a way that can roll back */ - pg_index = heap_open(IndexRelationId, RowExclusiveLock); + Relation pg_index = heap_open(IndexRelationId, RowExclusiveLock); - indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(indexRelationId)); + HeapTuple indexTuple = SearchSysCacheCopy1(INDEXRELID, 
ObjectIdGetDatum( + indexRelationId)); Assert(HeapTupleIsValid(indexTuple)); /* better be present, we have lock! */ /* mark as valid, save, and update pg_index indexes */ - indexForm = (Form_pg_index) GETSTRUCT(indexTuple); + Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(indexTuple); indexForm->indisvalid = true; CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple); @@ -528,11 +514,10 @@ CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; - Task *task = NULL; deparse_shard_index_statement(indexStmt, relationId, shardId, &ddlString); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; @@ -574,11 +559,10 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; - Task *task = NULL; deparse_shard_reindex_statement(reindexStmt, relationId, shardId, &ddlString); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; @@ -612,13 +596,11 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi { /* *INDENT-OFF* */ HeapTuple tuple; - struct DropRelationCallbackState *state; char relkind; char expected_relkind; - Form_pg_class classform; LOCKMODE heap_lockmode; - state = (struct DropRelationCallbackState *) arg; + struct DropRelationCallbackState *state = (struct DropRelationCallbackState *) arg; relkind = state->relkind; heap_lockmode = state->concurrent ? 
ShareUpdateExclusiveLock : AccessExclusiveLock; @@ -643,7 +625,7 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid)); if (!HeapTupleIsValid(tuple)) return; /* concurrently dropped, so nothing to do */ - classform = (Form_pg_class) GETSTRUCT(tuple); + Form_pg_class classform = (Form_pg_class) GETSTRUCT(tuple); /* * PG 11 sends relkind as partitioned index for an index @@ -805,7 +787,6 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) Oid relationId = RangeVarGetRelid(relation, lockMode, missingOk); Var *partitionKey = DistPartitionKey(relationId); char partitionMethod = PartitionMethod(relationId); - List *indexParameterList = NIL; ListCell *indexParameterCell = NULL; bool indexContainsPartitionColumn = false; @@ -825,12 +806,11 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) "is currently unsupported"))); } - indexParameterList = createIndexStatement->indexParams; + List *indexParameterList = createIndexStatement->indexParams; foreach(indexParameterCell, indexParameterList) { IndexElem *indexElement = (IndexElem *) lfirst(indexParameterCell); char *columnName = indexElement->name; - AttrNumber attributeNumber = InvalidAttrNumber; /* column name is null for index expressions, skip it */ if (columnName == NULL) @@ -838,7 +818,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) continue; } - attributeNumber = get_attnum(relationId, columnName); + AttrNumber attributeNumber = get_attnum(relationId, columnName); if (attributeNumber == partitionKey->varattno) { indexContainsPartitionColumn = true; @@ -902,7 +882,6 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; char *shardIndexName = pstrdup(indexName); - Task *task = NULL; AppendShardIdToName(&shardIndexName, shardId); @@ -913,7 +892,7 @@ 
DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) quote_qualified_identifier(schemaName, shardIndexName), (dropStmt->behavior == DROP_RESTRICT ? "RESTRICT" : "CASCADE")); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 6969e7206..8cfa56695 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -298,8 +298,6 @@ PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb); static void CitusCopyFrom(CopyStmt *copyStatement, char *completionTag) { - bool isCopyFromWorker = false; - BeginOrContinueCoordinatedTransaction(); /* disallow COPY to/from file or program except for superusers */ @@ -324,7 +322,7 @@ CitusCopyFrom(CopyStmt *copyStatement, char *completionTag) } masterConnection = NULL; /* reset, might still be set after error */ - isCopyFromWorker = IsCopyFromWorker(copyStatement); + bool isCopyFromWorker = IsCopyFromWorker(copyStatement); if (isCopyFromWorker) { CopyFromWorkerNode(copyStatement, completionTag); @@ -387,9 +385,6 @@ CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag) NodeAddress *masterNodeAddress = MasterNodeAddress(copyStatement); char *nodeName = masterNodeAddress->nodeName; int32 nodePort = masterNodeAddress->nodePort; - Oid relationId = InvalidOid; - char partitionMethod = 0; - char *schemaName = NULL; uint32 connectionFlags = FOR_DML; masterConnection = GetNodeConnection(connectionFlags, nodeName, nodePort); @@ -399,14 +394,14 @@ CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag) RemoteTransactionBeginIfNecessary(masterConnection); /* strip schema name for local reference */ - schemaName = copyStatement->relation->schemaname; + char *schemaName = copyStatement->relation->schemaname; copyStatement->relation->schemaname = NULL; - relationId = 
RangeVarGetRelid(copyStatement->relation, NoLock, false); + Oid relationId = RangeVarGetRelid(copyStatement->relation, NoLock, false); /* put schema name back */ copyStatement->relation->schemaname = schemaName; - partitionMethod = MasterPartitionMethod(copyStatement->relation); + char partitionMethod = MasterPartitionMethod(copyStatement->relation); if (partitionMethod != DISTRIBUTE_BY_APPEND) { ereport(ERROR, (errmsg("copy from worker nodes is only supported " @@ -439,18 +434,10 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) CitusCopyDestReceiver *copyDest = NULL; DestReceiver *dest = NULL; - Relation distributedRelation = NULL; Relation copiedDistributedRelation = NULL; Form_pg_class copiedDistributedRelationTuple = NULL; - TupleDesc tupleDescriptor = NULL; - uint32 columnCount = 0; - Datum *columnValues = NULL; - bool *columnNulls = NULL; - int columnIndex = 0; List *columnNameList = NIL; - Var *partitionColumn = NULL; int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; - TupleTableSlot *tupleTableSlot = NULL; EState *executorState = NULL; MemoryContext executorTupleContext = NULL; @@ -465,27 +452,28 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) ErrorContextCallback errorCallback; /* allocate column values and nulls arrays */ - distributedRelation = heap_open(tableId, RowExclusiveLock); - tupleDescriptor = RelationGetDescr(distributedRelation); - columnCount = tupleDescriptor->natts; - columnValues = palloc0(columnCount * sizeof(Datum)); - columnNulls = palloc0(columnCount * sizeof(bool)); + Relation distributedRelation = heap_open(tableId, RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation); + uint32 columnCount = tupleDescriptor->natts; + Datum *columnValues = palloc0(columnCount * sizeof(Datum)); + bool *columnNulls = palloc0(columnCount * sizeof(bool)); /* set up a virtual tuple table slot */ - tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, 
&TTSOpsVirtual); + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, + &TTSOpsVirtual); tupleTableSlot->tts_nvalid = columnCount; tupleTableSlot->tts_values = columnValues; tupleTableSlot->tts_isnull = columnNulls; /* determine the partition column index in the tuple descriptor */ - partitionColumn = PartitionColumn(tableId, 0); + Var *partitionColumn = PartitionColumn(tableId, 0); if (partitionColumn != NULL) { partitionColumnIndex = partitionColumn->varattno - 1; } /* build the list of column names for remote COPY statements */ - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) { Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex); char *columnName = NameStr(currentColumn->attname); @@ -566,16 +554,13 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) while (true) { - bool nextRowFound = false; - MemoryContext oldContext = NULL; - ResetPerTupleExprContext(executorState); - oldContext = MemoryContextSwitchTo(executorTupleContext); + MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); /* parse a row from the input */ - nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); + bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, + columnValues, columnNulls); if (!nextRowFound) { @@ -625,8 +610,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) static void CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) { - FmgrInfo *columnOutputFunctions = NULL; - /* allocate column values and nulls arrays */ Relation distributedRelation = heap_open(relationId, RowExclusiveLock); TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation); @@ -668,7 +651,8 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) copyOutState->fe_msgbuf = makeStringInfo(); 
copyOutState->rowcontext = executorTupleContext; - columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor, copyOutState->binary); + FmgrInfo *columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor, + copyOutState->binary); /* set up callback to identify error line number */ errorCallback.callback = CopyFromErrorCallback; @@ -684,19 +668,15 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) while (true) { - bool nextRowFound = false; - MemoryContext oldContext = NULL; - uint64 messageBufferSize = 0; - ResetPerTupleExprContext(executorState); /* switch to tuple memory context and start showing line number in errors */ error_context_stack = &errorCallback; - oldContext = MemoryContextSwitchTo(executorTupleContext); + MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); /* parse a row from the input */ - nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); + bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, + columnValues, columnNulls); if (!nextRowFound) { @@ -739,7 +719,7 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId, shardConnections->connectionList); - messageBufferSize = copyOutState->fe_msgbuf->len; + uint64 messageBufferSize = copyOutState->fe_msgbuf->len; copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize; /* @@ -841,7 +821,6 @@ static char MasterPartitionMethod(RangeVar *relation) { char partitionMethod = '\0'; - PGresult *queryResult = NULL; bool raiseInterrupts = true; char *relationName = relation->relname; @@ -855,7 +834,7 @@ MasterPartitionMethod(RangeVar *relation) { ReportConnectionError(masterConnection, ERROR); } - queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); + PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == 
PGRES_TUPLES_OK) { char *partitionMethodString = PQgetvalue((PGresult *) queryResult, 0, 0); @@ -923,7 +902,6 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, ShardConnections *shardConnections, bool stopOnFailure, bool useBinaryCopyFormat) { - List *finalizedPlacementList = NIL; int failedPlacementCount = 0; ListCell *placementCell = NULL; List *connectionList = NULL; @@ -940,7 +918,7 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, /* release finalized placement list at the end of this function */ MemoryContext oldContext = MemoryContextSwitchTo(localContext); - finalizedPlacementList = MasterShardPlacementList(shardId); + List *finalizedPlacementList = MasterShardPlacementList(shardId); MemoryContextSwitchTo(oldContext); @@ -948,10 +926,7 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); char *nodeUser = CurrentUserName(); - MultiConnection *connection = NULL; uint32 connectionFlags = FOR_DML; - StringInfo copyCommand = NULL; - PGresult *result = NULL; /* * For hash partitioned tables, connection establishment happens in @@ -959,7 +934,8 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, */ Assert(placement->partitionMethod != DISTRIBUTE_BY_HASH); - connection = GetPlacementConnection(connectionFlags, placement, nodeUser); + MultiConnection *connection = GetPlacementConnection(connectionFlags, placement, + nodeUser); if (PQstatus(connection->pgConn) != CONNECTION_OK) { @@ -987,14 +963,15 @@ OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, ClaimConnectionExclusively(connection); RemoteTransactionBeginIfNecessary(connection); - copyCommand = ConstructCopyStatement(copyStatement, shardConnections->shardId, - useBinaryCopyFormat); + StringInfo copyCommand = ConstructCopyStatement(copyStatement, + shardConnections->shardId, + useBinaryCopyFormat); if (!SendRemoteCommand(connection, copyCommand->data)) { ReportConnectionError(connection, ERROR); } - 
result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (PQresultStatus(result) != PGRES_COPY_IN) { ReportResultError(connection, result, ERROR); @@ -1035,9 +1012,8 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription) { bool useBinaryCopyFormat = true; int totalColumnCount = tupleDescription->natts; - int columnIndex = 0; - for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) + for (int columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) { Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex); Oid typeId = InvalidOid; @@ -1149,7 +1125,6 @@ static List * RemoteFinalizedShardPlacementList(uint64 shardId) { List *finalizedPlacementList = NIL; - PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo shardPlacementsCommand = makeStringInfo(); @@ -1159,13 +1134,12 @@ RemoteFinalizedShardPlacementList(uint64 shardId) { ReportConnectionError(masterConnection, ERROR); } - queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); + PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == PGRES_TUPLES_OK) { int rowCount = PQntuples(queryResult); - int rowIndex = 0; - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) { char *placementIdString = PQgetvalue(queryResult, rowIndex, 0); char *nodeName = pstrdup(PQgetvalue(queryResult, rowIndex, 1)); @@ -1236,11 +1210,10 @@ ConstructCopyStatement(CopyStmt *copyStatement, int64 shardId, bool useBinaryCop char *relationName = copyStatement->relation->relname; char *shardName = pstrdup(relationName); - char *shardQualifiedName = NULL; AppendShardIdToName(&shardName, shardId); - shardQualifiedName = quote_qualified_identifier(schemaName, shardName); + char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); appendStringInfo(command, 
"COPY %s ", shardQualifiedName); @@ -1331,7 +1304,6 @@ EndRemoteCopy(int64 shardId, List *connectionList) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - PGresult *result = NULL; bool raiseInterrupts = true; /* end the COPY input */ @@ -1343,7 +1315,7 @@ EndRemoteCopy(int64 shardId, List *connectionList) } /* check whether there were any COPY errors */ - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (PQresultStatus(result) != PGRES_COMMAND_OK) { ReportCopyError(connection, result); @@ -1487,14 +1459,13 @@ static Oid TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName) { AttrNumber destAttrNumber = get_attnum(relationId, columnName); - Form_pg_attribute attr = NULL; if (destAttrNumber == InvalidAttrNumber) { ereport(ERROR, (errmsg("invalid attr? %s", columnName))); } - attr = TupleDescAttr(tupleDescriptor, destAttrNumber - 1); + Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, destAttrNumber - 1); return attr->atttypid; } @@ -1508,9 +1479,8 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor) { int columnCount = tupleDescriptor->natts; Oid *typeArray = palloc0(columnCount * sizeof(Oid)); - int columnIndex = 0; - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) { Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex); if (attr->attisdropped) @@ -1537,15 +1507,13 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto Oid destRelId, List *columnNameList, Oid *finalColumnTypeArray) { - int columnIndex = 0; int columnCount = inputTupleDescriptor->natts; CopyCoercionData *coercePaths = palloc0(columnCount * sizeof(CopyCoercionData)); Oid *inputTupleTypes = TypeArrayFromTupleDescriptor(inputTupleDescriptor); ListCell *currentColumnName = 
list_head(columnNameList); - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) { - Oid destTupleType = InvalidOid; Oid inputTupleType = inputTupleTypes[columnIndex]; char *columnName = lfirst(currentColumnName); @@ -1555,7 +1523,7 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto continue; } - destTupleType = TypeForColumnName(destRelId, destTupleDescriptor, columnName); + Oid destTupleType = TypeForColumnName(destRelId, destTupleDescriptor, columnName); finalColumnTypeArray[columnIndex] = destTupleType; @@ -1584,8 +1552,7 @@ TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray, bool binaryFormat) { FmgrInfo *columnOutputFunctions = palloc0(columnCount * sizeof(FmgrInfo)); - uint32 columnIndex = 0; - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (uint32 columnIndex = 0; columnIndex < columnCount; columnIndex++) { FmgrInfo *currentOutputFunction = &columnOutputFunctions[columnIndex]; Oid columnTypeId = typeIdArray[columnIndex]; @@ -1665,7 +1632,6 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor, uint32 totalColumnCount = (uint32) rowDescriptor->natts; uint32 availableColumnCount = AvailableColumnCount(rowDescriptor); uint32 appendedColumnCount = 0; - uint32 columnIndex = 0; MemoryContext oldContext = MemoryContextSwitchTo(rowOutputState->rowcontext); @@ -1673,7 +1639,7 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor, { CopySendInt16(rowOutputState, availableColumnCount); } - for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) + for (uint32 columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) { Form_pg_attribute currentColumn = TupleDescAttr(rowDescriptor, columnIndex); Datum value = valueArray[columnIndex]; @@ -1803,9 +1769,8 @@ static uint32 AvailableColumnCount(TupleDesc tupleDescriptor) { uint32 columnCount = 0; - uint32 
columnIndex = 0; - for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) + for (uint32 columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) { Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex); @@ -1916,13 +1881,11 @@ MasterCreateEmptyShard(char *relationName) static int64 CreateEmptyShard(char *relationName) { - int64 shardId = 0; - text *relationNameText = cstring_to_text(relationName); Datum relationNameDatum = PointerGetDatum(relationNameText); Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard, relationNameDatum); - shardId = DatumGetInt64(shardIdDatum); + int64 shardId = DatumGetInt64(shardIdDatum); return shardId; } @@ -1936,7 +1899,6 @@ static int64 RemoteCreateEmptyShard(char *relationName) { int64 shardId = 0; - PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo createEmptyShardCommand = makeStringInfo(); @@ -1946,7 +1908,7 @@ RemoteCreateEmptyShard(char *relationName) { ReportConnectionError(masterConnection, ERROR); } - queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); + PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == PGRES_TUPLES_OK) { char *shardIdString = PQgetvalue((PGresult *) queryResult, 0, 0); @@ -1991,7 +1953,6 @@ MasterUpdateShardStatistics(uint64 shardId) static void RemoteUpdateShardStatistics(uint64 shardId) { - PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo updateShardStatisticsCommand = makeStringInfo(); @@ -2002,7 +1963,7 @@ RemoteUpdateShardStatistics(uint64 shardId) { ReportConnectionError(masterConnection, ERROR); } - queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); + PGresult *queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) != PGRES_TUPLES_OK) { ereport(ERROR, (errmsg("could not update shard statistics"))); @@ -2067,7 +2028,6 @@ static void 
CopyAttributeOutText(CopyOutState cstate, char *string) { char *pointer = NULL; - char *start = NULL; char c = '\0'; char delimc = cstate->delim[0]; @@ -2092,7 +2052,7 @@ CopyAttributeOutText(CopyOutState cstate, char *string) * skip doing pg_encoding_mblen(), because in valid backend encodings, * extra bytes of a multibyte character never look like ASCII. */ - start = pointer; + char *start = pointer; while ((c = *pointer) != '\0') { if ((unsigned char) c < (unsigned char) 0x20) @@ -2184,9 +2144,8 @@ CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColu EState *executorState, bool stopOnFailure, char *intermediateResultIdPrefix) { - CitusCopyDestReceiver *copyDest = NULL; - - copyDest = (CitusCopyDestReceiver *) palloc0(sizeof(CitusCopyDestReceiver)); + CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) palloc0( + sizeof(CitusCopyDestReceiver)); /* set up the DestReceiver function pointers */ copyDest->pub.receiveSlot = CitusCopyDestReceiverReceive; @@ -2225,20 +2184,14 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, Oid schemaOid = get_rel_namespace(tableId); char *schemaName = get_namespace_name(schemaOid); - Relation distributedRelation = NULL; List *columnNameList = copyDest->columnNameList; List *quotedColumnNameList = NIL; ListCell *columnNameCell = NULL; char partitionMethod = '\0'; - DistTableCacheEntry *cacheEntry = NULL; - CopyStmt *copyStatement = NULL; - List *shardIntervalList = NULL; - - CopyOutState copyOutState = NULL; const char *delimiterCharacter = "\t"; const char *nullPrintCharacter = "\\N"; @@ -2246,15 +2199,15 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, ErrorIfLocalExecutionHappened(); /* look up table properties */ - distributedRelation = heap_open(tableId, RowExclusiveLock); - cacheEntry = DistributedTableCacheEntry(tableId); + Relation distributedRelation = heap_open(tableId, RowExclusiveLock); + DistTableCacheEntry *cacheEntry = 
DistributedTableCacheEntry(tableId); partitionMethod = cacheEntry->partitionMethod; copyDest->distributedRelation = distributedRelation; copyDest->tupleDescriptor = inputTupleDescriptor; /* load the list of shards and verify that we have shards to copy into */ - shardIntervalList = LoadShardIntervalList(tableId); + List *shardIntervalList = LoadShardIntervalList(tableId); if (shardIntervalList == NIL) { if (partitionMethod == DISTRIBUTE_BY_HASH) @@ -2307,7 +2260,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, } /* define how tuples will be serialised */ - copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); + CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; copyOutState->null_print = (char *) nullPrintCharacter; copyOutState->null_print_client = (char *) nullPrintCharacter; @@ -2349,15 +2302,15 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, } /* define the template for the COPY statement that is sent to workers */ - copyStatement = makeNode(CopyStmt); + CopyStmt *copyStatement = makeNode(CopyStmt); if (copyDest->intermediateResultIdPrefix != NULL) { - DefElem *formatResultOption = NULL; copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix, -1); - formatResultOption = makeDefElem("format", (Node *) makeString("result"), -1); + DefElem *formatResultOption = makeDefElem("format", (Node *) makeString("result"), + -1); copyStatement->options = list_make1(formatResultOption); } else @@ -2422,7 +2375,6 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest TupleDesc tupleDescriptor = copyDest->tupleDescriptor; CopyStmt *copyStatement = copyDest->copyStatement; - CopyShardState *shardState = NULL; CopyOutState copyOutState = copyDest->copyOutState; FmgrInfo *columnOutputFunctions = copyDest->columnOutputFunctions; CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths; 
@@ -2432,10 +2384,6 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest bool stopOnFailure = copyDest->stopOnFailure; - Datum *columnValues = NULL; - bool *columnNulls = NULL; - - int64 shardId = 0; EState *executorState = copyDest->executorState; MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState); @@ -2443,17 +2391,18 @@ CitusSendTupleToPlacements(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest slot_getallattrs(slot); - columnValues = slot->tts_values; - columnNulls = slot->tts_isnull; + Datum *columnValues = slot->tts_values; + bool *columnNulls = slot->tts_isnull; - shardId = ShardIdForTuple(copyDest, columnValues, columnNulls); + int64 shardId = ShardIdForTuple(copyDest, columnValues, columnNulls); /* connections hash is kept in memory context */ MemoryContextSwitchTo(copyDest->memoryContext); - shardState = GetShardState(shardId, copyDest->shardStateHash, - copyDest->connectionStateHash, stopOnFailure, - &cachedShardStateFound); + CopyShardState *shardState = GetShardState(shardId, copyDest->shardStateHash, + copyDest->connectionStateHash, + stopOnFailure, + &cachedShardStateFound); if (!cachedShardStateFound) { firstTupleInShard = true; @@ -2564,7 +2513,6 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu int partitionColumnIndex = copyDest->partitionColumnIndex; Datum partitionColumnValue = 0; CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths; - ShardInterval *shardInterval = NULL; /* * Find the partition column value and corresponding shard interval @@ -2605,7 +2553,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu * For reference table, this function blindly returns the tables single * shard. 
*/ - shardInterval = FindShardInterval(partitionColumnValue, copyDest->tableMetadata); + ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, + copyDest->tableMetadata); if (shardInterval == NULL) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), @@ -2628,11 +2577,10 @@ CitusCopyDestReceiverShutdown(DestReceiver *destReceiver) CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) destReceiver; HTAB *connectionStateHash = copyDest->connectionStateHash; - List *connectionStateList = NIL; ListCell *connectionStateCell = NULL; Relation distributedRelation = copyDest->distributedRelation; - connectionStateList = ConnectionStateList(connectionStateHash); + List *connectionStateList = ConnectionStateList(connectionStateHash); PG_TRY(); { @@ -2820,21 +2768,20 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS else { bool isFrom = copyStatement->is_from; - Relation copiedRelation = NULL; - char *schemaName = NULL; - MemoryContext relationContext = NULL; /* consider using RangeVarGetRelidExtended to check perms before locking */ - copiedRelation = heap_openrv(copyStatement->relation, - isFrom ? RowExclusiveLock : AccessShareLock); + Relation copiedRelation = heap_openrv(copyStatement->relation, + isFrom ? 
RowExclusiveLock : + AccessShareLock); isDistributedRelation = IsDistributedTable(RelationGetRelid(copiedRelation)); /* ensure future lookups hit the same relation */ - schemaName = get_namespace_name(RelationGetNamespace(copiedRelation)); + char *schemaName = get_namespace_name(RelationGetNamespace(copiedRelation)); /* ensure we copy string into proper context */ - relationContext = GetMemoryChunkContext(copyStatement->relation); + MemoryContext relationContext = GetMemoryChunkContext( + copyStatement->relation); schemaName = MemoryContextStrdup(relationContext, schemaName); copyStatement->relation->schemaname = schemaName; @@ -2906,16 +2853,15 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS !copyStatement->is_from && !is_absolute_path(filename)) { bool binaryCopyFormat = CopyStatementHasFormat(copyStatement, "binary"); - int64 tuplesSent = 0; Query *query = NULL; Node *queryNode = copyStatement->query; - List *queryTreeList = NIL; StringInfo userFilePath = makeStringInfo(); RawStmt *rawStmt = makeNode(RawStmt); rawStmt->stmt = queryNode; - queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); + List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, + NULL); if (list_length(queryTreeList) != 1) { @@ -2931,7 +2877,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS */ appendStringInfo(userFilePath, "%s.%u", filename, GetUserId()); - tuplesSent = WorkerExecuteSqlTask(query, filename, binaryCopyFormat); + int64 tuplesSent = WorkerExecuteSqlTask(query, filename, binaryCopyFormat); snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, tuplesSent); @@ -2952,7 +2898,6 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS static void CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort) { - List *ddlCommandList = NIL; ListCell *ddlCommandCell = NULL; char *relationName = relation->relname; @@ 
-2964,7 +2909,7 @@ CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort) * enough; therefore, we just throw an error which says that we could not * run the copy operation. */ - ddlCommandList = TableDDLCommandList(nodeName, nodePort, qualifiedRelationName); + List *ddlCommandList = TableDDLCommandList(nodeName, nodePort, qualifiedRelationName); if (ddlCommandList == NIL) { ereport(ERROR, (errmsg("could not run copy from the worker node"))); @@ -3045,14 +2990,13 @@ CheckCopyPermissions(CopyStmt *copyStatement) AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT); List *attnums; ListCell *cur; - RangeTblEntry *rte; rel = heap_openrv(copyStatement->relation, is_from ? RowExclusiveLock : AccessShareLock); relid = RelationGetRelid(rel); - rte = makeNode(RangeTblEntry); + RangeTblEntry *rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = relid; rte->relkind = rel->rd_rel->relkind; @@ -3166,18 +3110,16 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) static HTAB * CreateConnectionStateHash(MemoryContext memoryContext) { - HTAB *connectionStateHash = NULL; - int hashFlags = 0; HASHCTL info; memset(&info, 0, sizeof(info)); info.keysize = sizeof(int); info.entrysize = sizeof(CopyConnectionState); info.hcxt = memoryContext; - hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); + int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); - connectionStateHash = hash_create("Copy Connection State Hash", 128, &info, - hashFlags); + HTAB *connectionStateHash = hash_create("Copy Connection State Hash", 128, &info, + hashFlags); return connectionStateHash; } @@ -3191,17 +3133,15 @@ CreateConnectionStateHash(MemoryContext memoryContext) static HTAB * CreateShardStateHash(MemoryContext memoryContext) { - HTAB *shardStateHash = NULL; - int hashFlags = 0; HASHCTL info; memset(&info, 0, sizeof(info)); info.keysize = sizeof(uint64); info.entrysize = sizeof(CopyShardState); info.hcxt = memoryContext; - hashFlags = (HASH_ELEM 
| HASH_CONTEXT | HASH_BLOBS); + int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); - shardStateHash = hash_create("Copy Shard State Hash", 128, &info, hashFlags); + HTAB *shardStateHash = hash_create("Copy Shard State Hash", 128, &info, hashFlags); return shardStateHash; } @@ -3214,14 +3154,15 @@ CreateShardStateHash(MemoryContext memoryContext) static CopyConnectionState * GetConnectionState(HTAB *connectionStateHash, MultiConnection *connection) { - CopyConnectionState *connectionState = NULL; bool found = false; int sock = PQsocket(connection->pgConn); Assert(sock != -1); - connectionState = (CopyConnectionState *) hash_search(connectionStateHash, &sock, - HASH_ENTER, &found); + CopyConnectionState *connectionState = (CopyConnectionState *) hash_search( + connectionStateHash, &sock, + HASH_ENTER, + &found); if (!found) { connectionState->socket = sock; @@ -3243,11 +3184,11 @@ ConnectionStateList(HTAB *connectionStateHash) { List *connectionStateList = NIL; HASH_SEQ_STATUS status; - CopyConnectionState *connectionState = NULL; hash_seq_init(&status, connectionStateHash); - connectionState = (CopyConnectionState *) hash_seq_search(&status); + CopyConnectionState *connectionState = (CopyConnectionState *) hash_seq_search( + &status); while (connectionState != NULL) { connectionStateList = lappend(connectionStateList, connectionState); @@ -3268,10 +3209,8 @@ static CopyShardState * GetShardState(uint64 shardId, HTAB *shardStateHash, HTAB *connectionStateHash, bool stopOnFailure, bool *found) { - CopyShardState *shardState = NULL; - - shardState = (CopyShardState *) hash_search(shardStateHash, &shardId, - HASH_ENTER, found); + CopyShardState *shardState = (CopyShardState *) hash_search(shardStateHash, &shardId, + HASH_ENTER, found); if (!*found) { InitializeCopyShardState(shardState, connectionStateHash, @@ -3292,7 +3231,6 @@ InitializeCopyShardState(CopyShardState *shardState, HTAB *connectionStateHash, uint64 shardId, bool stopOnFailure) { - List 
*finalizedPlacementList = NIL; ListCell *placementCell = NULL; int failedPlacementCount = 0; @@ -3306,7 +3244,7 @@ InitializeCopyShardState(CopyShardState *shardState, /* release finalized placement list at the end of this function */ MemoryContext oldContext = MemoryContextSwitchTo(localContext); - finalizedPlacementList = MasterShardPlacementList(shardId); + List *finalizedPlacementList = MasterShardPlacementList(shardId); MemoryContextSwitchTo(oldContext); @@ -3316,8 +3254,6 @@ InitializeCopyShardState(CopyShardState *shardState, foreach(placementCell, finalizedPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); - CopyConnectionState *connectionState = NULL; - CopyPlacementState *placementState = NULL; MultiConnection *connection = CopyGetPlacementConnection(placement, stopOnFailure); @@ -3327,7 +3263,8 @@ InitializeCopyShardState(CopyShardState *shardState, continue; } - connectionState = GetConnectionState(connectionStateHash, connection); + CopyConnectionState *connectionState = GetConnectionState(connectionStateHash, + connection); /* * If this is the first time we are using this connection for copying a @@ -3338,7 +3275,7 @@ InitializeCopyShardState(CopyShardState *shardState, RemoteTransactionBeginIfNecessary(connection); } - placementState = palloc0(sizeof(CopyPlacementState)); + CopyPlacementState *placementState = palloc0(sizeof(CopyPlacementState)); placementState->shardState = shardState; placementState->data = makeStringInfo(); placementState->connectionState = connectionState; @@ -3380,19 +3317,19 @@ InitializeCopyShardState(CopyShardState *shardState, static MultiConnection * CopyGetPlacementConnection(ShardPlacement *placement, bool stopOnFailure) { - MultiConnection *connection = NULL; uint32 connectionFlags = FOR_DML; char *nodeUser = CurrentUserName(); - ShardPlacementAccess *placementAccess = NULL; /* * Determine whether the task has to be assigned to a particular connection * due to a preceding access to the 
placement in the same transaction. */ - placementAccess = CreatePlacementAccess(placement, PLACEMENT_ACCESS_DML); - connection = GetConnectionIfPlacementAccessedInXact(connectionFlags, - list_make1(placementAccess), - NULL); + ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement, + PLACEMENT_ACCESS_DML); + MultiConnection *connection = GetConnectionIfPlacementAccessedInXact(connectionFlags, + list_make1( + placementAccess), + NULL); if (connection != NULL) { return connection; @@ -3451,21 +3388,19 @@ static void StartPlacementStateCopyCommand(CopyPlacementState *placementState, CopyStmt *copyStatement, CopyOutState copyOutState) { - StringInfo copyCommand = NULL; - PGresult *result = NULL; MultiConnection *connection = placementState->connectionState->connection; uint64 shardId = placementState->shardState->shardId; bool raiseInterrupts = true; bool binaryCopy = copyOutState->binary; - copyCommand = ConstructCopyStatement(copyStatement, shardId, binaryCopy); + StringInfo copyCommand = ConstructCopyStatement(copyStatement, shardId, binaryCopy); if (!SendRemoteCommand(connection, copyCommand->data)) { ReportConnectionError(connection, ERROR); } - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (PQresultStatus(result) != PGRES_COPY_IN) { ReportResultError(connection, result, ERROR); diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c index 42a6b0b14..93dfcbff9 100644 --- a/src/backend/distributed/commands/rename.c +++ b/src/backend/distributed/commands/rename.c @@ -30,8 +30,6 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand) { Oid objectRelationId = InvalidOid; /* SQL Object OID */ Oid tableRelationId = InvalidOid; /* Relation OID, maybe not the same. 
*/ - bool isDistributedRelation = false; - DDLJob *ddlJob = NULL; /* * We only support some of the PostgreSQL supported RENAME statements, and @@ -97,7 +95,7 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand) return NIL; } - isDistributedRelation = IsDistributedTable(tableRelationId); + bool isDistributedRelation = IsDistributedTable(tableRelationId); if (!isDistributedRelation) { return NIL; @@ -110,7 +108,7 @@ PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand) */ ErrorIfUnsupportedRenameStmt(renameStmt); - ddlJob = palloc0(sizeof(DDLJob)); + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = tableRelationId; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = renameCommand; diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 61ed0496e..f690a1346 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -46,7 +46,6 @@ List * ProcessAlterRoleStmt(AlterRoleStmt *stmt, const char *queryString) { ListCell *optionCell = NULL; - List *commands = NIL; if (!EnableAlterRolePropagation || !IsCoordinator()) { @@ -82,7 +81,7 @@ ProcessAlterRoleStmt(AlterRoleStmt *stmt, const char *queryString) break; } } - commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt)); + List *commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt)); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -120,15 +119,14 @@ ExtractEncryptedPassword(Oid roleOid) TupleDesc pgAuthIdDescription = RelationGetDescr(pgAuthId); HeapTuple tuple = SearchSysCache1(AUTHOID, roleOid); bool isNull = true; - Datum passwordDatum; if (!HeapTupleIsValid(tuple)) { return NULL; } - passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword, - pgAuthIdDescription, &isNull); + Datum passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword, + pgAuthIdDescription, &isNull); heap_close(pgAuthId, AccessShareLock); ReleaseSysCache(tuple); @@ 
-151,8 +149,6 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription) { char *rolPassword = ""; char *rolValidUntil = "infinity"; - Datum rolValidUntilDatum; - Datum rolPasswordDatum; bool isNull = true; Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(tuple)); AlterRoleStmt *stmt = makeNode(AlterRoleStmt); @@ -199,8 +195,8 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription) makeDefElemInt("connectionlimit", role->rolconnlimit)); - rolPasswordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword, - pgAuthIdDescription, &isNull); + Datum rolPasswordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword, + pgAuthIdDescription, &isNull); if (!isNull) { rolPassword = pstrdup(TextDatumGetCString(rolPasswordDatum)); @@ -214,8 +210,8 @@ GenerateAlterRoleIfExistsCommand(HeapTuple tuple, TupleDesc pgAuthIdDescription) stmt->options = lappend(stmt->options, makeDefElem("password", NULL, -1)); } - rolValidUntilDatum = heap_getattr(tuple, Anum_pg_authid_rolvaliduntil, - pgAuthIdDescription, &isNull); + Datum rolValidUntilDatum = heap_getattr(tuple, Anum_pg_authid_rolvaliduntil, + pgAuthIdDescription, &isNull); if (!isNull) { rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 8bb29eb39..569415816 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -158,16 +158,14 @@ PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) List * PlanAlterTableSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) { - Oid relationId = InvalidOid; - if (stmt->relation == NULL) { return NIL; } - relationId = RangeVarGetRelid(stmt->relation, - AccessExclusiveLock, - stmt->missing_ok); + Oid relationId = RangeVarGetRelid(stmt->relation, + AccessExclusiveLock, + stmt->missing_ok); /* first check whether a distributed relation is 
affected */ if (!OidIsValid(relationId) || !IsDistributedTable(relationId)) diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 9c134465f..ef59bda4d 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -56,7 +56,6 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt) { Oid sequenceId = RangeVarGetRelid(alterSeqStmt->sequence, AccessShareLock, alterSeqStmt->missing_ok); - bool sequenceOwned = false; Oid ownedByTableId = InvalidOid; Oid newOwnedByTableId = InvalidOid; int32 ownedByColumnId = 0; @@ -68,8 +67,8 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt) return; } - sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId, - &ownedByColumnId); + bool sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId, + &ownedByColumnId); if (!sequenceOwned) { sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, &ownedByTableId, diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 001cb0960..7b791fa96 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -73,7 +73,6 @@ ProcessDropTableStmt(DropStmt *dropTableStatement) List *tableNameList = (List *) lfirst(dropTableCell); RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList); bool missingOK = true; - List *partitionList = NIL; ListCell *partitionCell = NULL; Oid relationId = RangeVarGetRelid(tableRangeVar, AccessShareLock, missingOK); @@ -98,7 +97,7 @@ ProcessDropTableStmt(DropStmt *dropTableStatement) EnsureCoordinator(); - partitionList = PartitionList(relationId); + List *partitionList = PartitionList(relationId); if (list_length(partitionList) == 0) { continue; @@ -254,14 +253,7 @@ ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement) List * PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const 
char *alterTableCommand) { - List *ddlJobs = NIL; - DDLJob *ddlJob = NULL; - LOCKMODE lockmode = 0; - Oid leftRelationId = InvalidOid; Oid rightRelationId = InvalidOid; - char leftRelationKind; - bool isDistributedRelation = false; - List *commandList = NIL; ListCell *commandCell = NULL; bool executeSequentially = false; @@ -271,8 +263,8 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo return NIL; } - lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); - leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); + LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); + Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return NIL; @@ -283,13 +275,13 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo * SET/SET storage parameters in Citus, so we might have to check for * another relation here. */ - leftRelationKind = get_rel_relkind(leftRelationId); + char leftRelationKind = get_rel_relkind(leftRelationId); if (leftRelationKind == RELKIND_INDEX) { leftRelationId = IndexGetRelation(leftRelationId, false); } - isDistributedRelation = IsDistributedTable(leftRelationId); + bool isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return NIL; @@ -317,7 +309,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo * set skip_validation to true to prevent PostgreSQL to verify validity of the * foreign constraint in master. Validity will be checked in workers anyway. 
*/ - commandList = alterTableStatement->cmds; + List *commandList = alterTableStatement->cmds; foreach(commandCell, commandList) { @@ -426,7 +418,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo SetLocalMultiShardModifyModeToSequential(); } - ddlJob = palloc0(sizeof(DDLJob)); + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = leftRelationId; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = alterTableCommand; @@ -450,7 +442,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo ddlJob->taskList = DDLTaskList(leftRelationId, alterTableCommand); } - ddlJobs = list_make1(ddlJob); + List *ddlJobs = list_make1(ddlJob); return ddlJobs; } @@ -465,10 +457,6 @@ Node * WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand) { - LOCKMODE lockmode = 0; - Oid leftRelationId = InvalidOid; - bool isDistributedRelation = false; - List *commandList = NIL; ListCell *commandCell = NULL; /* first check whether a distributed relation is affected */ @@ -477,14 +465,14 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, return (Node *) alterTableStatement; } - lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); - leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); + LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); + Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return (Node *) alterTableStatement; } - isDistributedRelation = IsDistributedTable(leftRelationId); + bool isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return (Node *) alterTableStatement; @@ -496,7 +484,7 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, * set skip_validation to true to prevent PostgreSQL to verify validity of the * foreign constraint in master. 
Validity will be checked in workers anyway. */ - commandList = alterTableStatement->cmds; + List *commandList = alterTableStatement->cmds; foreach(commandCell, commandList) { @@ -559,9 +547,6 @@ IsAlterTableRenameStmt(RenameStmt *renameStmt) void ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement) { - LOCKMODE lockmode = 0; - Oid leftRelationId = InvalidOid; - bool isDistributedRelation = false; List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; @@ -571,14 +556,14 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement) return; } - lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); - leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); + LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); + Oid leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return; } - isDistributedRelation = IsDistributedTable(leftRelationId); + bool isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return; @@ -613,11 +598,9 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement) { List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; - LOCKMODE lockmode = NoLock; - Oid relationId = InvalidOid; - lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); - relationId = AlterTableLookupRelation(alterTableStatement, lockmode); + LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); + Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (relationId != InvalidOid) { @@ -634,8 +617,6 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement) if (alterTableType == AT_AddConstraint) { - Constraint *constraint = NULL; - Assert(list_length(commandList) == 1); ErrorIfUnsupportedAlterAddConstraintStmt(alterTableStatement); @@ -645,7 +626,7 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement) continue; } 
- constraint = (Constraint *) command->def; + Constraint *constraint = (Constraint *) command->def; if (constraint->contype == CONSTR_FOREIGN) { InvalidateForeignKeyGraph(); @@ -653,11 +634,10 @@ PostProcessAlterTableStmt(AlterTableStmt *alterTableStatement) } else if (alterTableType == AT_AddColumn) { - List *columnConstraints = NIL; ListCell *columnConstraint = NULL; ColumnDef *columnDefinition = (ColumnDef *) command->def; - columnConstraints = columnDefinition->constraints; + List *columnConstraints = columnDefinition->constraints; if (columnConstraints) { ErrorIfUnsupportedAlterAddConstraintStmt(alterTableStatement); @@ -792,8 +772,6 @@ void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, Var *distributionColumn, uint32 colocationId) { - char *relationName = NULL; - List *indexOidList = NULL; ListCell *indexOidCell = NULL; /* @@ -817,21 +795,17 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, return; } - relationName = RelationGetRelationName(relation); - indexOidList = RelationGetIndexList(relation); + char *relationName = RelationGetRelationName(relation); + List *indexOidList = RelationGetIndexList(relation); foreach(indexOidCell, indexOidList) { Oid indexOid = lfirst_oid(indexOidCell); Relation indexDesc = index_open(indexOid, RowExclusiveLock); - IndexInfo *indexInfo = NULL; - AttrNumber *attributeNumberArray = NULL; bool hasDistributionColumn = false; - int attributeCount = 0; - int attributeIndex = 0; /* extract index key information from the index's pg_index info */ - indexInfo = BuildIndexInfo(indexDesc); + IndexInfo *indexInfo = BuildIndexInfo(indexDesc); /* only check unique indexes and exclusion constraints. 
*/ if (indexInfo->ii_Unique == false && indexInfo->ii_ExclusionOps == NULL) @@ -856,25 +830,23 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, errhint("Consider using hash partitioning."))); } - attributeCount = indexInfo->ii_NumIndexAttrs; - attributeNumberArray = indexInfo->ii_IndexAttrNumbers; + int attributeCount = indexInfo->ii_NumIndexAttrs; + AttrNumber *attributeNumberArray = indexInfo->ii_IndexAttrNumbers; - for (attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++) + for (int attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++) { AttrNumber attributeNumber = attributeNumberArray[attributeIndex]; - bool uniqueConstraint = false; - bool exclusionConstraintWithEquality = false; if (distributionColumn->varattno != attributeNumber) { continue; } - uniqueConstraint = indexInfo->ii_Unique; - exclusionConstraintWithEquality = (indexInfo->ii_ExclusionOps != NULL && - OperatorImplementsEquality( - indexInfo->ii_ExclusionOps[ - attributeIndex])); + bool uniqueConstraint = indexInfo->ii_Unique; + bool exclusionConstraintWithEquality = (indexInfo->ii_ExclusionOps != NULL && + OperatorImplementsEquality( + indexInfo->ii_ExclusionOps[ + attributeIndex])); if (uniqueConstraint || exclusionConstraintWithEquality) { @@ -1278,15 +1250,13 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, */ if (rightPartitionMethod == DISTRIBUTE_BY_NONE) { - ShardInterval *rightShardInterval = NULL; int rightShardCount = list_length(rightShardList); int leftShardCount = list_length(leftShardList); - int shardCounter = 0; Assert(rightShardCount == 1); - rightShardInterval = (ShardInterval *) linitial(rightShardList); - for (shardCounter = rightShardCount; shardCounter < leftShardCount; + ShardInterval *rightShardInterval = (ShardInterval *) linitial(rightShardList); + for (int shardCounter = rightShardCount; shardCounter < leftShardCount; shardCounter++) { rightShardList = lappend(rightShardList, 
rightShardInterval); @@ -1301,7 +1271,6 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, ShardInterval *leftShardInterval = (ShardInterval *) lfirst(leftShardCell); uint64 leftShardId = leftShardInterval->shardId; StringInfo applyCommand = makeStringInfo(); - Task *task = NULL; RelationShard *leftRelationShard = CitusMakeNode(RelationShard); RelationShard *rightRelationShard = CitusMakeNode(RelationShard); @@ -1318,7 +1287,7 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, leftShardId, escapedLeftSchemaName, rightShardId, escapedRightSchemaName, escapedCommandString); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; @@ -1345,8 +1314,6 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement, AlterTableCmd *command) { bool involvesPartitionColumn = false; - Var *partitionColumn = NULL; - HeapTuple tuple = NULL; char *alterColumnName = command->name; LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); @@ -1356,9 +1323,9 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement, return false; } - partitionColumn = DistPartitionKey(relationId); + Var *partitionColumn = DistPartitionKey(relationId); - tuple = SearchSysCacheAttName(relationId, alterColumnName); + HeapTuple tuple = SearchSysCacheAttName(relationId, alterColumnName); if (HeapTupleIsValid(tuple)) { Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(tuple); diff --git a/src/backend/distributed/commands/transmit.c b/src/backend/distributed/commands/transmit.c index a26bb1233..9b47115db 100644 --- a/src/backend/distributed/commands/transmit.c +++ b/src/backend/distributed/commands/transmit.c @@ -42,7 +42,6 @@ void RedirectCopyDataToRegularFile(const char *filename) { StringInfo copyData = makeStringInfo(); - bool copyDone = false; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | 
S_IWUSR); File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); @@ -50,7 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename) SendCopyInStart(); - copyDone = ReceiveCopyData(copyData); + bool copyDone = ReceiveCopyData(copyData); while (!copyDone) { /* if received data has contents, append to regular file */ @@ -83,8 +82,6 @@ RedirectCopyDataToRegularFile(const char *filename) void SendRegularFile(const char *filename) { - StringInfo fileBuffer = NULL; - int readBytes = -1; const uint32 fileBufferSize = 32768; /* 32 KB */ const int fileFlags = (O_RDONLY | PG_BINARY); const int fileMode = 0; @@ -97,13 +94,13 @@ SendRegularFile(const char *filename) * We read file's contents into buffers of 32 KB. This buffer size is twice * as large as Hadoop's default buffer size, and may later be configurable. */ - fileBuffer = makeStringInfo(); + StringInfo fileBuffer = makeStringInfo(); enlargeStringInfo(fileBuffer, fileBufferSize); SendCopyOutStart(); - readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize, - PG_WAIT_IO); + int readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize, + PG_WAIT_IO); while (readBytes > 0) { fileBuffer->len = readBytes; @@ -141,11 +138,9 @@ FreeStringInfo(StringInfo stringInfo) File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) { - File fileDesc = -1; - int fileStated = -1; struct stat fileStat; - fileStated = stat(filename, &fileStat); + int fileStated = stat(filename, &fileStat); if (fileStated >= 0) { if (S_ISDIR(fileStat.st_mode)) @@ -155,7 +150,7 @@ FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) } } - fileDesc = PathNameOpenFilePerm((char *) filename, fileFlags, fileMode); + File fileDesc = PathNameOpenFilePerm((char *) filename, fileFlags, fileMode); if (fileDesc < 0) { ereport(ERROR, (errcode_for_file_access(), @@ -175,7 +170,6 @@ SendCopyInStart(void) { StringInfoData copyInStart = { NULL, 0, 0, 0 }; const char copyFormat = 1; /* 
binary copy format */ - int flushed = 0; pq_beginmessage(&copyInStart, 'G'); pq_sendbyte(&copyInStart, copyFormat); @@ -183,7 +177,7 @@ pq_endmessage(&copyInStart); /* flush here to ensure that FE knows it can send data */ - flushed = pq_flush(); + int flushed = pq_flush(); if (flushed != 0) { ereport(WARNING, (errmsg("could not flush copy start data"))); @@ -213,13 +207,12 @@ static void SendCopyDone(void) { StringInfoData copyDone = { NULL, 0, 0, 0 }; - int flushed = 0; pq_beginmessage(&copyDone, 'c'); pq_endmessage(&copyDone); /* flush here to signal to FE that we are done */ - flushed = pq_flush(); + int flushed = pq_flush(); if (flushed != 0) { ereport(WARNING, (errmsg("could not flush copy start data"))); @@ -250,14 +243,12 @@ SendCopyData(StringInfo fileBuffer) static bool ReceiveCopyData(StringInfo copyData) { - int messageType = 0; - int messageCopied = 0; bool copyDone = true; const int unlimitedSize = 0; HOLD_CANCEL_INTERRUPTS(); pq_startmsgread(); - messageType = pq_getbyte(); + int messageType = pq_getbyte(); if (messageType == EOF) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), @@ -265,7 +256,7 @@ ReceiveCopyData(StringInfo copyData) } /* consume the rest of message before checking for message type */ - messageCopied = pq_getmessage(copyData, unlimitedSize); + int messageCopied = pq_getmessage(copyData, unlimitedSize); if (messageCopied == EOF) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), @@ -382,8 +373,6 @@ TransmitStatementUser(CopyStmt *copyStatement) void VerifyTransmitStmt(CopyStmt *copyStatement) { - char *fileName = NULL; - EnsureSuperUser(); /* do some minimal option verification */ @@ -394,7 +383,7 @@ VerifyTransmitStmt(CopyStmt *copyStatement) errmsg("FORMAT 'transmit' requires a target file"))); } - fileName = copyStatement->relation->relname; + char *fileName = copyStatement->relation->relname; if (is_absolute_path(fileName)) { diff --git a/src/backend/distributed/commands/truncate.c 
b/src/backend/distributed/commands/truncate.c index ec8ce1372..3ec721b4a 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -180,8 +180,6 @@ LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement) { RangeVar *rangeVar = (RangeVar *) lfirst(relationCell); Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); - DistTableCacheEntry *cacheEntry = NULL; - List *referencingTableList = NIL; Oid referencingRelationId = InvalidOid; if (!IsDistributedTable(relationId)) @@ -196,10 +194,10 @@ LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement) distributedRelationList = lappend_oid(distributedRelationList, relationId); - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); Assert(cacheEntry != NULL); - referencingTableList = cacheEntry->referencingRelationsViaForeignKey; + List *referencingTableList = cacheEntry->referencingRelationsViaForeignKey; foreach_oid(referencingRelationId, referencingTableList) { distributedRelationList = list_append_unique_oid(distributedRelationList, diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index aca976894..1aeeb4a30 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -114,9 +114,6 @@ static bool ShouldPropagateTypeCreate(void); List * PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) { - const char *compositeTypeStmtSql = NULL; - List *commands = NIL; - if (!ShouldPropagateTypeCreate()) { return NIL; @@ -149,7 +146,7 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) * type previously has been attempted to be created in a transaction which did not * commit on the coordinator. 
*/ - compositeTypeStmtSql = DeparseCompositeTypeStmt(stmt); + const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(stmt); compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql); /* @@ -158,9 +155,9 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) */ EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) compositeTypeStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) compositeTypeStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -174,8 +171,6 @@ PlanCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) void ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) { - const ObjectAddress *typeAddress = NULL; - /* same check we perform during planning of the statement */ if (!ShouldPropagateTypeCreate()) { @@ -186,7 +181,8 @@ ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) * find object address of the just created object, because the type has been created * locally it can't be missing */ - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); EnsureDependenciesExistsOnAllNodes(typeAddress); MarkObjectDistributed(typeAddress); @@ -202,13 +198,10 @@ ProcessCompositeTypeStmt(CompositeTypeStmt *stmt, const char *queryString) List * PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString) { - const char *alterTypeStmtSql = NULL; - const ObjectAddress *typeAddress = NULL; - List *commands = NIL; - Assert(stmt->relkind == OBJECT_TYPE); - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -218,7 +211,7 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString) /* 
reconstruct alter statement in a portable fashion */ QualifyTreeNode((Node *) stmt); - alterTypeStmtSql = DeparseTreeNode((Node *) stmt); + const char *alterTypeStmtSql = DeparseTreeNode((Node *) stmt); /* * all types that are distributed will need their alter statements propagated @@ -227,9 +220,9 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString) */ EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterTypeStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterTypeStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -248,9 +241,6 @@ PlanAlterTypeStmt(AlterTableStmt *stmt, const char *queryString) List * PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) { - const char *createEnumStmtSql = NULL; - List *commands = NIL; - if (!ShouldPropagateTypeCreate()) { return NIL; @@ -266,7 +256,7 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) QualifyTreeNode((Node *) stmt); /* reconstruct creation statement in a portable fashion */ - createEnumStmtSql = DeparseCreateEnumStmt(stmt); + const char *createEnumStmtSql = DeparseCreateEnumStmt(stmt); createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql); /* @@ -276,9 +266,9 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) EnsureSequentialModeForTypeDDL(); /* to prevent recursion with mx we disable ddl propagation */ - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createEnumStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createEnumStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -295,15 +285,14 @@ PlanCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) void ProcessCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) { - const ObjectAddress *typeAddress = NULL; - if (!ShouldPropagateTypeCreate()) { return; 
} /* lookup type address of just created type */ - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); EnsureDependenciesExistsOnAllNodes(typeAddress); /* @@ -326,11 +315,10 @@ ProcessCreateEnumStmt(CreateEnumStmt *stmt, const char *queryString) List * PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString) { - const char *alterEnumStmtSql = NULL; - const ObjectAddress *typeAddress = NULL; List *commands = NIL; - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -351,7 +339,7 @@ PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString) EnsureCoordinator(); QualifyTreeNode((Node *) stmt); - alterEnumStmtSql = DeparseTreeNode((Node *) stmt); + const char *alterEnumStmtSql = DeparseTreeNode((Node *) stmt); /* * Before pg12 ALTER ENUM ... ADD VALUE could not be within a xact block. Instead of @@ -396,9 +384,8 @@ PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString) void ProcessAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString) { - const ObjectAddress *typeAddress = NULL; - - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return; @@ -422,25 +409,22 @@ ProcessAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString) * might already be added to some nodes, but not all. 
*/ - int result = 0; - List *commands = NIL; - const char *alterEnumStmtSql = NULL; /* qualification of the stmt happened during planning */ - alterEnumStmtSql = DeparseTreeNode((Node *) stmt); + const char *alterEnumStmtSql = DeparseTreeNode((Node *) stmt); - commands = list_make2(DISABLE_DDL_PROPAGATION, (void *) alterEnumStmtSql); + List *commands = list_make2(DISABLE_DDL_PROPAGATION, (void *) alterEnumStmtSql); - result = SendBareOptionalCommandListToWorkersAsUser(ALL_WORKERS, commands, NULL); + int result = SendBareOptionalCommandListToWorkersAsUser(ALL_WORKERS, commands, + NULL); if (result != RESPONSE_OKAY) { - const char *alterEnumStmtIfNotExistsSql = NULL; bool oldSkipIfNewValueExists = stmt->skipIfNewValExists; /* deparse the query with IF NOT EXISTS */ stmt->skipIfNewValExists = true; - alterEnumStmtIfNotExistsSql = DeparseTreeNode((Node *) stmt); + const char *alterEnumStmtIfNotExistsSql = DeparseTreeNode((Node *) stmt); stmt->skipIfNewValExists = oldSkipIfNewValueExists; ereport(WARNING, (errmsg("not all workers applied change to enum"), @@ -466,18 +450,15 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString) * the old list to put back */ List *oldTypes = stmt->objects; - List *distributedTypes = NIL; - const char *dropStmtSql = NULL; ListCell *addressCell = NULL; - List *distributedTypeAddresses = NIL; - List *commands = NIL; if (!ShouldPropagate()) { return NIL; } - distributedTypes = FilterNameListForDistributedTypes(oldTypes, stmt->missing_ok); + List *distributedTypes = FilterNameListForDistributedTypes(oldTypes, + stmt->missing_ok); if (list_length(distributedTypes) <= 0) { /* no distributed types to drop */ @@ -494,7 +475,7 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString) /* * remove the entries for the distributed objects on dropping */ - distributedTypeAddresses = TypeNameListToObjectAddresses(distributedTypes); + List *distributedTypeAddresses = TypeNameListToObjectAddresses(distributedTypes); foreach(addressCell, 
distributedTypeAddresses) { ObjectAddress *address = (ObjectAddress *) lfirst(addressCell); @@ -506,15 +487,15 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString) * deparse to an executable sql statement for the workers */ stmt->objects = distributedTypes; - dropStmtSql = DeparseTreeNode((Node *) stmt); + const char *dropStmtSql = DeparseTreeNode((Node *) stmt); stmt->objects = oldTypes; /* to prevent recursion with mx we disable ddl propagation */ EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -531,11 +512,8 @@ PlanDropTypeStmt(DropStmt *stmt, const char *queryString) List * PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString) { - const char *renameStmtSql = NULL; - const ObjectAddress *typeAddress = NULL; - List *commands = NIL; - - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -545,14 +523,14 @@ PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString) QualifyTreeNode((Node *) stmt); /* deparse sql*/ - renameStmtSql = DeparseTreeNode((Node *) stmt); + const char *renameStmtSql = DeparseTreeNode((Node *) stmt); /* to prevent recursion with mx we disable ddl propagation */ EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) renameStmtSql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) renameStmtSql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -568,14 +546,11 @@ PlanRenameTypeStmt(RenameStmt *stmt, const char *queryString) List * PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString) { - const char 
*sql = NULL; - const ObjectAddress *typeAddress = NULL; - List *commands = NIL; - Assert(stmt->renameType == OBJECT_ATTRIBUTE); Assert(stmt->relationType == OBJECT_TYPE); - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -583,12 +558,12 @@ PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString) QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -603,13 +578,10 @@ PlanRenameTypeAttributeStmt(RenameStmt *stmt, const char *queryString) List * PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) { - const char *sql = NULL; - const ObjectAddress *typeAddress = NULL; - List *commands = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -618,13 +590,13 @@ PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) EnsureCoordinator(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -638,11 +610,10 @@ PlanAlterTypeSchemaStmt(AlterObjectSchemaStmt 
*stmt, const char *queryString) void ProcessAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) { - const ObjectAddress *typeAddress = NULL; - Assert(stmt->objectType == OBJECT_TYPE); - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return; @@ -663,13 +634,10 @@ ProcessAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString) List * PlanAlterTypeOwnerStmt(AlterOwnerStmt *stmt, const char *queryString) { - const ObjectAddress *typeAddress = NULL; - const char *sql = NULL; - List *commands = NULL; - Assert(stmt->objectType == OBJECT_TYPE); - typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + const ObjectAddress *typeAddress = GetObjectAddressFromParseTree((Node *) stmt, + false); if (!ShouldPropagateObject(typeAddress)) { return NIL; @@ -678,12 +646,12 @@ PlanAlterTypeOwnerStmt(AlterOwnerStmt *stmt, const char *queryString) EnsureCoordinator(); QualifyTreeNode((Node *) stmt); - sql = DeparseTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); EnsureSequentialModeForTypeDDL(); - commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(ALL_WORKERS, commands); } @@ -726,13 +694,10 @@ CreateTypeStmtByObjectAddress(const ObjectAddress *address) static CompositeTypeStmt * RecreateCompositeTypeStmt(Oid typeOid) { - CompositeTypeStmt *stmt = NULL; - List *names = NIL; - Assert(get_typtype(typeOid) == TYPTYPE_COMPOSITE); - stmt = makeNode(CompositeTypeStmt); - names = stringToQualifiedNameList(format_type_be_qualified(typeOid)); + CompositeTypeStmt *stmt = makeNode(CompositeTypeStmt); + List *names = stringToQualifiedNameList(format_type_be_qualified(typeOid)); stmt->typevar = 
makeRangeVarFromNameList(names); stmt->coldeflist = CompositeTypeColumnDefList(typeOid); @@ -763,17 +728,14 @@ attributeFormToColumnDef(Form_pg_attribute attributeForm) static List * CompositeTypeColumnDefList(Oid typeOid) { - Relation relation = NULL; - Oid relationId = InvalidOid; - TupleDesc tupleDescriptor = NULL; - int attributeIndex = 0; List *columnDefs = NIL; - relationId = typeidTypeRelid(typeOid); - relation = relation_open(relationId, AccessShareLock); + Oid relationId = typeidTypeRelid(typeOid); + Relation relation = relation_open(relationId, AccessShareLock); - tupleDescriptor = RelationGetDescr(relation); - for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++) + TupleDesc tupleDescriptor = RelationGetDescr(relation); + for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts; + attributeIndex++) { Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex); @@ -799,11 +761,9 @@ CompositeTypeColumnDefList(Oid typeOid) static CreateEnumStmt * RecreateEnumStmt(Oid typeOid) { - CreateEnumStmt *stmt = NULL; - Assert(get_typtype(typeOid) == TYPTYPE_ENUM); - stmt = makeNode(CreateEnumStmt); + CreateEnumStmt *stmt = makeNode(CreateEnumStmt); stmt->typeName = stringToQualifiedNameList(format_type_be_qualified(typeOid)); stmt->vals = EnumValsList(typeOid); @@ -818,8 +778,6 @@ RecreateEnumStmt(Oid typeOid) static List * EnumValsList(Oid typeOid) { - Relation enum_rel = NULL; - SysScanDesc enum_scan = NULL; HeapTuple enum_tuple = NULL; ScanKeyData skey = { 0 }; @@ -831,11 +789,11 @@ EnumValsList(Oid typeOid) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(typeOid)); - enum_rel = heap_open(EnumRelationId, AccessShareLock); - enum_scan = systable_beginscan(enum_rel, - EnumTypIdSortOrderIndexId, - true, NULL, - 1, &skey); + Relation enum_rel = heap_open(EnumRelationId, AccessShareLock); + SysScanDesc enum_scan = systable_beginscan(enum_rel, + EnumTypIdSortOrderIndexId, + true, NULL, + 1, &skey); /* 
collect all value names in CREATE TYPE ... AS ENUM stmt */ while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan))) @@ -861,13 +819,9 @@ EnumValsList(Oid typeOid) ObjectAddress * CompositeTypeStmtObjectAddress(CompositeTypeStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - - typeName = MakeTypeNameFromRangeVar(stmt->typevar); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar); + Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -885,13 +839,9 @@ CompositeTypeStmtObjectAddress(CompositeTypeStmt *stmt, bool missing_ok) ObjectAddress * CreateEnumStmtObjectAddress(CreateEnumStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - - typeName = makeTypeNameFromNameList(stmt->typeName); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = makeTypeNameFromNameList(stmt->typeName); + Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -909,15 +859,11 @@ CreateEnumStmtObjectAddress(CreateEnumStmt *stmt, bool missing_ok) ObjectAddress * AlterTypeStmtObjectAddress(AlterTableStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - Assert(stmt->relkind == OBJECT_TYPE); - typeName = MakeTypeNameFromRangeVar(stmt->relation); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation); + Oid typeOid = 
LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -931,13 +877,9 @@ AlterTypeStmtObjectAddress(AlterTableStmt *stmt, bool missing_ok) ObjectAddress * AlterEnumStmtObjectAddress(AlterEnumStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - - typeName = makeTypeNameFromNameList(stmt->typeName); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = makeTypeNameFromNameList(stmt->typeName); + Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -951,15 +893,11 @@ AlterEnumStmtObjectAddress(AlterEnumStmt *stmt, bool missing_ok) ObjectAddress * RenameTypeStmtObjectAddress(RenameStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - Assert(stmt->renameType == OBJECT_TYPE); - typeName = makeTypeNameFromNameList((List *) stmt->object); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object); + Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -978,21 +916,16 @@ RenameTypeStmtObjectAddress(RenameStmt *stmt, bool missing_ok) ObjectAddress * AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok) { - ObjectAddress *address = NULL; - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - List *names = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - names = (List *) stmt->object; + List *names = (List *) stmt->object; /* * we 
hardcode missing_ok here during LookupTypeNameOid because if we can't find it it * might have already been moved in this transaction. */ - typeName = makeTypeNameFromNameList(names); - typeOid = LookupTypeNameOid(NULL, typeName, true); + TypeName *typeName = makeTypeNameFromNameList(names); + Oid typeOid = LookupTypeNameOid(NULL, typeName, true); if (typeOid == InvalidOid) { @@ -1024,7 +957,7 @@ AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok) } } - address = palloc0(sizeof(ObjectAddress)); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -1042,16 +975,12 @@ AlterTypeSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok) ObjectAddress * RenameTypeAttributeStmtObjectAddress(RenameStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - Assert(stmt->renameType == OBJECT_ATTRIBUTE); Assert(stmt->relationType == OBJECT_TYPE); - typeName = MakeTypeNameFromRangeVar(stmt->relation); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation); + Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -1065,15 +994,11 @@ RenameTypeAttributeStmtObjectAddress(RenameStmt *stmt, bool missing_ok) ObjectAddress * AlterTypeOwnerObjectAddress(AlterOwnerStmt *stmt, bool missing_ok) { - TypeName *typeName = NULL; - Oid typeOid = InvalidOid; - ObjectAddress *address = NULL; - Assert(stmt->objectType == OBJECT_TYPE); - typeName = makeTypeNameFromNameList((List *) stmt->object); - typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - address = palloc0(sizeof(ObjectAddress)); + TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object); + Oid 
typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, TypeRelationId, typeOid); return address; @@ -1088,10 +1013,7 @@ List * CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) { List *ddlCommands = NIL; - const char *ddlCommand = NULL; - Node *stmt = NULL; StringInfoData buf = { 0 }; - const char *username = NULL; Assert(typeAddress->classId == TypeRelationId); @@ -1106,15 +1028,15 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) return NIL; } - stmt = CreateTypeStmtByObjectAddress(typeAddress); + Node *stmt = CreateTypeStmtByObjectAddress(typeAddress); /* capture ddl command for recreation and wrap in create if not exists construct */ - ddlCommand = DeparseTreeNode(stmt); + const char *ddlCommand = DeparseTreeNode(stmt); ddlCommand = WrapCreateOrReplace(ddlCommand); ddlCommands = lappend(ddlCommands, (void *) ddlCommand); /* add owner ship change so the creation command can be run as a different user */ - username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false); + const char *username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false); initStringInfo(&buf); appendStringInfo(&buf, ALTER_TYPE_OWNER_COMMAND, getObjectIdentity(typeAddress), quote_identifier(username)); @@ -1145,8 +1067,6 @@ GenerateBackupNameForTypeCollision(const ObjectAddress *address) { int suffixLength = snprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)", count); - TypeName *newTypeName = NULL; - Oid typeOid = InvalidOid; /* trim the base name at the end to leave space for the suffix and trailing \0 */ baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1); @@ -1157,9 +1077,9 @@ GenerateBackupNameForTypeCollision(const ObjectAddress *address) strncpy(newName + baseLength, suffix, suffixLength); rel->relname = newName; - newTypeName = makeTypeNameFromNameList(MakeNameListFromRangeVar(rel)); + TypeName *newTypeName = 
makeTypeNameFromNameList(MakeNameListFromRangeVar(rel)); - typeOid = LookupTypeNameOid(NULL, newTypeName, true); + Oid typeOid = LookupTypeNameOid(NULL, newTypeName, true); if (typeOid == InvalidOid) { return newName; @@ -1235,9 +1155,8 @@ static Oid GetTypeOwner(Oid typeOid) { Oid result = InvalidOid; - HeapTuple tp = NULL; - tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); + HeapTuple tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); if (HeapTupleIsValid(tp)) { Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 1022f9ff4..dcc17a7b1 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -117,7 +117,6 @@ multi_ProcessUtility(PlannedStmt *pstmt, { Node *parsetree = pstmt->utilityStmt; List *ddlJobs = NIL; - bool checkCreateAlterExtensionVersion = false; if (IsA(parsetree, TransactionStmt) || IsA(parsetree, LockStmt) || @@ -143,7 +142,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, return; } - checkCreateAlterExtensionVersion = IsCreateAlterExtensionUpdateCitusStmt(parsetree); + bool checkCreateAlterExtensionVersion = IsCreateAlterExtensionUpdateCitusStmt( + parsetree); if (EnableVersionChecks && checkCreateAlterExtensionVersion) { ErrorIfUnstableCreateOrAlterExtensionStmt(parsetree); @@ -332,12 +332,11 @@ multi_ProcessUtility(PlannedStmt *pstmt, if (IsA(parsetree, CopyStmt)) { MemoryContext planContext = GetMemoryChunkContext(parsetree); - MemoryContext previousContext; parsetree = copyObject(parsetree); parsetree = ProcessCopyStmt((CopyStmt *) parsetree, completionTag, queryString); - previousContext = MemoryContextSwitchTo(planContext); + MemoryContext previousContext = MemoryContextSwitchTo(planContext); parsetree = copyObject(parsetree); MemoryContextSwitchTo(previousContext); @@ -886,14 +885,12 @@ multi_ProcessUtility(PlannedStmt *pstmt, static bool 
IsDropSchemaOrDB(Node *parsetree) { - DropStmt *dropStatement = NULL; - if (!IsA(parsetree, DropStmt)) { return false; } - dropStatement = (DropStmt *) parsetree; + DropStmt *dropStatement = (DropStmt *) parsetree; return (dropStatement->removeType == OBJECT_SCHEMA) || (dropStatement->removeType == OBJECT_DATABASE); } @@ -1091,7 +1088,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) static char * SetSearchPathToCurrentSearchPathCommand(void) { - StringInfo setCommand = NULL; char *currentSearchPath = CurrentSearchPath(); if (currentSearchPath == NULL) @@ -1099,7 +1095,7 @@ SetSearchPathToCurrentSearchPathCommand(void) return NULL; } - setCommand = makeStringInfo(); + StringInfo setCommand = makeStringInfo(); appendStringInfo(setCommand, "SET search_path TO %s;", currentSearchPath); return setCommand->data; @@ -1217,7 +1213,6 @@ DDLTaskList(Oid relationId, const char *commandString) ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; StringInfo applyCommand = makeStringInfo(); - Task *task = NULL; /* * If rightRelationId is not InvalidOid, instead of worker_apply_shard_ddl_command @@ -1226,7 +1221,7 @@ DDLTaskList(Oid relationId, const char *commandString) appendStringInfo(applyCommand, WORKER_APPLY_SHARD_DDL_COMMAND, shardId, escapedSchemaName, escapedCommandString); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; @@ -1252,9 +1247,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) { List *workerNodes = TargetWorkerSetNodeList(targets, NoLock); char *concatenatedCommands = StringJoin(commands, ';'); - DDLJob *ddlJob = NULL; ListCell *workerNodeCell = NULL; - Task *task = NULL; if (list_length(workerNodes) <= 0) { @@ -1265,16 +1258,15 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) return NIL; } - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->taskType = DDL_TASK; 
task->queryString = concatenatedCommands; foreach(workerNodeCell, workerNodes) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); - ShardPlacement *targetPlacement = NULL; - targetPlacement = CitusMakeNode(ShardPlacement); + ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); targetPlacement->nodeName = workerNode->workerName; targetPlacement->nodePort = workerNode->workerPort; targetPlacement->groupId = workerNode->groupId; @@ -1282,7 +1274,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) task->taskPlacementList = lappend(task->taskPlacementList, targetPlacement); } - ddlJob = palloc0(sizeof(DDLJob)); + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = InvalidOid; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = NULL; diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index b21db30d1..fd715d1f3 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -62,7 +62,6 @@ void ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) { int relationIndex = 0; - bool distributedVacuumStmt = false; List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt); ListCell *vacuumRelationCell = NULL; List *relationIdList = NIL; @@ -79,7 +78,8 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) relationIdList = lappend_oid(relationIdList, relationId); } - distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options, relationIdList); + bool distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options, + relationIdList); if (!distributedVacuumStmt) { return; @@ -91,9 +91,6 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) Oid relationId = lfirst_oid(relationIdCell); if (IsDistributedTable(relationId)) { - List *vacuumColumnList = NIL; - List *taskList = NIL; - /* * VACUUM commands cannot run inside a transaction block, so we use * the "bare" 
commit protocol without BEGIN/COMMIT. However, ANALYZE @@ -108,8 +105,8 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; } - vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex); - taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList); + List *vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex); + List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList); /* use adaptive executor when enabled */ ExecuteUtilityTaskListWithoutResults(taskList); @@ -135,13 +132,12 @@ IsDistributedVacuumStmt(int vacuumOptions, List *vacuumRelationIdList) bool distributeStmt = false; ListCell *relationIdCell = NULL; int distributedRelationCount = 0; - int vacuumedRelationCount = 0; /* * No table in the vacuum statement means vacuuming all relations * which is not supported by citus. */ - vacuumedRelationCount = list_length(vacuumRelationIdList); + int vacuumedRelationCount = list_length(vacuumRelationIdList); if (vacuumedRelationCount == 0) { /* WARN for unqualified VACUUM commands */ @@ -188,18 +184,16 @@ static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList) { List *taskList = NIL; - List *shardIntervalList = NIL; ListCell *shardIntervalCell = NULL; uint64 jobId = INVALID_JOB_ID; int taskId = 1; StringInfo vacuumString = DeparseVacuumStmtPrefix(vacuumParams); - const char *columnNames = NULL; const int vacuumPrefixLen = vacuumString->len; Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); char *tableName = get_rel_name(relationId); - columnNames = DeparseVacuumColumnNames(vacuumColumnList); + const char *columnNames = DeparseVacuumColumnNames(vacuumColumnList); /* * We obtain ShareUpdateExclusiveLock here to not conflict with INSERT's @@ -209,7 +203,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum */ LockRelationOid(relationId, 
ShareUpdateExclusiveLock); - shardIntervalList = LoadShardIntervalList(relationId); + List *shardIntervalList = LoadShardIntervalList(relationId); /* grab shard lock before getting placement list */ LockShardListMetadata(shardIntervalList, ShareLock); @@ -218,7 +212,6 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; - Task *task = NULL; char *shardName = pstrdup(tableName); AppendShardIdToName(&shardName, shardInterval->shardId); @@ -228,7 +221,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum appendStringInfoString(vacuumString, shardName); appendStringInfoString(vacuumString, columnNames); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = VACUUM_ANALYZE_TASK; diff --git a/src/backend/distributed/commands/variableset.c b/src/backend/distributed/commands/variableset.c index 6800db5f3..714d40359 100644 --- a/src/backend/distributed/commands/variableset.c +++ b/src/backend/distributed/commands/variableset.c @@ -96,9 +96,8 @@ IsSettingSafeToPropagate(char *name) "exit_on_error", "max_stack_depth" }; - Index settingIndex = 0; - for (settingIndex = 0; settingIndex < lengthof(skipSettings); settingIndex++) + for (Index settingIndex = 0; settingIndex < lengthof(skipSettings); settingIndex++) { if (pg_strcasecmp(skipSettings[settingIndex], name) == 0) { @@ -138,9 +137,8 @@ ProcessVariableSetStmt(VariableSetStmt *setStmt, const char *setStmtString) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); - RemoteTransaction *transaction = NULL; - transaction = &connection->remoteTransaction; + RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; @@ -162,10 +160,9 @@ ProcessVariableSetStmt(VariableSetStmt *setStmt, 
const char *setStmtString) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); - RemoteTransaction *transaction = NULL; const bool raiseErrors = true; - transaction = &connection->remoteTransaction; + RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index 6f3ed8437..a51bbfcf8 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -76,8 +76,7 @@ InitConnParams() void ResetConnParams() { - Index paramIdx = 0; - for (paramIdx = 0; paramIdx < ConnParams.size; paramIdx++) + for (Index paramIdx = 0; paramIdx < ConnParams.size; paramIdx++) { free((void *) ConnParams.keywords[paramIdx]); free((void *) ConnParams.values[paramIdx]); @@ -135,7 +134,6 @@ bool CheckConninfo(const char *conninfo, const char **whitelist, Size whitelistLength, char **errorMsg) { - PQconninfoOption *optionArray = NULL; PQconninfoOption *option = NULL; Index whitelistIdx PG_USED_FOR_ASSERTS_ONLY = 0; char *errorMsgString = NULL; @@ -165,7 +163,7 @@ CheckConninfo(const char *conninfo, const char **whitelist, } /* this should at least parse */ - optionArray = PQconninfoParse(conninfo, NULL); + PQconninfoOption *optionArray = PQconninfoParse(conninfo, NULL); if (optionArray == NULL) { *errorMsg = "Provided string is not a valid libpq connection info string"; @@ -187,15 +185,13 @@ CheckConninfo(const char *conninfo, const char **whitelist, for (option = optionArray; option->keyword != NULL; option++) { - void *matchingKeyword = NULL; - if (option->val == NULL || option->val[0] == '\0') { continue; } - matchingKeyword = bsearch(&option->keyword, whitelist, whitelistLength, - sizeof(char *), pg_qsort_strcmp); + void *matchingKeyword = bsearch(&option->keyword, whitelist, 
whitelistLength, + sizeof(char *), pg_qsort_strcmp); if (matchingKeyword == NULL) { /* the whitelist lacks this keyword; error out! */ @@ -283,8 +279,6 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, /* auth keywords will begin after global and runtime ones are appended */ Index authParamsIdx = ConnParams.size + lengthof(runtimeKeywords); - Index paramIndex = 0; - Index runtimeParamIndex = 0; if (ConnParams.size + lengthof(runtimeKeywords) >= ConnParams.maxSize) { @@ -296,7 +290,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, pg_ltoa(key->port, nodePortString); /* populate node port string with port */ /* first step: copy global parameters to beginning of array */ - for (paramIndex = 0; paramIndex < ConnParams.size; paramIndex++) + for (Index paramIndex = 0; paramIndex < ConnParams.size; paramIndex++) { /* copy the keyword&value pointers to the new array */ connKeywords[paramIndex] = ConnParams.keywords[paramIndex]; @@ -311,7 +305,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, *runtimeParamStart = ConnParams.size; /* second step: begin after global params and copy runtime params into our context */ - for (runtimeParamIndex = 0; + for (Index runtimeParamIndex = 0; runtimeParamIndex < lengthof(runtimeKeywords); runtimeParamIndex++) { @@ -334,9 +328,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, const char * GetConnParam(const char *keyword) { - Index i = 0; - - for (i = 0; i < ConnParams.size; i++) + for (Index i = 0; i < ConnParams.size; i++) { if (strcmp(keyword, ConnParams.keywords[i]) == 0) { @@ -357,10 +349,9 @@ static Size CalculateMaxSize() { PQconninfoOption *defaults = PQconndefaults(); - PQconninfoOption *option = NULL; Size maxSize = 0; - for (option = defaults; + for (PQconninfoOption *option = defaults; option->keyword != NULL; option++, maxSize++) { diff --git a/src/backend/distributed/connection/connection_management.c 
b/src/backend/distributed/connection/connection_management.c index aaabbfda8..6d1be15e4 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -85,7 +85,6 @@ void InitializeConnectionManagement(void) { HASHCTL info, connParamsInfo; - uint32 hashFlags = 0; /* * Create a single context for connection and transaction related memory @@ -105,7 +104,7 @@ InitializeConnectionManagement(void) info.hash = ConnectionHashHash; info.match = ConnectionHashCompare; info.hcxt = ConnectionContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); + uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); memcpy(&connParamsInfo, &info, sizeof(HASHCTL)); connParamsInfo.entrysize = sizeof(ConnParamsHashEntry); @@ -187,9 +186,7 @@ GetNodeConnection(uint32 flags, const char *hostname, int32 port) MultiConnection * GetNonDataAccessConnection(const char *hostname, int32 port) { - MultiConnection *connection; - - connection = StartNonDataAccessConnection(hostname, port); + MultiConnection *connection = StartNonDataAccessConnection(hostname, port); FinishConnectionEstablishment(connection); @@ -243,9 +240,8 @@ MultiConnection * GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const char *user, const char *database) { - MultiConnection *connection; - - connection = StartNodeUserDatabaseConnection(flags, hostname, port, user, database); + MultiConnection *connection = StartNodeUserDatabaseConnection(flags, hostname, port, + user, database); FinishConnectionEstablishment(connection); @@ -269,11 +265,11 @@ StartWorkerListConnections(List *workerNodeList, uint32 flags, const char *user, WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - MultiConnection *connection = NULL; int connectionFlags = 0; - connection = 
StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - user, database); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + user, database); connectionList = lappend(connectionList, connection); } @@ -298,7 +294,6 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, char *user, const char *database) { ConnectionHashKey key; - ConnectionHashEntry *entry = NULL; MultiConnection *connection; bool found; @@ -340,7 +335,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, * connection list empty. */ - entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found); + ConnectionHashEntry *entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found); if (!found) { entry->connections = MemoryContextAlloc(ConnectionContext, @@ -412,14 +407,13 @@ CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort) while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0) { dlist_iter iter; - dlist_head *connections = NULL; if (strcmp(entry->key.hostname, nodeName) != 0 || entry->key.port != nodePort) { continue; } - connections = entry->connections; + dlist_head *connections = entry->connections; dlist_foreach(iter, connections) { MultiConnection *connection = @@ -575,7 +569,6 @@ EventSetSizeForConnectionList(List *connections) static WaitEventSet * WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) { - WaitEventSet *waitEventSet = NULL; ListCell *connectionCell = NULL; const int eventSetSize = EventSetSizeForConnectionList(connections); @@ -586,7 +579,7 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) *waitCount = 0; } - waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize); + WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize); EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet), waitEventSet); @@ -602,8 +595,6 @@ 
WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) { MultiConnectionPollState *connectionState = (MultiConnectionPollState *) lfirst( connectionCell); - int sock = 0; - int eventMask = 0; if (numEventsAdded >= eventSetSize) { @@ -617,9 +608,9 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) continue; } - sock = PQsocket(connectionState->connection->pgConn); + int sock = PQsocket(connectionState->connection->pgConn); - eventMask = MultiConnectionStateEventMask(connectionState); + int eventMask = MultiConnectionStateEventMask(connectionState); AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, connectionState); numEventsAdded++; @@ -672,8 +663,6 @@ FinishConnectionListEstablishment(List *multiConnectionList) WaitEventSet *waitEventSet = NULL; bool waitEventSetRebuild = true; int waitCount = 0; - WaitEvent *events = NULL; - MemoryContext oldContext = NULL; foreach(multiConnectionCell, multiConnectionList) { @@ -699,23 +688,22 @@ FinishConnectionListEstablishment(List *multiConnectionList) } /* prepare space for socket events */ - events = (WaitEvent *) palloc0(EventSetSizeForConnectionList(connectionStates) * - sizeof(WaitEvent)); + WaitEvent *events = (WaitEvent *) palloc0(EventSetSizeForConnectionList( + connectionStates) * + sizeof(WaitEvent)); /* * for high connection counts with lots of round trips we could potentially have a lot * of (big) waitsets that we'd like to clean right after we have used them. 
To do this * we switch to a temporary memory context for this loop which gets reset at the end */ - oldContext = MemoryContextSwitchTo( + MemoryContext oldContext = MemoryContextSwitchTo( AllocSetContextCreate(CurrentMemoryContext, "connection establishment temporary context", ALLOCSET_DEFAULT_SIZES)); while (waitCount > 0) { long timeout = DeadlineTimestampTzToTimeout(deadline); - int eventCount = 0; - int eventIndex = 0; if (waitEventSetRebuild) { @@ -730,13 +718,12 @@ FinishConnectionListEstablishment(List *multiConnectionList) } } - eventCount = WaitEventSetWait(waitEventSet, timeout, events, waitCount, - WAIT_EVENT_CLIENT_READ); + int eventCount = WaitEventSetWait(waitEventSet, timeout, events, waitCount, + WAIT_EVENT_CLIENT_READ); - for (eventIndex = 0; eventIndex < eventCount; eventIndex++) + for (int eventIndex = 0; eventIndex < eventCount; eventIndex++) { WaitEvent *event = &events[eventIndex]; - bool connectionStateChanged = false; MultiConnectionPollState *connectionState = (MultiConnectionPollState *) event->user_data; @@ -764,7 +751,7 @@ FinishConnectionListEstablishment(List *multiConnectionList) continue; } - connectionStateChanged = MultiConnectionStatePoll(connectionState); + bool connectionStateChanged = MultiConnectionStatePoll(connectionState); if (connectionStateChanged) { if (connectionState->phase != MULTI_CONNECTION_PHASE_CONNECTING) @@ -909,9 +896,8 @@ static uint32 ConnectionHashHash(const void *key, Size keysize) { ConnectionHashKey *entry = (ConnectionHashKey *) key; - uint32 hash = 0; - hash = string_hash(entry->hostname, NAMEDATALEN); + uint32 hash = string_hash(entry->hostname, NAMEDATALEN); hash = hash_combine(hash, hash_uint32(entry->port)); hash = hash_combine(hash, string_hash(entry->user, NAMEDATALEN)); hash = hash_combine(hash, string_hash(entry->database, NAMEDATALEN)); @@ -948,11 +934,9 @@ static MultiConnection * StartConnectionEstablishment(ConnectionHashKey *key) { bool found = false; - MultiConnection *connection = NULL; - 
ConnParamsHashEntry *entry = NULL; /* search our cache for precomputed connection settings */ - entry = hash_search(ConnParamsHash, key, HASH_ENTER, &found); + ConnParamsHashEntry *entry = hash_search(ConnParamsHash, key, HASH_ENTER, &found); if (!found || !entry->isValid) { /* avoid leaking memory in the keys and values arrays */ @@ -968,7 +952,8 @@ StartConnectionEstablishment(ConnectionHashKey *key) entry->isValid = true; } - connection = MemoryContextAllocZero(ConnectionContext, sizeof(MultiConnection)); + MultiConnection *connection = MemoryContextAllocZero(ConnectionContext, + sizeof(MultiConnection)); strlcpy(connection->hostname, key->hostname, MAX_NODE_LENGTH); connection->port = key->port; @@ -1218,9 +1203,8 @@ char * TrimLogLevel(const char *message) { char *chompedMessage = pchomp(message); - size_t n; - n = 0; + size_t n = 0; while (n < strlen(chompedMessage) && chompedMessage[n] != ':') { n++; diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index d494fb80e..eea25a13a 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -267,14 +267,15 @@ StartPlacementListConnection(uint32 flags, List *placementAccessList, const char *userName) { char *freeUserName = NULL; - MultiConnection *chosenConnection = NULL; if (userName == NULL) { userName = freeUserName = CurrentUserName(); } - chosenConnection = FindPlacementListConnection(flags, placementAccessList, userName); + MultiConnection *chosenConnection = FindPlacementListConnection(flags, + placementAccessList, + userName); if (chosenConnection == NULL) { /* use the first placement from the list to extract nodename and nodeport */ @@ -346,10 +347,6 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn ShardPlacement *placement = placementAccess->placement; ShardPlacementAccessType accessType = 
placementAccess->accessType; - ConnectionPlacementHashEntry *placementEntry = NULL; - ConnectionReference *placementConnection = NULL; - - Oid relationId = InvalidOid; if (placement->shardId == INVALID_SHARD_ID) { @@ -363,8 +360,9 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn continue; } - placementEntry = FindOrCreatePlacementEntry(placement); - placementConnection = placementEntry->primaryConnection; + ConnectionPlacementHashEntry *placementEntry = FindOrCreatePlacementEntry( + placement); + ConnectionReference *placementConnection = placementEntry->primaryConnection; if (placementConnection->connection == connection) { @@ -438,7 +436,7 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn } /* record the relation access */ - relationId = RelationIdForShard(placement->shardId); + Oid relationId = RelationIdForShard(placement->shardId); RecordRelationAccessIfReferenceTable(relationId, accessType); } } @@ -453,7 +451,6 @@ MultiConnection * GetConnectionIfPlacementAccessedInXact(int flags, List *placementAccessList, const char *userName) { - MultiConnection *connection = NULL; char *freeUserName = NULL; if (userName == NULL) @@ -461,8 +458,8 @@ GetConnectionIfPlacementAccessedInXact(int flags, List *placementAccessList, userName = freeUserName = CurrentUserName(); } - connection = FindPlacementListConnection(flags, placementAccessList, - userName); + MultiConnection *connection = FindPlacementListConnection(flags, placementAccessList, + userName); if (freeUserName != NULL) { @@ -515,9 +512,6 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us ShardPlacement *placement = placementAccess->placement; ShardPlacementAccessType accessType = placementAccess->accessType; - ConnectionPlacementHashEntry *placementEntry = NULL; - ColocatedPlacementsHashEntry *colocatedEntry = NULL; - ConnectionReference *placementConnection = NULL; if (placement->shardId == INVALID_SHARD_ID) { 
@@ -530,9 +524,10 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us continue; } - placementEntry = FindOrCreatePlacementEntry(placement); - colocatedEntry = placementEntry->colocatedEntry; - placementConnection = placementEntry->primaryConnection; + ConnectionPlacementHashEntry *placementEntry = FindOrCreatePlacementEntry( + placement); + ColocatedPlacementsHashEntry *colocatedEntry = placementEntry->colocatedEntry; + ConnectionReference *placementConnection = placementEntry->primaryConnection; /* note: the Asserts below are primarily for clarifying the conditions */ @@ -628,12 +623,13 @@ static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(ShardPlacement *placement) { ConnectionPlacementHashKey connKey; - ConnectionPlacementHashEntry *placementEntry = NULL; bool found = false; connKey.placementId = placement->placementId; - placementEntry = hash_search(ConnectionPlacementHash, &connKey, HASH_ENTER, &found); + ConnectionPlacementHashEntry *placementEntry = hash_search(ConnectionPlacementHash, + &connKey, HASH_ENTER, + &found); if (!found) { /* no connection has been chosen for this placement */ @@ -646,15 +642,15 @@ FindOrCreatePlacementEntry(ShardPlacement *placement) placement->partitionMethod == DISTRIBUTE_BY_NONE) { ColocatedPlacementsHashKey coloKey; - ColocatedPlacementsHashEntry *colocatedEntry = NULL; coloKey.nodeId = placement->nodeId; coloKey.colocationGroupId = placement->colocationGroupId; coloKey.representativeValue = placement->representativeValue; /* look for a connection assigned to co-located placements */ - colocatedEntry = hash_search(ColocatedPlacementsHash, &coloKey, HASH_ENTER, - &found); + ColocatedPlacementsHashEntry *colocatedEntry = hash_search( + ColocatedPlacementsHash, &coloKey, HASH_ENTER, + &found); if (!found) { void *conRef = MemoryContextAllocZero(TopTransactionContext, @@ -835,12 +831,12 @@ AssociatePlacementWithShard(ConnectionPlacementHashEntry *placementEntry, ShardPlacement 
*placement) { ConnectionShardHashKey shardKey; - ConnectionShardHashEntry *shardEntry = NULL; bool found = false; dlist_iter placementIter; shardKey.shardId = placement->shardId; - shardEntry = hash_search(ConnectionShardHash, &shardKey, HASH_ENTER, &found); + ConnectionShardHashEntry *shardEntry = hash_search(ConnectionShardHash, &shardKey, + HASH_ENTER, &found); if (!found) { dlist_init(&shardEntry->placementConnections); @@ -1033,7 +1029,6 @@ CheckShardPlacements(ConnectionShardHashEntry *shardEntry) ConnectionPlacementHashEntry *placementEntry = dlist_container(ConnectionPlacementHashEntry, shardNode, placementIter.cur); ConnectionReference *primaryConnection = placementEntry->primaryConnection; - MultiConnection *connection = NULL; /* we only consider shards that are modified */ if (primaryConnection == NULL || @@ -1042,7 +1037,7 @@ CheckShardPlacements(ConnectionShardHashEntry *shardEntry) continue; } - connection = primaryConnection->connection; + MultiConnection *connection = primaryConnection->connection; if (!connection || connection->remoteTransaction.transactionFailed) { @@ -1096,7 +1091,6 @@ void InitPlacementConnectionManagement(void) { HASHCTL info; - uint32 hashFlags = 0; /* create (placementId) -> [ConnectionReference] hash */ memset(&info, 0, sizeof(info)); @@ -1104,7 +1098,7 @@ InitPlacementConnectionManagement(void) info.entrysize = sizeof(ConnectionPlacementHashEntry); info.hash = tag_hash; info.hcxt = ConnectionContext; - hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + uint32 hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); ConnectionPlacementHash = hash_create("citus connection cache (placementid)", 64, &info, hashFlags); @@ -1141,9 +1135,8 @@ static uint32 ColocatedPlacementsHashHash(const void *key, Size keysize) { ColocatedPlacementsHashKey *entry = (ColocatedPlacementsHashKey *) key; - uint32 hash = 0; - hash = hash_uint32(entry->nodeId); + uint32 hash = hash_uint32(entry->nodeId); hash = hash_combine(hash, 
hash_uint32(entry->colocationGroupId)); hash = hash_combine(hash, hash_uint32(entry->representativeValue)); diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index b7c4bc7a0..2a110bcc7 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -171,9 +171,6 @@ ClearResultsIfReady(MultiConnection *connection) while (true) { - PGresult *result = NULL; - ExecStatusType resultStatus; - /* * If busy, there might still be results already received and buffered * by the OS. As connection is in non-blocking mode, we can check for @@ -199,14 +196,14 @@ ClearResultsIfReady(MultiConnection *connection) return false; } - result = PQgetResult(pgConn); + PGresult *result = PQgetResult(pgConn); if (result == NULL) { /* no more results available */ return true; } - resultStatus = PQresultStatus(result); + ExecStatusType resultStatus = PQresultStatus(result); /* only care about the status, can clear now */ PQclear(result); @@ -241,18 +238,16 @@ bool SqlStateMatchesCategory(char *sqlStateString, int category) { bool sqlStateMatchesCategory = false; - int sqlState = 0; - int sqlStateCategory = 0; if (sqlStateString == NULL) { return false; } - sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2], - sqlStateString[3], sqlStateString[4]); + int sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2], + sqlStateString[3], sqlStateString[4]); - sqlStateCategory = ERRCODE_TO_CATEGORY(sqlState); + int sqlStateCategory = ERRCODE_TO_CATEGORY(sqlState); if (sqlStateCategory == category) { sqlStateMatchesCategory = true; @@ -390,17 +385,15 @@ ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList) void ExecuteCriticalRemoteCommand(MultiConnection *connection, const char *command) { - int querySent = 0; - PGresult *result = NULL; bool raiseInterrupts = true; - querySent = 
SendRemoteCommand(connection, command); + int querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, ERROR); } - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); @@ -422,18 +415,16 @@ int ExecuteOptionalRemoteCommand(MultiConnection *connection, const char *command, PGresult **result) { - int querySent = 0; - PGresult *localResult = NULL; bool raiseInterrupts = true; - querySent = SendRemoteCommand(connection, command); + int querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, WARNING); return QUERY_SEND_FAILED; } - localResult = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *localResult = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(localResult)) { ReportResultError(connection, localResult, WARNING); @@ -473,7 +464,6 @@ SendRemoteCommandParams(MultiConnection *connection, const char *command, const char *const *parameterValues) { PGconn *pgConn = connection->pgConn; - int rc = 0; LogRemoteCommand(connection, command); @@ -488,8 +478,8 @@ SendRemoteCommandParams(MultiConnection *connection, const char *command, Assert(PQisnonblocking(pgConn)); - rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes, - parameterValues, NULL, NULL, 0); + int rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes, + parameterValues, NULL, NULL, 0); return rc; } @@ -506,7 +496,6 @@ int SendRemoteCommand(MultiConnection *connection, const char *command) { PGconn *pgConn = connection->pgConn; - int rc = 0; LogRemoteCommand(connection, command); @@ -521,7 +510,7 @@ SendRemoteCommand(MultiConnection *connection, const char *command) Assert(PQisnonblocking(pgConn)); - rc = PQsendQuery(pgConn, command); + int rc = PQsendQuery(pgConn, command); return 
rc; } @@ -536,7 +525,6 @@ ReadFirstColumnAsText(PGresult *queryResult) { List *resultRowList = NIL; const int columnIndex = 0; - int64 rowIndex = 0; int64 rowCount = 0; ExecStatusType status = PQresultStatus(queryResult); @@ -545,7 +533,7 @@ ReadFirstColumnAsText(PGresult *queryResult) rowCount = PQntuples(queryResult); } - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) { char *rowValue = PQgetvalue(queryResult, rowIndex, columnIndex); @@ -579,7 +567,6 @@ PGresult * GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts) { PGconn *pgConn = connection->pgConn; - PGresult *result = NULL; /* * Short circuit tests around the more expensive parts of this @@ -605,7 +592,7 @@ GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts) /* no IO should be necessary to get result */ Assert(!PQisBusy(pgConn)); - result = PQgetResult(connection->pgConn); + PGresult *result = PQgetResult(connection->pgConn); return result; } @@ -621,7 +608,6 @@ bool PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes) { PGconn *pgConn = connection->pgConn; - int copyState = 0; bool allowInterrupts = true; if (PQstatus(pgConn) != CONNECTION_OK) @@ -631,7 +617,7 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes) Assert(PQisnonblocking(pgConn)); - copyState = PQputCopyData(pgConn, buffer, nbytes); + int copyState = PQputCopyData(pgConn, buffer, nbytes); if (copyState == -1) { return false; @@ -670,7 +656,6 @@ bool PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg) { PGconn *pgConn = connection->pgConn; - int copyState = 0; bool allowInterrupts = true; if (PQstatus(pgConn) != CONNECTION_OK) @@ -680,7 +665,7 @@ PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg) Assert(PQisnonblocking(pgConn)); - copyState = PQputCopyEnd(pgConn, errormsg); + int copyState = PQputCopyEnd(pgConn, errormsg); if (copyState == -1) { 
return false; @@ -720,12 +705,10 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts) /* perform the necessary IO */ while (true) { - int sendStatus = 0; - int rc = 0; int waitFlags = WL_POSTMASTER_DEATH | WL_LATCH_SET; /* try to send all pending data */ - sendStatus = PQflush(pgConn); + int sendStatus = PQflush(pgConn); /* if sending failed, there's nothing more we can do */ if (sendStatus == -1) @@ -753,7 +736,7 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts) return true; } - rc = WaitLatchOrSocket(MyLatch, waitFlags, sock, 0, PG_WAIT_EXTENSION); + int rc = WaitLatchOrSocket(MyLatch, waitFlags, sock, 0, PG_WAIT_EXTENSION); if (rc & WL_POSTMASTER_DEATH) { ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); @@ -837,7 +820,6 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) { bool cancellationReceived = false; int eventIndex = 0; - int eventCount = 0; long timeout = -1; int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex; @@ -857,14 +839,14 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) } /* wait for I/O events */ - eventCount = WaitEventSetWait(waitEventSet, timeout, events, - pendingConnectionCount, WAIT_EVENT_CLIENT_READ); + int eventCount = WaitEventSetWait(waitEventSet, timeout, events, + pendingConnectionCount, + WAIT_EVENT_CLIENT_READ); /* process I/O events */ for (; eventIndex < eventCount; eventIndex++) { WaitEvent *event = &events[eventIndex]; - MultiConnection *connection = NULL; bool connectionIsReady = false; if (event->events & WL_POSTMASTER_DEATH) @@ -896,7 +878,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) continue; } - connection = (MultiConnection *) event->user_data; + MultiConnection *connection = (MultiConnection *) event->user_data; if (event->events & WL_SOCKET_WRITEABLE) { @@ -1028,8 +1010,6 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, int 
pendingConnectionsStartIndex) { int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex; - WaitEventSet *waitEventSet = NULL; - int connectionIndex = 0; /* * subtract 3 to make room for WL_POSTMASTER_DEATH, WL_LATCH_SET, and @@ -1042,9 +1022,11 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, /* allocate pending connections + 2 for the signal latch and postmaster death */ /* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */ - waitEventSet = CreateWaitEventSet(CurrentMemoryContext, pendingConnectionCount + 2); + WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, + pendingConnectionCount + 2); - for (connectionIndex = 0; connectionIndex < pendingConnectionCount; connectionIndex++) + for (int connectionIndex = 0; connectionIndex < pendingConnectionCount; + connectionIndex++) { MultiConnection *connection = allConnections[pendingConnectionsStartIndex + connectionIndex]; @@ -1078,7 +1060,6 @@ bool SendCancelationRequest(MultiConnection *connection) { char errorBuffer[ERROR_BUFFER_SIZE] = { 0 }; - bool cancelSent = false; PGcancel *cancelObject = PQgetCancel(connection->pgConn); if (cancelObject == NULL) @@ -1087,7 +1068,7 @@ SendCancelationRequest(MultiConnection *connection) return false; } - cancelSent = PQcancel(cancelObject, errorBuffer, sizeof(errorBuffer)); + bool cancelSent = PQcancel(cancelObject, errorBuffer, sizeof(errorBuffer)); if (!cancelSent) { ereport(WARNING, (errmsg("could not issue cancel request"), diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 862da99ef..f505e2e3b 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -200,19 +200,15 @@ pg_get_serverdef_string(Oid tableRelationId) char * pg_get_sequencedef_string(Oid sequenceRelationId) { - char *qualifiedSequenceName = NULL; - char *sequenceDef = NULL; - 
Form_pg_sequence pgSequenceForm = NULL; - - pgSequenceForm = pg_get_sequencedef(sequenceRelationId); + Form_pg_sequence pgSequenceForm = pg_get_sequencedef(sequenceRelationId); /* build our DDL command */ - qualifiedSequenceName = generate_qualified_relation_name(sequenceRelationId); + char *qualifiedSequenceName = generate_qualified_relation_name(sequenceRelationId); - sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, - pgSequenceForm->seqincrement, pgSequenceForm->seqmin, - pgSequenceForm->seqmax, pgSequenceForm->seqstart, - pgSequenceForm->seqcycle ? "" : "NO "); + char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, + pgSequenceForm->seqincrement, pgSequenceForm->seqmin, + pgSequenceForm->seqmax, pgSequenceForm->seqstart, + pgSequenceForm->seqcycle ? "" : "NO "); return sequenceDef; } @@ -225,16 +221,13 @@ pg_get_sequencedef_string(Oid sequenceRelationId) Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId) { - Form_pg_sequence pgSequenceForm = NULL; - HeapTuple heapTuple = NULL; - - heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId); + HeapTuple heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId); if (!HeapTupleIsValid(heapTuple)) { elog(ERROR, "cache lookup failed for sequence %u", sequenceRelationId); } - pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); + Form_pg_sequence pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); ReleaseSysCache(heapTuple); @@ -253,12 +246,7 @@ pg_get_sequencedef(Oid sequenceRelationId) char * pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) { - Relation relation = NULL; - char *relationName = NULL; char relationKind = 0; - TupleDesc tupleDescriptor = NULL; - TupleConstr *tupleConstraints = NULL; - int attributeIndex = 0; bool firstAttributePrinted = false; AttrNumber defaultValueIndex = 0; AttrNumber constraintIndex = 0; @@ -273,8 +261,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) * 
pg_attribute, pg_constraint, and pg_class; and therefore using the * descriptor saves us from a lot of additional work. */ - relation = relation_open(tableRelationId, AccessShareLock); - relationName = generate_relation_name(tableRelationId, NIL); + Relation relation = relation_open(tableRelationId, AccessShareLock); + char *relationName = generate_relation_name(tableRelationId, NIL); EnsureRelationKindSupported(tableRelationId); @@ -301,10 +289,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) * and is not inherited from another table, print the column's name and its * formatted type. */ - tupleDescriptor = RelationGetDescr(relation); - tupleConstraints = tupleDescriptor->constr; + TupleDesc tupleDescriptor = RelationGetDescr(relation); + TupleConstr *tupleConstraints = tupleDescriptor->constr; - for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++) + for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts; + attributeIndex++) { Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex); @@ -318,45 +307,40 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) */ if (!attributeForm->attisdropped) { - const char *attributeName = NULL; - const char *attributeTypeName = NULL; - if (firstAttributePrinted) { appendStringInfoString(&buffer, ", "); } firstAttributePrinted = true; - attributeName = NameStr(attributeForm->attname); + const char *attributeName = NameStr(attributeForm->attname); appendStringInfo(&buffer, "%s ", quote_identifier(attributeName)); - attributeTypeName = format_type_with_typemod(attributeForm->atttypid, - attributeForm->atttypmod); + const char *attributeTypeName = format_type_with_typemod( + attributeForm->atttypid, + attributeForm-> + atttypmod); appendStringInfoString(&buffer, attributeTypeName); /* if this column has a default value, append the default value */ if (attributeForm->atthasdef) { - AttrDefault 
*defaultValueList = NULL; - AttrDefault *defaultValue = NULL; - - Node *defaultNode = NULL; List *defaultContext = NULL; char *defaultString = NULL; Assert(tupleConstraints != NULL); - defaultValueList = tupleConstraints->defval; + AttrDefault *defaultValueList = tupleConstraints->defval; Assert(defaultValueList != NULL); - defaultValue = &(defaultValueList[defaultValueIndex]); + AttrDefault *defaultValue = &(defaultValueList[defaultValueIndex]); defaultValueIndex++; Assert(defaultValue->adnum == (attributeIndex + 1)); Assert(defaultValueIndex <= tupleConstraints->num_defval); /* convert expression to node tree, and prepare deparse context */ - defaultNode = (Node *) stringToNode(defaultValue->adbin); + Node *defaultNode = (Node *) stringToNode(defaultValue->adbin); /* * if column default value is explicitly requested, or it is @@ -418,9 +402,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) ConstrCheck *checkConstraintList = tupleConstraints->check; ConstrCheck *checkConstraint = &(checkConstraintList[constraintIndex]); - Node *checkNode = NULL; - List *checkContext = NULL; - char *checkString = NULL; /* if an attribute or constraint has been printed, format properly */ if (firstAttributePrinted || constraintIndex > 0) @@ -432,11 +413,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) quote_identifier(checkConstraint->ccname)); /* convert expression to node tree, and prepare deparse context */ - checkNode = (Node *) stringToNode(checkConstraint->ccbin); - checkContext = deparse_context_for(relationName, tableRelationId); + Node *checkNode = (Node *) stringToNode(checkConstraint->ccbin); + List *checkContext = deparse_context_for(relationName, tableRelationId); /* deparse check constraint string */ - checkString = deparse_expression(checkNode, checkContext, false, false); + char *checkString = deparse_expression(checkNode, checkContext, false, false); appendStringInfoString(&buffer, checkString); 
} @@ -491,10 +472,9 @@ void EnsureRelationKindSupported(Oid relationId) { char relationKind = get_rel_relkind(relationId); - bool supportedRelationKind = false; - supportedRelationKind = RegularTable(relationId) || - relationKind == RELKIND_FOREIGN_TABLE; + bool supportedRelationKind = RegularTable(relationId) || + relationKind == RELKIND_FOREIGN_TABLE; /* * Citus doesn't support bare inherited tables (i.e., not a partition or @@ -523,9 +503,6 @@ EnsureRelationKindSupported(Oid relationId) char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) { - Relation relation = NULL; - TupleDesc tupleDescriptor = NULL; - AttrNumber attributeIndex = 0; List *columnOptionList = NIL; ListCell *columnOptionCell = NULL; bool firstOptionPrinted = false; @@ -536,7 +513,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) * and use the relation's tuple descriptor to access attribute information. * This is primarily to maintain symmetry with pg_get_tableschemadef. */ - relation = relation_open(tableRelationId, AccessShareLock); + Relation relation = relation_open(tableRelationId, AccessShareLock); EnsureRelationKindSupported(tableRelationId); @@ -545,9 +522,10 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) * and is not inherited from another table, check if column storage or * statistics statements need to be printed. 
*/ - tupleDescriptor = RelationGetDescr(relation); + TupleDesc tupleDescriptor = RelationGetDescr(relation); - for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++) + for (AttrNumber attributeIndex = 0; attributeIndex < tupleDescriptor->natts; + attributeIndex++) { Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex); char *attributeName = NameStr(attributeForm->attname); @@ -631,8 +609,6 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) */ foreach(columnOptionCell, columnOptionList) { - char *columnOptionStatement = NULL; - if (!firstOptionPrinted) { initStringInfo(&buffer); @@ -645,7 +621,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) } firstOptionPrinted = true; - columnOptionStatement = (char *) lfirst(columnOptionCell); + char *columnOptionStatement = (char *) lfirst(columnOptionCell); appendStringInfoString(&buffer, columnOptionStatement); pfree(columnOptionStatement); @@ -670,14 +646,13 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, IndexStmt *indexStmt = copyObject(origStmt); /* copy to avoid modifications */ char *relationName = indexStmt->relation->relname; char *indexName = indexStmt->idxname; - List *deparseContext = NULL; /* extend relation and index name using shard identifier */ AppendShardIdToName(&relationName, shardid); AppendShardIdToName(&indexName, shardid); /* use extended shard name and transformed stmt for deparsing */ - deparseContext = deparse_context_for(relationName, distrelid); + List *deparseContext = deparse_context_for(relationName, distrelid); indexStmt = transformIndexStmt(distrelid, indexStmt, NULL); appendStringInfo(buffer, "CREATE %s INDEX %s %s %s ON %s USING %s ", @@ -850,19 +825,17 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse char * pg_get_indexclusterdef_string(Oid indexRelationId) { - HeapTuple indexTuple = NULL; - Form_pg_index indexForm = NULL; - Oid tableRelationId 
= InvalidOid; StringInfoData buffer = { NULL, 0, 0, 0 }; - indexTuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(indexRelationId), 0, 0, 0); + HeapTuple indexTuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(indexRelationId), + 0, 0, 0); if (!HeapTupleIsValid(indexTuple)) { ereport(ERROR, (errmsg("cache lookup failed for index %u", indexRelationId))); } - indexForm = (Form_pg_index) GETSTRUCT(indexTuple); - tableRelationId = indexForm->indrelid; + Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(indexTuple); + Oid tableRelationId = indexForm->indrelid; /* check if the table is clustered on this index */ if (indexForm->indisclustered) @@ -892,20 +865,16 @@ pg_get_table_grants(Oid relationId) { /* *INDENT-OFF* */ StringInfoData buffer; - Relation relation = NULL; - char *relationName = NULL; List *defs = NIL; - HeapTuple classTuple = NULL; - Datum aclDatum = 0; bool isNull = false; - relation = relation_open(relationId, AccessShareLock); - relationName = generate_relation_name(relationId, NIL); + Relation relation = relation_open(relationId, AccessShareLock); + char *relationName = generate_relation_name(relationId, NIL); initStringInfo(&buffer); /* lookup all table level grants */ - classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); + HeapTuple classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (!HeapTupleIsValid(classTuple)) { ereport(ERROR, @@ -914,17 +883,13 @@ pg_get_table_grants(Oid relationId) relationId))); } - aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl, + Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl, &isNull); ReleaseSysCache(classTuple); if (!isNull) { - int i = 0; - AclItem *aidat = NULL; - Acl *acl = NULL; - int offtype = 0; /* * First revoke all default permissions, so we can start adding the @@ -943,11 +908,11 @@ pg_get_table_grants(Oid relationId) /* iterate through the acl datastructure, emit GRANTs */ - acl = DatumGetAclP(aclDatum); - aidat = 
ACL_DAT(acl); + Acl *acl = DatumGetAclP(aclDatum); + AclItem *aidat = ACL_DAT(acl); - offtype = -1; - i = 0; + int offtype = -1; + int i = 0; while (i < ACL_NUM(acl)) { AclItem *aidata = NULL; @@ -975,9 +940,8 @@ pg_get_table_grants(Oid relationId) if (aidata->ai_grantee != 0) { - HeapTuple htup; - htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee)); + HeapTuple htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee)); if (HeapTupleIsValid(htup)) { Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup)); @@ -1029,28 +993,22 @@ pg_get_table_grants(Oid relationId) char * generate_qualified_relation_name(Oid relid) { - HeapTuple tp; - Form_pg_class reltup; - char *relname; - char *nspname; - char *result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + HeapTuple tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) { elog(ERROR, "cache lookup failed for relation %u", relid); } - reltup = (Form_pg_class) GETSTRUCT(tp); - relname = NameStr(reltup->relname); + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); + char *relname = NameStr(reltup->relname); - nspname = get_namespace_name(reltup->relnamespace); + char *nspname = get_namespace_name(reltup->relnamespace); if (!nspname) { elog(ERROR, "cache lookup failed for namespace %u", reltup->relnamespace); } - result = quote_qualified_identifier(nspname, relname); + char *result = quote_qualified_identifier(nspname, relname); ReleaseSysCache(tp); @@ -1202,16 +1160,13 @@ contain_nextval_expression_walker(Node *node, void *context) char * pg_get_replica_identity_command(Oid tableRelationId) { - Relation relation = NULL; StringInfo buf = makeStringInfo(); - char *relationName = NULL; - char replicaIdentity = 0; - relation = heap_open(tableRelationId, AccessShareLock); + Relation relation = heap_open(tableRelationId, AccessShareLock); - replicaIdentity = relation->rd_rel->relreplident; + char replicaIdentity = relation->rd_rel->relreplident; - 
relationName = generate_qualified_relation_name(tableRelationId); + char *relationName = generate_qualified_relation_name(tableRelationId); if (replicaIdentity == REPLICA_IDENTITY_INDEX) { @@ -1251,18 +1206,16 @@ static char * flatten_reloptions(Oid relid) { char *result = NULL; - HeapTuple tuple; - Datum reloptions; bool isnull; - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) { elog(ERROR, "cache lookup failed for relation %u", relid); } - reloptions = SysCacheGetAttr(RELOID, tuple, - Anum_pg_class_reloptions, &isnull); + Datum reloptions = SysCacheGetAttr(RELOID, tuple, + Anum_pg_class_reloptions, &isnull); if (!isnull) { StringInfoData buf; @@ -1279,16 +1232,14 @@ flatten_reloptions(Oid relid) for (i = 0; i < noptions; i++) { char *option = TextDatumGetCString(options[i]); - char *name; - char *separator; char *value; /* * Each array element should have the form name=value. If the "=" * is missing for some reason, treat it like an empty value. */ - name = option; - separator = strchr(option, '='); + char *name = option; + char *separator = strchr(option, '='); if (separator) { *separator = '\0'; @@ -1343,15 +1294,13 @@ flatten_reloptions(Oid relid) static void simple_quote_literal(StringInfo buf, const char *val) { - const char *valptr; - /* * We form the string literal according to the prevailing setting of * standard_conforming_strings; we never use E''. User is responsible for * making sure result is used correctly. 
*/ appendStringInfoChar(buf, '\''); - for (valptr = val; *valptr; valptr++) + for (const char *valptr = val; *valptr; valptr++) { char ch = *valptr; diff --git a/src/backend/distributed/deparser/deparse_extension_stmts.c b/src/backend/distributed/deparser/deparse_extension_stmts.c index fcf17484f..dc19f753d 100644 --- a/src/backend/distributed/deparser/deparse_extension_stmts.c +++ b/src/backend/distributed/deparser/deparse_extension_stmts.c @@ -270,11 +270,9 @@ static void AppendAlterExtensionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *alterExtensionSchemaStmt) { - const char *extensionName = NULL; - Assert(alterExtensionSchemaStmt->objectType == OBJECT_EXTENSION); - extensionName = strVal(alterExtensionSchemaStmt->object); + const char *extensionName = strVal(alterExtensionSchemaStmt->object); appendStringInfo(buf, "ALTER EXTENSION %s SET SCHEMA %s;", extensionName, quote_identifier(alterExtensionSchemaStmt->newschema)); } diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c index 6a58ab870..cd0f0aba6 100644 --- a/src/backend/distributed/deparser/deparse_function_stmts.c +++ b/src/backend/distributed/deparser/deparse_function_stmts.c @@ -488,14 +488,13 @@ AppendFunctionNameList(StringInfo buf, List *objects, ObjectType objtype) foreach(objectCell, objects) { Node *object = lfirst(objectCell); - ObjectWithArgs *func = NULL; if (objectCell != list_head(objects)) { appendStringInfo(buf, ", "); } - func = castNode(ObjectWithArgs, object); + ObjectWithArgs *func = castNode(ObjectWithArgs, object); AppendFunctionName(buf, func, objtype); } @@ -508,14 +507,11 @@ AppendFunctionNameList(StringInfo buf, List *objects, ObjectType objtype) static void AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype) { - Oid funcid = InvalidOid; - HeapTuple proctup; char *functionName = NULL; char *schemaName = NULL; - char *qualifiedFunctionName; - funcid = 
LookupFuncWithArgs(objtype, func, true); - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + Oid funcid = LookupFuncWithArgs(objtype, func, true); + HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(proctup)) { @@ -529,9 +525,7 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype) } else { - Form_pg_proc procform; - - procform = (Form_pg_proc) GETSTRUCT(proctup); + Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); functionName = NameStr(procform->proname); functionName = pstrdup(functionName); /* we release the tuple before used */ schemaName = get_namespace_name(procform->pronamespace); @@ -539,7 +533,7 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype) ReleaseSysCache(proctup); } - qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName); + char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName); appendStringInfoString(buf, qualifiedFunctionName); if (OidIsValid(funcid)) @@ -548,28 +542,25 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype) * If the function exists we want to use pg_get_function_identity_arguments to * serialize its canonical arguments */ - OverrideSearchPath *overridePath = NULL; - Datum sqlTextDatum = 0; - const char *args = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. 
pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ - overridePath = GetOverrideSearchPath(CurrentMemoryContext); + OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); - sqlTextDatum = DirectFunctionCall1(pg_get_function_identity_arguments, - ObjectIdGetDatum(funcid)); + Datum sqlTextDatum = DirectFunctionCall1(pg_get_function_identity_arguments, + ObjectIdGetDatum(funcid)); /* revert back to original search_path */ PopOverrideSearchPath(); - args = TextDatumGetCString(sqlTextDatum); + const char *args = TextDatumGetCString(sqlTextDatum); appendStringInfo(buf, "(%s)", args); } else if (!func->args_unspecified) @@ -580,9 +571,8 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype) * postgres' TypeNameListToString. For now the best we can do until we understand * the underlying cause better. 
*/ - const char *args = NULL; - args = TypeNameListToString(func->objargs); + const char *args = TypeNameListToString(func->objargs); appendStringInfo(buf, "(%s)", args); } diff --git a/src/backend/distributed/deparser/deparse_type_stmts.c b/src/backend/distributed/deparser/deparse_type_stmts.c index ccaa84267..ad0f2a504 100644 --- a/src/backend/distributed/deparser/deparse_type_stmts.c +++ b/src/backend/distributed/deparser/deparse_type_stmts.c @@ -137,14 +137,12 @@ AppendAlterTypeStmt(StringInfo buf, AlterTableStmt *stmt) appendStringInfo(buf, "ALTER TYPE %s", identifier); foreach(cmdCell, stmt->cmds) { - AlterTableCmd *alterTableCmd = NULL; - if (cmdCell != list_head(stmt->cmds)) { appendStringInfoString(buf, ", "); } - alterTableCmd = castNode(AlterTableCmd, lfirst(cmdCell)); + AlterTableCmd *alterTableCmd = castNode(AlterTableCmd, lfirst(cmdCell)); AppendAlterTypeCmd(buf, alterTableCmd); } @@ -317,13 +315,11 @@ AppendCompositeTypeStmt(StringInfo str, CompositeTypeStmt *stmt) static void AppendCreateEnumStmt(StringInfo str, CreateEnumStmt *stmt) { - RangeVar *typevar = NULL; - const char *identifier = NULL; - - typevar = makeRangeVarFromNameList(stmt->typeName); + RangeVar *typevar = makeRangeVarFromNameList(stmt->typeName); /* create the identifier from the fully qualified rangevar */ - identifier = quote_qualified_identifier(typevar->schemaname, typevar->relname); + const char *identifier = quote_qualified_identifier(typevar->schemaname, + typevar->relname); appendStringInfo(str, "CREATE TYPE %s AS ENUM (", identifier); AppendStringList(str, stmt->vals); @@ -472,11 +468,9 @@ DeparseAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt) static void AppendAlterTypeSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt) { - List *names = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - names = (List *) stmt->object; + List *names = (List *) stmt->object; appendStringInfo(buf, "ALTER TYPE %s SET SCHEMA %s;", NameListToQuotedString(names), 
quote_identifier(stmt->newschema)); } @@ -499,11 +493,9 @@ DeparseAlterTypeOwnerStmt(AlterOwnerStmt *stmt) static void AppendAlterTypeOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) { - List *names = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - names = (List *) stmt->object; + List *names = (List *) stmt->object; appendStringInfo(buf, "ALTER TYPE %s OWNER TO %s;", NameListToQuotedString(names), RoleSpecString(stmt->newowner, true)); } diff --git a/src/backend/distributed/deparser/format_collate.c b/src/backend/distributed/deparser/format_collate.c index 0c67d1f50..c27bc41d6 100644 --- a/src/backend/distributed/deparser/format_collate.c +++ b/src/backend/distributed/deparser/format_collate.c @@ -60,18 +60,14 @@ FormatCollateBEQualified(Oid collate_oid) char * FormatCollateExtended(Oid collid, bits16 flags) { - HeapTuple tuple = NULL; - Form_pg_collation collform = NULL; - char *buf = NULL; char *nspname = NULL; - char *typname = NULL; if (collid == InvalidOid && (flags & FORMAT_COLLATE_ALLOW_INVALID) != 0) { return pstrdup("-"); } - tuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + HeapTuple tuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); if (!HeapTupleIsValid(tuple)) { if ((flags & FORMAT_COLLATE_ALLOW_INVALID) != 0) @@ -83,7 +79,7 @@ FormatCollateExtended(Oid collid, bits16 flags) elog(ERROR, "cache lookup failed for collate %u", collid); } } - collform = (Form_pg_collation) GETSTRUCT(tuple); + Form_pg_collation collform = (Form_pg_collation) GETSTRUCT(tuple); if ((flags & FORMAT_COLLATE_FORCE_QUALIFY) == 0 && CollationIsVisible(collid)) { @@ -94,9 +90,9 @@ FormatCollateExtended(Oid collid, bits16 flags) nspname = get_namespace_name_or_temp(collform->collnamespace); } - typname = NameStr(collform->collname); + char *typname = NameStr(collform->collname); - buf = quote_qualified_identifier(nspname, typname); + char *buf = quote_qualified_identifier(nspname, typname); ReleaseSysCache(tuple); diff --git 
a/src/backend/distributed/deparser/qualify_function_stmt.c b/src/backend/distributed/deparser/qualify_function_stmt.c index c01bf7c43..d6095f3fa 100644 --- a/src/backend/distributed/deparser/qualify_function_stmt.c +++ b/src/backend/distributed/deparser/qualify_function_stmt.c @@ -143,11 +143,9 @@ QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type) { char *schemaName = NULL; char *functionName = NULL; - Oid funcid = InvalidOid; - HeapTuple proctup; - funcid = LookupFuncWithArgs(type, func, true); - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + Oid funcid = LookupFuncWithArgs(type, func, true); + HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); /* * We can not qualify the function if the catalogs do not have any records. @@ -156,9 +154,7 @@ QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type) */ if (HeapTupleIsValid(proctup)) { - Form_pg_proc procform; - - procform = (Form_pg_proc) GETSTRUCT(proctup); + Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); schemaName = get_namespace_name(procform->pronamespace); functionName = NameStr(procform->proname); functionName = pstrdup(functionName); /* we release the tuple before used */ diff --git a/src/backend/distributed/deparser/qualify_type_stmt.c b/src/backend/distributed/deparser/qualify_type_stmt.c index 844cae422..f8941e514 100644 --- a/src/backend/distributed/deparser/qualify_type_stmt.c +++ b/src/backend/distributed/deparser/qualify_type_stmt.c @@ -53,17 +53,15 @@ GetTypeNamespaceNameByNameList(List *names) static Oid TypeOidGetNamespaceOid(Oid typeOid) { - Form_pg_type typeData = NULL; HeapTuple typeTuple = SearchSysCache1(TYPEOID, typeOid); - Oid typnamespace = InvalidOid; if (!HeapTupleIsValid(typeTuple)) { elog(ERROR, "citus cache lookup failed"); return InvalidOid; } - typeData = (Form_pg_type) GETSTRUCT(typeTuple); - typnamespace = typeData->typnamespace; + Form_pg_type typeData = (Form_pg_type) GETSTRUCT(typeTuple); + Oid typnamespace = 
typeData->typnamespace; ReleaseSysCache(typeTuple); @@ -161,11 +159,9 @@ QualifyCreateEnumStmt(CreateEnumStmt *stmt) void QualifyAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt) { - List *names = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - names = (List *) stmt->object; + List *names = (List *) stmt->object; if (list_length(names) == 1) { /* not qualified with schema, lookup type and its schema s*/ @@ -179,11 +175,9 @@ QualifyAlterTypeSchemaStmt(AlterObjectSchemaStmt *stmt) void QualifyAlterTypeOwnerStmt(AlterOwnerStmt *stmt) { - List *names = NIL; - Assert(stmt->objectType == OBJECT_TYPE); - names = (List *) stmt->object; + List *names = (List *) stmt->object; if (list_length(names) == 1) { /* not qualified with schema, lookup type and its schema s*/ diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 9a4d92d21..76bb41a54 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -612,7 +612,6 @@ AdaptiveExecutor(CitusScanState *scanState) TupleTableSlot *resultSlot = NULL; DistributedPlan *distributedPlan = scanState->distributedPlan; - DistributedExecution *execution = NULL; EState *executorState = ScanStateGetExecutorState(scanState); ParamListInfo paramListInfo = executorState->es_param_list_info; TupleDesc tupleDescriptor = ScanStateGetTupleDescriptor(scanState); @@ -645,10 +644,13 @@ AdaptiveExecutor(CitusScanState *scanState) scanState->tuplestorestate = tuplestore_begin_heap(randomAccess, interTransactions, work_mem); - execution = CreateDistributedExecution(distributedPlan->modLevel, taskList, - distributedPlan->hasReturning, paramListInfo, - tupleDescriptor, - scanState->tuplestorestate, targetPoolSize); + DistributedExecution *execution = CreateDistributedExecution( + distributedPlan->modLevel, taskList, + distributedPlan-> + hasReturning, paramListInfo, + tupleDescriptor, + scanState-> + tuplestorestate, 
targetPoolSize); /* * Make sure that we acquire the appropriate locks even if the local tasks @@ -715,7 +717,6 @@ static void RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution) { uint64 rowsProcessed = ExecuteLocalTaskList(scanState, execution->localTaskList); - EState *executorState = NULL; LocalExecutionHappened = true; @@ -725,7 +726,7 @@ RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution) * and in AdaptiveExecutor. Instead, we set executorState here and skip updating it * for reference table modifications in AdaptiveExecutor. */ - executorState = ScanStateGetExecutorState(scanState); + EState *executorState = ScanStateGetExecutorState(scanState); executorState->es_processed = rowsProcessed; } @@ -782,7 +783,6 @@ ExecuteTaskListExtended(RowModifyLevel modLevel, List *taskList, TupleDesc tupleDescriptor, Tuplestorestate *tupleStore, bool hasReturning, int targetPoolSize) { - DistributedExecution *execution = NULL; ParamListInfo paramListInfo = NULL; /* @@ -796,7 +796,7 @@ ExecuteTaskListExtended(RowModifyLevel modLevel, List *taskList, targetPoolSize = 1; } - execution = + DistributedExecution *execution = CreateDistributedExecution(modLevel, taskList, hasReturning, paramListInfo, tupleDescriptor, tupleStore, targetPoolSize); @@ -993,8 +993,6 @@ DistributedPlanModifiesDatabase(DistributedPlan *plan) static bool TaskListModifiesDatabase(RowModifyLevel modLevel, List *taskList) { - Task *firstTask = NULL; - if (modLevel > ROW_MODIFY_READONLY) { return true; @@ -1010,7 +1008,7 @@ TaskListModifiesDatabase(RowModifyLevel modLevel, List *taskList) return false; } - firstTask = (Task *) linitial(taskList); + Task *firstTask = (Task *) linitial(taskList); return !ReadOnlyTask(firstTask->taskType); } @@ -1027,8 +1025,6 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution) { List *taskList = execution->tasksToExecute; int taskCount = list_length(taskList); - Task *task = NULL; - bool selectForUpdate = 
false; if (MultiShardCommitProtocol == COMMIT_PROTOCOL_BARE) { @@ -1040,9 +1036,9 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution) return false; } - task = (Task *) linitial(taskList); + Task *task = (Task *) linitial(taskList); - selectForUpdate = task->relationRowLockList != NIL; + bool selectForUpdate = task->relationRowLockList != NIL; if (selectForUpdate) { /* @@ -1114,16 +1110,12 @@ DistributedExecutionRequiresRollback(DistributedExecution *execution) static bool TaskListRequires2PC(List *taskList) { - Task *task = NULL; - bool multipleTasks = false; - uint64 anchorShardId = INVALID_SHARD_ID; - if (taskList == NIL) { return false; } - task = (Task *) linitial(taskList); + Task *task = (Task *) linitial(taskList); if (task->replicationModel == REPLICATION_MODEL_2PC) { return true; @@ -1136,13 +1128,13 @@ TaskListRequires2PC(List *taskList) * TODO: Do we ever need replicationModel in the Task structure? * Can't we always rely on anchorShardId? */ - anchorShardId = task->anchorShardId; + uint64 anchorShardId = task->anchorShardId; if (anchorShardId != INVALID_SHARD_ID && ReferenceTableShardId(anchorShardId)) { return true; } - multipleTasks = list_length(taskList) > 1; + bool multipleTasks = list_length(taskList) > 1; if (!ReadOnlyTask(task->taskType) && multipleTasks && MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC) { @@ -1190,7 +1182,6 @@ ReadOnlyTask(TaskType taskType) static bool SelectForUpdateOnReferenceTable(RowModifyLevel modLevel, List *taskList) { - Task *task = NULL; ListCell *rtiLockCell = NULL; if (modLevel != ROW_MODIFY_READONLY) @@ -1204,7 +1195,7 @@ SelectForUpdateOnReferenceTable(RowModifyLevel modLevel, List *taskList) return false; } - task = (Task *) linitial(taskList); + Task *task = (Task *) linitial(taskList); foreach(rtiLockCell, task->relationRowLockList) { RelationRowLock *relationRowLock = (RelationRowLock *) lfirst(rtiLockCell); @@ -1441,7 +1432,6 @@ AssignTasksToConnections(DistributedExecution *execution) 
foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); - ShardCommandExecution *shardCommandExecution = NULL; ListCell *taskPlacementCell = NULL; bool placementExecutionReady = true; int placementExecutionIndex = 0; @@ -1450,7 +1440,7 @@ AssignTasksToConnections(DistributedExecution *execution) /* * Execution of a command on a shard, which may have multiple replicas. */ - shardCommandExecution = + ShardCommandExecution *shardCommandExecution = (ShardCommandExecution *) palloc0(sizeof(ShardCommandExecution)); shardCommandExecution->task = task; shardCommandExecution->executionOrder = ExecutionOrderForTask(modLevel, task); @@ -1467,10 +1457,7 @@ AssignTasksToConnections(DistributedExecution *execution) foreach(taskPlacementCell, task->taskPlacementList) { ShardPlacement *taskPlacement = (ShardPlacement *) lfirst(taskPlacementCell); - List *placementAccessList = NULL; - MultiConnection *connection = NULL; int connectionFlags = 0; - TaskPlacementExecution *placementExecution = NULL; char *nodeName = taskPlacement->nodeName; int nodePort = taskPlacement->nodePort; WorkerPool *workerPool = FindOrCreateWorkerPool(execution, nodeName, @@ -1480,7 +1467,7 @@ AssignTasksToConnections(DistributedExecution *execution) * Execution of a command on a shard placement, which may not always * happen if the query is read-only and the shard has multiple placements. 
*/ - placementExecution = + TaskPlacementExecution *placementExecution = (TaskPlacementExecution *) palloc0(sizeof(TaskPlacementExecution)); placementExecution->shardCommandExecution = shardCommandExecution; placementExecution->shardPlacement = taskPlacement; @@ -1501,15 +1488,16 @@ AssignTasksToConnections(DistributedExecution *execution) placementExecutionIndex++; - placementAccessList = PlacementAccessListForTask(task, taskPlacement); + List *placementAccessList = PlacementAccessListForTask(task, taskPlacement); /* * Determine whether the task has to be assigned to a particular connection * due to a preceding access to the placement in the same transaction. */ - connection = GetConnectionIfPlacementAccessedInXact(connectionFlags, - placementAccessList, - NULL); + MultiConnection *connection = GetConnectionIfPlacementAccessedInXact( + connectionFlags, + placementAccessList, + NULL); if (connection != NULL) { /* @@ -1670,7 +1658,6 @@ FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int node { WorkerPool *workerPool = NULL; ListCell *workerCell = NULL; - int nodeConnectionCount = 0; foreach(workerCell, execution->workerList) { @@ -1690,7 +1677,7 @@ FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int node workerPool->distributedExecution = execution; /* "open" connections aggressively when there are cached connections */ - nodeConnectionCount = MaxCachedConnectionsPerWorker; + int nodeConnectionCount = MaxCachedConnectionsPerWorker; workerPool->maxNewConnectionsPerCycle = Max(1, nodeConnectionCount); dlist_init(&workerPool->pendingTaskQueue); @@ -1775,8 +1762,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection) static bool ShouldRunTasksSequentially(List *taskList) { - Task *initialTask = NULL; - if (list_length(taskList) < 2) { /* single task plans are already qualified as sequential by definition */ @@ -1784,7 +1769,7 @@ ShouldRunTasksSequentially(List *taskList) } /* all the tasks are 
the same, so we only look one */ - initialTask = (Task *) linitial(taskList); + Task *initialTask = (Task *) linitial(taskList); if (initialTask->rowValuesLists != NIL) { /* found a multi-row INSERT */ @@ -1860,7 +1845,6 @@ RunDistributedExecution(DistributedExecution *execution) while (execution->unfinishedTaskCount > 0 && !cancellationReceived) { - int eventCount = 0; int eventIndex = 0; ListCell *workerCell = NULL; long timeout = NextEventTimeout(execution); @@ -1906,14 +1890,13 @@ RunDistributedExecution(DistributedExecution *execution) } /* wait for I/O events */ - eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events, - eventSetSize, WAIT_EVENT_CLIENT_READ); + int eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events, + eventSetSize, WAIT_EVENT_CLIENT_READ); /* process I/O events */ for (; eventIndex < eventCount; eventIndex++) { WaitEvent *event = &events[eventIndex]; - WorkerSession *session = NULL; if (event->events & WL_POSTMASTER_DEATH) { @@ -1944,7 +1927,7 @@ RunDistributedExecution(DistributedExecution *execution) continue; } - session = (WorkerSession *) event->user_data; + WorkerSession *session = (WorkerSession *) event->user_data; session->latestUnconsumedWaitEvents = event->events; ConnectionStateMachine(session); @@ -2001,7 +1984,6 @@ ManageWorkerPool(WorkerPool *workerPool) int failedConnectionCount = workerPool->failedConnectionCount; int readyTaskCount = workerPool->readyTaskCount; int newConnectionCount = 0; - int connectionIndex = 0; /* we should always have more (or equal) active connections than idle connections */ Assert(activeConnectionCount >= idleConnectionCount); @@ -2091,19 +2073,16 @@ ManageWorkerPool(WorkerPool *workerPool) ereport(DEBUG4, (errmsg("opening %d new connections to %s:%d", newConnectionCount, workerPool->nodeName, workerPool->nodePort))); - for (connectionIndex = 0; connectionIndex < newConnectionCount; connectionIndex++) + for (int connectionIndex = 0; connectionIndex < 
newConnectionCount; connectionIndex++) { - MultiConnection *connection = NULL; - WorkerSession *session = NULL; - /* experimental: just to see the perf benefits of caching connections */ int connectionFlags = 0; /* open a new connection to the worker */ - connection = StartNodeUserDatabaseConnection(connectionFlags, - workerPool->nodeName, - workerPool->nodePort, - NULL, NULL); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + workerPool->nodeName, + workerPool->nodePort, + NULL, NULL); /* * Assign the initial state in the connection state machine. The connection @@ -2119,7 +2098,7 @@ ManageWorkerPool(WorkerPool *workerPool) connection->claimedExclusively = true; /* create a session for the connection */ - session = FindOrCreateWorkerSession(workerPool, connection); + WorkerSession *session = FindOrCreateWorkerSession(workerPool, connection); /* always poll the connection in the first round */ UpdateConnectionWaitFlags(session, WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE); @@ -2250,7 +2229,6 @@ NextEventTimeout(DistributedExecution *execution) foreach(workerCell, execution->workerList) { WorkerPool *workerPool = (WorkerPool *) lfirst(workerCell); - int initiatedConnectionCount = 0; if (workerPool->failed) { @@ -2278,7 +2256,7 @@ NextEventTimeout(DistributedExecution *execution) } } - initiatedConnectionCount = list_length(workerPool->sessionList); + int initiatedConnectionCount = list_length(workerPool->sessionList); /* * If there are connections to open we wait at most up to the end of the @@ -2347,8 +2325,6 @@ ConnectionStateMachine(WorkerSession *session) case MULTI_CONNECTION_CONNECTING: { - PostgresPollingStatusType pollMode; - ConnStatusType status = PQstatus(connection->pgConn); if (status == CONNECTION_OK) { @@ -2372,7 +2348,7 @@ ConnectionStateMachine(WorkerSession *session) break; } - pollMode = PQconnectPoll(connection->pgConn); + PostgresPollingStatusType pollMode = PQconnectPoll(connection->pgConn); if (pollMode == 
PGRES_POLLING_FAILED) { connection->connectionState = MULTI_CONNECTION_FAILED; @@ -2543,15 +2519,13 @@ ConnectionStateMachine(WorkerSession *session) static void Activate2PCIfModifyingTransactionExpandsToNewNode(WorkerSession *session) { - DistributedExecution *execution = NULL; - if (MultiShardCommitProtocol != COMMIT_PROTOCOL_2PC) { /* we don't need 2PC, so no need to continue */ return; } - execution = session->workerPool->distributedExecution; + DistributedExecution *execution = session->workerPool->distributedExecution; if (TransactionModifiedDistributedTable(execution) && DistributedExecutionModifiesDatabase(execution) && !ConnectionModifiedPlacement(session->connection)) @@ -2622,10 +2596,8 @@ TransactionStateMachine(WorkerSession *session) } else { - TaskPlacementExecution *placementExecution = NULL; - bool placementExecutionStarted = false; - - placementExecution = PopPlacementExecution(session); + TaskPlacementExecution *placementExecution = PopPlacementExecution( + session); if (placementExecution == NULL) { /* @@ -2637,7 +2609,7 @@ TransactionStateMachine(WorkerSession *session) break; } - placementExecutionStarted = + bool placementExecutionStarted = StartPlacementExecutionOnSession(placementExecution, session); if (!placementExecutionStarted) { @@ -2659,9 +2631,7 @@ TransactionStateMachine(WorkerSession *session) case REMOTE_TRANS_SENT_BEGIN: case REMOTE_TRANS_CLEARING_RESULTS: { - PGresult *result = NULL; - - result = PQgetResult(connection->pgConn); + PGresult *result = PQgetResult(connection->pgConn); if (result != NULL) { if (!IsResponseOK(result)) @@ -2715,10 +2685,8 @@ TransactionStateMachine(WorkerSession *session) case REMOTE_TRANS_STARTED: { - TaskPlacementExecution *placementExecution = NULL; - bool placementExecutionStarted = false; - - placementExecution = PopPlacementExecution(session); + TaskPlacementExecution *placementExecution = PopPlacementExecution( + session); if (placementExecution == NULL) { /* no tasks are ready to be executed 
at the moment */ @@ -2726,7 +2694,7 @@ TransactionStateMachine(WorkerSession *session) break; } - placementExecutionStarted = + bool placementExecutionStarted = StartPlacementExecutionOnSession(placementExecution, session); if (!placementExecutionStarted) { @@ -2742,7 +2710,6 @@ TransactionStateMachine(WorkerSession *session) case REMOTE_TRANS_SENT_COMMAND: { - bool fetchDone = false; TaskPlacementExecution *placementExecution = session->currentTask; ShardCommandExecution *shardCommandExecution = placementExecution->shardCommandExecution; @@ -2754,7 +2721,7 @@ TransactionStateMachine(WorkerSession *session) storeRows = false; } - fetchDone = ReceiveResults(session, storeRows); + bool fetchDone = ReceiveResults(session, storeRows); if (!fetchDone) { break; @@ -2810,7 +2777,6 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags) static bool CheckConnectionReady(WorkerSession *session) { - int sendStatus = 0; MultiConnection *connection = session->connection; int waitFlags = WL_SOCKET_READABLE; bool connectionReady = false; @@ -2823,7 +2789,7 @@ CheckConnectionReady(WorkerSession *session) } /* try to send all pending data */ - sendStatus = PQflush(connection->pgConn); + int sendStatus = PQflush(connection->pgConn); if (sendStatus == -1) { connection->connectionState = MULTI_CONNECTION_LOST; @@ -2865,10 +2831,9 @@ CheckConnectionReady(WorkerSession *session) static TaskPlacementExecution * PopPlacementExecution(WorkerSession *session) { - TaskPlacementExecution *placementExecution = NULL; WorkerPool *workerPool = session->workerPool; - placementExecution = PopAssignedPlacementExecution(session); + TaskPlacementExecution *placementExecution = PopAssignedPlacementExecution(session); if (placementExecution == NULL) { if (session->commandsSent > 0 && UseConnectionPerPlacement()) @@ -2894,7 +2859,6 @@ PopPlacementExecution(WorkerSession *session) static TaskPlacementExecution * PopAssignedPlacementExecution(WorkerSession *session) { - TaskPlacementExecution 
*placementExecution = NULL; dlist_head *readyTaskQueue = &(session->readyTaskQueue); if (dlist_is_empty(readyTaskQueue)) @@ -2902,9 +2866,10 @@ PopAssignedPlacementExecution(WorkerSession *session) return NULL; } - placementExecution = dlist_container(TaskPlacementExecution, - sessionReadyQueueNode, - dlist_pop_head_node(readyTaskQueue)); + TaskPlacementExecution *placementExecution = dlist_container(TaskPlacementExecution, + sessionReadyQueueNode, + dlist_pop_head_node( + readyTaskQueue)); return placementExecution; } @@ -2916,7 +2881,6 @@ PopAssignedPlacementExecution(WorkerSession *session) static TaskPlacementExecution * PopUnassignedPlacementExecution(WorkerPool *workerPool) { - TaskPlacementExecution *placementExecution = NULL; dlist_head *readyTaskQueue = &(workerPool->readyTaskQueue); if (dlist_is_empty(readyTaskQueue)) @@ -2924,9 +2888,10 @@ PopUnassignedPlacementExecution(WorkerPool *workerPool) return NULL; } - placementExecution = dlist_container(TaskPlacementExecution, - workerReadyQueueNode, - dlist_pop_head_node(readyTaskQueue)); + TaskPlacementExecution *placementExecution = dlist_container(TaskPlacementExecution, + workerReadyQueueNode, + dlist_pop_head_node( + readyTaskQueue)); workerPool->readyTaskCount--; @@ -2960,7 +2925,6 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution, List *placementAccessList = PlacementAccessListForTask(task, taskPlacement); char *queryString = task->queryString; int querySent = 0; - int singleRowMode = 0; /* * Make sure that subsequent commands on the same placement @@ -3007,7 +2971,7 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution, return false; } - singleRowMode = PQsetSingleRowMode(connection->pgConn); + int singleRowMode = PQsetSingleRowMode(connection->pgConn); if (singleRowMode == 0) { connection->connectionState = MULTI_CONNECTION_LOST; @@ -3036,7 +3000,6 @@ ReceiveResults(WorkerSession *session, bool storeRows) uint32 expectedColumnCount = 0; char 
**columnArray = execution->columnArray; Tuplestorestate *tupleStore = execution->tupleStore; - MemoryContext ioContext = NULL; if (tupleDescriptor != NULL) { @@ -3048,19 +3011,16 @@ ReceiveResults(WorkerSession *session, bool storeRows) * into tuple. The context is reseted on every row, thus we create it at the * start of the loop and reset on every iteration. */ - ioContext = AllocSetContextCreate(CurrentMemoryContext, - "IoContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext ioContext = AllocSetContextCreate(CurrentMemoryContext, + "IoContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); while (!PQisBusy(connection->pgConn)) { - uint32 rowIndex = 0; uint32 columnIndex = 0; uint32 rowsProcessed = 0; - uint32 columnCount = 0; - ExecStatusType resultStatus = 0; PGresult *result = PQgetResult(connection->pgConn); if (result == NULL) @@ -3070,7 +3030,7 @@ ReceiveResults(WorkerSession *session, bool storeRows) break; } - resultStatus = PQresultStatus(result); + ExecStatusType resultStatus = PQresultStatus(result); if (resultStatus == PGRES_COMMAND_OK) { char *currentAffectedTupleString = PQcmdTuples(result); @@ -3121,7 +3081,7 @@ ReceiveResults(WorkerSession *session, bool storeRows) } rowsProcessed = PQntuples(result); - columnCount = PQnfields(result); + uint32 columnCount = PQnfields(result); if (columnCount != expectedColumnCount) { @@ -3130,10 +3090,8 @@ ReceiveResults(WorkerSession *session, bool storeRows) columnCount, expectedColumnCount))); } - for (rowIndex = 0; rowIndex < rowsProcessed; rowIndex++) + for (uint32 rowIndex = 0; rowIndex < rowsProcessed; rowIndex++) { - HeapTuple heapTuple = NULL; - MemoryContext oldContextPerRow = NULL; memset(columnArray, 0, columnCount * sizeof(char *)); for (columnIndex = 0; columnIndex < columnCount; columnIndex++) @@ -3159,9 +3117,10 @@ ReceiveResults(WorkerSession *session, bool storeRows) * protects us from any memory 
leaks that might be present in I/O functions * called by BuildTupleFromCStrings. */ - oldContextPerRow = MemoryContextSwitchTo(ioContext); + MemoryContext oldContextPerRow = MemoryContextSwitchTo(ioContext); - heapTuple = BuildTupleFromCStrings(attributeInputMetadata, columnArray); + HeapTuple heapTuple = BuildTupleFromCStrings(attributeInputMetadata, + columnArray); MemoryContextSwitchTo(oldContextPerRow); @@ -3309,7 +3268,6 @@ PlacementExecutionDone(TaskPlacementExecution *placementExecution, bool succeede ShardCommandExecution *shardCommandExecution = placementExecution->shardCommandExecution; TaskExecutionState executionState = shardCommandExecution->executionState; - TaskExecutionState newExecutionState = TASK_EXECUTION_NOT_FINISHED; bool failedPlacementExecutionIsOnPendingQueue = false; /* mark the placement execution as finished */ @@ -3360,7 +3318,8 @@ PlacementExecutionDone(TaskPlacementExecution *placementExecution, bool succeede * Update unfinishedTaskCount only when state changes from not finished to * finished or failed state. 
*/ - newExecutionState = TaskExecutionStateMachine(shardCommandExecution); + TaskExecutionState newExecutionState = TaskExecutionStateMachine( + shardCommandExecution); if (newExecutionState == TASK_EXECUTION_FINISHED) { execution->unfinishedTaskCount--; @@ -3597,21 +3556,18 @@ TaskExecutionStateMachine(ShardCommandExecution *shardCommandExecution) static WaitEventSet * BuildWaitEventSet(List *sessionList) { - WaitEventSet *waitEventSet = NULL; ListCell *sessionCell = NULL; /* additional 2 is for postmaster and latch */ int eventSetSize = list_length(sessionList) + 2; - waitEventSet = + WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize); foreach(sessionCell, sessionList) { WorkerSession *session = lfirst(sessionCell); MultiConnection *connection = session->connection; - int sock = 0; - int waitEventSetIndex = 0; if (connection->pgConn == NULL) { @@ -3625,15 +3581,16 @@ BuildWaitEventSet(List *sessionList) continue; } - sock = PQsocket(connection->pgConn); + int sock = PQsocket(connection->pgConn); if (sock == -1) { /* connection was closed */ continue; } - waitEventSetIndex = AddWaitEventToSet(waitEventSet, connection->waitFlags, sock, - NULL, (void *) session); + int waitEventSetIndex = AddWaitEventToSet(waitEventSet, connection->waitFlags, + sock, + NULL, (void *) session); session->waitEventSetIndex = waitEventSetIndex; } @@ -3657,7 +3614,6 @@ UpdateWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) { WorkerSession *session = lfirst(sessionCell); MultiConnection *connection = session->connection; - int sock = 0; int waitEventSetIndex = session->waitEventSetIndex; if (connection->pgConn == NULL) @@ -3672,7 +3628,7 @@ UpdateWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) continue; } - sock = PQsocket(connection->pgConn); + int sock = PQsocket(connection->pgConn); if (sock == -1) { /* connection was closed */ @@ -3724,14 +3680,13 @@ ExtractParametersFromParamList(ParamListInfo paramListInfo, const char 
***parameterValues, bool useOriginalCustomTypeOids) { - int parameterIndex = 0; int parameterCount = paramListInfo->numParams; *parameterTypes = (Oid *) palloc0(parameterCount * sizeof(Oid)); *parameterValues = (const char **) palloc0(parameterCount * sizeof(char *)); /* get parameter types and values */ - for (parameterIndex = 0; parameterIndex < parameterCount; parameterIndex++) + for (int parameterIndex = 0; parameterIndex < parameterCount; parameterIndex++) { ParamExternData *parameterData = ¶mListInfo->params[parameterIndex]; Oid typeOutputFunctionId = InvalidOid; diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index bca6691c1..24b75b79c 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -119,12 +119,11 @@ RegisterCitusCustomScanMethods(void) static void CitusBeginScan(CustomScanState *node, EState *estate, int eflags) { - CitusScanState *scanState = NULL; DistributedPlan *distributedPlan = NULL; MarkCitusInitiatedCoordinatorBackend(); - scanState = (CitusScanState *) node; + CitusScanState *scanState = (CitusScanState *) node; #if PG_VERSION_NUM >= 120000 ExecInitResultSlot(&scanState->customScanState.ss.ps, &TTSOpsMinimalTuple); @@ -152,7 +151,6 @@ TupleTableSlot * CitusExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; - TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { @@ -161,7 +159,7 @@ CitusExecScan(CustomScanState *node) scanState->finishedRemoteScan = true; } - resultSlot = ReturnTupleFromTuplestore(scanState); + TupleTableSlot *resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } @@ -179,21 +177,18 @@ static void CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags) { CitusScanState *scanState = (CitusScanState *) node; - DistributedPlan *distributedPlan = NULL; - Job *workerJob = NULL; - Query *jobQuery = 
NULL; - List *taskList = NIL; /* * We must not change the distributed plan since it may be reused across multiple * executions of a prepared statement. Instead we create a deep copy that we only * use for the current execution. */ - distributedPlan = scanState->distributedPlan = copyObject(scanState->distributedPlan); + DistributedPlan *distributedPlan = scanState->distributedPlan = copyObject( + scanState->distributedPlan); - workerJob = distributedPlan->workerJob; - jobQuery = workerJob->jobQuery; - taskList = workerJob->taskList; + Job *workerJob = distributedPlan->workerJob; + Query *jobQuery = workerJob->jobQuery; + List *taskList = workerJob->taskList; if (workerJob->requiresMasterEvaluation) { @@ -407,8 +402,6 @@ ScanStateGetExecutorState(CitusScanState *scanState) CustomScan * FetchCitusCustomScanIfExists(Plan *plan) { - CustomScan *customScan = NULL; - if (plan == NULL) { return NULL; @@ -419,7 +412,7 @@ FetchCitusCustomScanIfExists(Plan *plan) return (CustomScan *) plan; } - customScan = FetchCitusCustomScanIfExists(plan->lefttree); + CustomScan *customScan = FetchCitusCustomScanIfExists(plan->lefttree); if (customScan == NULL) { @@ -457,9 +450,6 @@ IsCitusPlan(Plan *plan) bool IsCitusCustomScan(Plan *plan) { - CustomScan *customScan = NULL; - Node *privateNode = NULL; - if (plan == NULL) { return false; @@ -470,13 +460,13 @@ IsCitusCustomScan(Plan *plan) return false; } - customScan = (CustomScan *) plan; + CustomScan *customScan = (CustomScan *) plan; if (list_length(customScan->custom_private) == 0) { return false; } - privateNode = (Node *) linitial(customScan->custom_private); + Node *privateNode = (Node *) linitial(customScan->custom_private); if (!CitusIsA(privateNode, DistributedPlan)) { return false; diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index 809e53775..6943b664a 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ 
b/src/backend/distributed/executor/insert_select_executor.c @@ -93,7 +93,6 @@ static TupleTableSlot * CoordinatorInsertSelectExecScanInternal(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; - TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { @@ -197,7 +196,7 @@ CoordinatorInsertSelectExecScanInternal(CustomScanState *node) scanState->finishedRemoteScan = true; } - resultSlot = ReturnTupleFromTuplestore(scanState); + TupleTableSlot *resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } @@ -217,36 +216,34 @@ ExecuteSelectIntoColocatedIntermediateResults(Oid targetRelationId, char *intermediateResultIdPrefix) { ParamListInfo paramListInfo = executorState->es_param_list_info; - int partitionColumnIndex = -1; - List *columnNameList = NIL; bool stopOnFailure = false; - char partitionMethod = 0; - CitusCopyDestReceiver *copyDest = NULL; - Query *queryCopy = NULL; - partitionMethod = PartitionMethod(targetRelationId); + char partitionMethod = PartitionMethod(targetRelationId); if (partitionMethod == DISTRIBUTE_BY_NONE) { stopOnFailure = true; } /* Get column name list and partition column index for the target table */ - columnNameList = BuildColumnNameListFromTargetList(targetRelationId, - insertTargetList); - partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId, - columnNameList); + List *columnNameList = BuildColumnNameListFromTargetList(targetRelationId, + insertTargetList); + int partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId, + columnNameList); /* set up a DestReceiver that copies into the intermediate table */ - copyDest = CreateCitusCopyDestReceiver(targetRelationId, columnNameList, - partitionColumnIndex, executorState, - stopOnFailure, intermediateResultIdPrefix); + CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(targetRelationId, + columnNameList, + partitionColumnIndex, + executorState, + stopOnFailure, + 
intermediateResultIdPrefix); /* * Make a copy of the query, since ExecuteQueryIntoDestReceiver may scribble on it * and we want it to be replanned every time if it is stored in a prepared * statement. */ - queryCopy = copyObject(selectQuery); + Query *queryCopy = copyObject(selectQuery); ExecuteQueryIntoDestReceiver(queryCopy, paramListInfo, (DestReceiver *) copyDest); @@ -268,36 +265,33 @@ ExecuteSelectIntoRelation(Oid targetRelationId, List *insertTargetList, Query *selectQuery, EState *executorState) { ParamListInfo paramListInfo = executorState->es_param_list_info; - int partitionColumnIndex = -1; - List *columnNameList = NIL; bool stopOnFailure = false; - char partitionMethod = 0; - CitusCopyDestReceiver *copyDest = NULL; - Query *queryCopy = NULL; - partitionMethod = PartitionMethod(targetRelationId); + char partitionMethod = PartitionMethod(targetRelationId); if (partitionMethod == DISTRIBUTE_BY_NONE) { stopOnFailure = true; } /* Get column name list and partition column index for the target table */ - columnNameList = BuildColumnNameListFromTargetList(targetRelationId, - insertTargetList); - partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId, - columnNameList); + List *columnNameList = BuildColumnNameListFromTargetList(targetRelationId, + insertTargetList); + int partitionColumnIndex = PartitionColumnIndexFromColumnList(targetRelationId, + columnNameList); /* set up a DestReceiver that copies into the distributed table */ - copyDest = CreateCitusCopyDestReceiver(targetRelationId, columnNameList, - partitionColumnIndex, executorState, - stopOnFailure, NULL); + CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(targetRelationId, + columnNameList, + partitionColumnIndex, + executorState, + stopOnFailure, NULL); /* * Make a copy of the query, since ExecuteQueryIntoDestReceiver may scribble on it * and we want it to be replanned every time if it is stored in a prepared * statement. 
*/ - queryCopy = copyObject(selectQuery); + Query *queryCopy = copyObject(selectQuery); ExecuteQueryIntoDestReceiver(queryCopy, paramListInfo, (DestReceiver *) copyDest); diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index acf0cbffb..fed7ad6cf 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -111,10 +111,7 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS) char *resultIdString = text_to_cstring(resultIdText); text *queryText = PG_GETARG_TEXT_P(1); char *queryString = text_to_cstring(queryText); - EState *estate = NULL; - List *nodeList = NIL; bool writeLocalFile = false; - RemoteFileDestReceiver *resultDest = NULL; ParamListInfo paramListInfo = NULL; CheckCitusVersion(ERROR); @@ -127,11 +124,13 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS) */ BeginOrContinueCoordinatedTransaction(); - nodeList = ActivePrimaryWorkerNodeList(NoLock); - estate = CreateExecutorState(); - resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString, - estate, nodeList, - writeLocalFile); + List *nodeList = ActivePrimaryWorkerNodeList(NoLock); + EState *estate = CreateExecutorState(); + RemoteFileDestReceiver *resultDest = + (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString, + estate, + nodeList, + writeLocalFile); ExecuteQueryStringIntoDestReceiver(queryString, paramListInfo, (DestReceiver *) resultDest); @@ -153,10 +152,8 @@ create_intermediate_result(PG_FUNCTION_ARGS) char *resultIdString = text_to_cstring(resultIdText); text *queryText = PG_GETARG_TEXT_P(1); char *queryString = text_to_cstring(queryText); - EState *estate = NULL; List *nodeList = NIL; bool writeLocalFile = true; - RemoteFileDestReceiver *resultDest = NULL; ParamListInfo paramListInfo = NULL; CheckCitusVersion(ERROR); @@ -169,10 +166,12 @@ create_intermediate_result(PG_FUNCTION_ARGS) */ 
BeginOrContinueCoordinatedTransaction(); - estate = CreateExecutorState(); - resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString, - estate, nodeList, - writeLocalFile); + EState *estate = CreateExecutorState(); + RemoteFileDestReceiver *resultDest = + (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString, + estate, + nodeList, + writeLocalFile); ExecuteQueryStringIntoDestReceiver(queryString, paramListInfo, (DestReceiver *) resultDest); @@ -193,9 +192,8 @@ DestReceiver * CreateRemoteFileDestReceiver(char *resultId, EState *executorState, List *initialNodeList, bool writeLocalFile) { - RemoteFileDestReceiver *resultDest = NULL; - - resultDest = (RemoteFileDestReceiver *) palloc0(sizeof(RemoteFileDestReceiver)); + RemoteFileDestReceiver *resultDest = (RemoteFileDestReceiver *) palloc0( + sizeof(RemoteFileDestReceiver)); /* set up the DestReceiver function pointers */ resultDest->pub.receiveSlot = RemoteFileDestReceiverReceive; @@ -228,7 +226,6 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, const char *resultId = resultDest->resultId; - CopyOutState copyOutState = NULL; const char *delimiterCharacter = "\t"; const char *nullPrintCharacter = "\\N"; @@ -240,7 +237,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, resultDest->tupleDescriptor = inputTupleDescriptor; /* define how tuples will be serialised */ - copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); + CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; copyOutState->null_print = (char *) nullPrintCharacter; copyOutState->null_print_client = (char *) nullPrintCharacter; @@ -256,12 +253,11 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, { const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); - const char *fileName = NULL; /* make sure the directory 
exists */ CreateIntermediateResultsDirectory(); - fileName = QueryResultFileName(resultId); + const char *fileName = QueryResultFileName(resultId); resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, @@ -273,7 +269,6 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, WorkerNode *workerNode = (WorkerNode *) lfirst(initialNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - MultiConnection *connection = NULL; /* * We prefer to use a connection that is not associcated with @@ -281,7 +276,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, * exclusively and that would prevent the consecutive DML/DDL * use the same connection. */ - connection = StartNonDataAccessConnection(nodeName, nodePort); + MultiConnection *connection = StartNonDataAccessConnection(nodeName, nodePort); ClaimConnectionExclusively(connection); MarkRemoteTransactionCritical(connection); @@ -296,12 +291,10 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation, foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - StringInfo copyCommand = NULL; - bool querySent = false; - copyCommand = ConstructCopyResultStatement(resultId); + StringInfo copyCommand = ConstructCopyResultStatement(resultId); - querySent = SendRemoteCommand(connection, copyCommand->data); + bool querySent = SendRemoteCommand(connection, copyCommand->data); if (!querySent) { ReportConnectionError(connection, ERROR); @@ -371,8 +364,6 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) CopyOutState copyOutState = resultDest->copyOutState; FmgrInfo *columnOutputFunctions = resultDest->columnOutputFunctions; - Datum *columnValues = NULL; - bool *columnNulls = NULL; StringInfo copyData = copyOutState->fe_msgbuf; EState *executorState = resultDest->executorState; @@ -381,8 +372,8 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, 
DestReceiver *dest) slot_getallattrs(slot); - columnValues = slot->tts_values; - columnNulls = slot->tts_isnull; + Datum *columnValues = slot->tts_values; + bool *columnNulls = slot->tts_isnull; resetStringInfo(copyData); @@ -526,11 +517,9 @@ RemoteFileDestReceiverDestroy(DestReceiver *destReceiver) void ReceiveQueryResultViaCopy(const char *resultId) { - const char *resultFileName = NULL; - CreateIntermediateResultsDirectory(); - resultFileName = QueryResultFileName(resultId); + const char *resultFileName = QueryResultFileName(resultId); RedirectCopyDataToRegularFile(resultFileName); } @@ -671,12 +660,10 @@ RemoveIntermediateResultsDirectory(void) int64 IntermediateResultSize(char *resultId) { - char *resultFileName = NULL; struct stat fileStat; - int statOK = 0; - resultFileName = QueryResultFileName(resultId); - statOK = stat(resultFileName, &fileStat); + char *resultFileName = QueryResultFileName(resultId); + int statOK = stat(resultFileName, &fileStat); if (statOK < 0) { return -1; @@ -710,24 +697,21 @@ read_intermediate_result(PG_FUNCTION_ARGS) Datum copyFormatLabelDatum = DirectFunctionCall1(enum_out, copyFormatOidDatum); char *copyFormatLabel = DatumGetCString(copyFormatLabelDatum); - char *resultFileName = NULL; struct stat fileStat; - int statOK = 0; - Tuplestorestate *tupstore = NULL; TupleDesc tupleDescriptor = NULL; CheckCitusVersion(ERROR); - resultFileName = QueryResultFileName(resultIdString); - statOK = stat(resultFileName, &fileStat); + char *resultFileName = QueryResultFileName(resultIdString); + int statOK = stat(resultFileName, &fileStat); if (statOK != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("result \"%s\" does not exist", resultIdString))); } - tupstore = SetupTuplestore(fcinfo, &tupleDescriptor); + Tuplestorestate *tupstore = SetupTuplestore(fcinfo, &tupleDescriptor); ReadFileIntoTupleStore(resultFileName, copyFormatLabel, tupleDescriptor, tupstore); diff --git a/src/backend/distributed/executor/local_executor.c 
b/src/backend/distributed/executor/local_executor.c index 179ec498b..ea0b82fad 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -142,8 +142,6 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList) { Task *task = (Task *) lfirst(taskCell); - PlannedStmt *localPlan = NULL; - int cursorOptions = 0; const char *shardQueryString = task->queryString; Query *shardQuery = ParseQueryString(shardQueryString, parameterTypes, numParams); @@ -153,7 +151,7 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList) * go through the distributed executor, which we do not want since the * query is already known to be local. */ - cursorOptions = 0; + int cursorOptions = 0; /* * Altough the shardQuery is local to this node, we prefer planner() @@ -163,7 +161,7 @@ ExecuteLocalTaskList(CitusScanState *scanState, List *taskList) * implemented. So, let planner to call distributed_planner() which * eventually calls standard_planner(). 
*/ - localPlan = planner(shardQuery, cursorOptions, paramListInfo); + PlannedStmt *localPlan = planner(shardQuery, cursorOptions, paramListInfo); LogLocalCommand(shardQueryString); @@ -241,7 +239,6 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList, } else { - Task *localTask = NULL; Task *remoteTask = NULL; /* @@ -252,7 +249,7 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList, */ task->partiallyLocalOrRemote = true; - localTask = copyObject(task); + Task *localTask = copyObject(task); localTask->taskPlacementList = localTaskPlacementList; *localTaskList = lappend(*localTaskList, localTask); @@ -318,7 +315,6 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que DestReceiver *tupleStoreDestReceiever = CreateDestReceiver(DestTuplestore); ScanDirection scanDirection = ForwardScanDirection; QueryEnvironment *queryEnv = create_queryEnv(); - QueryDesc *queryDesc = NULL; int eflags = 0; uint64 totalRowsProcessed = 0; @@ -331,10 +327,10 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que CurrentMemoryContext, false); /* Create a QueryDesc for the query */ - queryDesc = CreateQueryDesc(taskPlan, queryString, - GetActiveSnapshot(), InvalidSnapshot, - tupleStoreDestReceiever, paramListInfo, - queryEnv, 0); + QueryDesc *queryDesc = CreateQueryDesc(taskPlan, queryString, + GetActiveSnapshot(), InvalidSnapshot, + tupleStoreDestReceiever, paramListInfo, + queryEnv, 0); ExecutorStart(queryDesc, eflags); ExecutorRun(queryDesc, scanDirection, 0L, true); @@ -365,8 +361,6 @@ ExecuteLocalTaskPlan(CitusScanState *scanState, PlannedStmt *taskPlan, char *que bool ShouldExecuteTasksLocally(List *taskList) { - bool singleTask = false; - if (!EnableLocalExecution) { return false; @@ -394,7 +388,7 @@ ShouldExecuteTasksLocally(List *taskList) return true; } - singleTask = (list_length(taskList) == 1); + bool singleTask = (list_length(taskList) == 1); if (singleTask && 
TaskAccessesLocalNode((Task *) linitial(taskList))) { /* diff --git a/src/backend/distributed/executor/multi_client_executor.c b/src/backend/distributed/executor/multi_client_executor.c index 13a36bdb5..e6918ce20 100644 --- a/src/backend/distributed/executor/multi_client_executor.c +++ b/src/backend/distributed/executor/multi_client_executor.c @@ -55,10 +55,9 @@ static int32 AllocateConnectionId(void) { int32 connectionId = INVALID_CONNECTION_ID; - int32 connIndex = 0; /* allocate connectionId from connection pool */ - for (connIndex = 0; connIndex < MAX_CONNECTION_COUNT; connIndex++) + for (int32 connIndex = 0; connIndex < MAX_CONNECTION_COUNT; connIndex++) { MultiConnection *connection = ClientConnectionArray[connIndex]; if (connection == NULL) @@ -84,8 +83,6 @@ int32 MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *userName) { - MultiConnection *connection = NULL; - ConnStatusType connStatusType = CONNECTION_OK; int32 connectionId = AllocateConnectionId(); int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */ @@ -103,10 +100,11 @@ MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDataba } /* establish synchronous connection to worker node */ - connection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - userName, nodeDatabase); + MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, + nodePort, + userName, nodeDatabase); - connStatusType = PQstatus(connection->pgConn); + ConnStatusType connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_OK) { @@ -132,8 +130,6 @@ int32 MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *userName) { - MultiConnection *connection = NULL; - ConnStatusType connStatusType = CONNECTION_OK; int32 connectionId = AllocateConnectionId(); int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */ 
@@ -151,9 +147,10 @@ MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeD } /* prepare asynchronous request for worker node connection */ - connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - userName, nodeDatabase); - connStatusType = PQstatus(connection->pgConn); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + userName, nodeDatabase); + ConnStatusType connStatusType = PQstatus(connection->pgConn); /* * If prepared, we save the connection, and set its initial polling status @@ -181,15 +178,13 @@ MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeD ConnectStatus MultiClientConnectPoll(int32 connectionId) { - MultiConnection *connection = NULL; - PostgresPollingStatusType pollingStatus = PGRES_POLLING_OK; ConnectStatus connectStatus = CLIENT_INVALID_CONNECT; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - pollingStatus = ClientPollingStatusArray[connectionId]; + PostgresPollingStatusType pollingStatus = ClientPollingStatusArray[connectionId]; if (pollingStatus == PGRES_POLLING_OK) { connectStatus = CLIENT_CONNECTION_READY; @@ -235,11 +230,10 @@ MultiClientConnectPoll(int32 connectionId) void MultiClientDisconnect(int32 connectionId) { - MultiConnection *connection = NULL; const int InvalidPollingStatus = -1; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); CloseConnection(connection); @@ -256,15 +250,13 @@ MultiClientDisconnect(int32 connectionId) bool MultiClientConnectionUp(int32 connectionId) { - MultiConnection *connection = NULL; - ConnStatusType connStatusType = CONNECTION_OK; bool connectionUp = true; Assert(connectionId != 
INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - connStatusType = PQstatus(connection->pgConn); + ConnStatusType connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { connectionUp = false; @@ -278,15 +270,13 @@ MultiClientConnectionUp(int32 connectionId) bool MultiClientSendQuery(int32 connectionId, const char *query) { - MultiConnection *connection = NULL; bool success = true; - int querySent = 0; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - querySent = SendRemoteCommand(connection, query); + int querySent = SendRemoteCommand(connection, query); if (querySent == 0) { char *errorMessage = pchomp(PQerrorMessage(connection->pgConn)); @@ -313,14 +303,11 @@ MultiClientSendQuery(int32 connectionId, const char *query) bool MultiClientCancel(int32 connectionId) { - MultiConnection *connection = NULL; - bool canceled = true; - Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - canceled = SendCancelationRequest(connection); + bool canceled = SendCancelationRequest(connection); return canceled; } @@ -330,16 +317,13 @@ MultiClientCancel(int32 connectionId) ResultStatus MultiClientResultStatus(int32 connectionId) { - MultiConnection *connection = NULL; - int consumed = 0; - ConnStatusType connStatusType = CONNECTION_OK; ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - connStatusType = PQstatus(connection->pgConn); + 
ConnStatusType connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); @@ -347,7 +331,7 @@ MultiClientResultStatus(int32 connectionId) } /* consume input to allow status change */ - consumed = PQconsumeInput(connection->pgConn); + int consumed = PQconsumeInput(connection->pgConn); if (consumed != 0) { int connectionBusy = PQisBusy(connection->pgConn); @@ -383,15 +367,11 @@ BatchQueryStatus MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount, int *columnCount) { - MultiConnection *connection = NULL; - PGresult *result = NULL; - ConnStatusType connStatusType = CONNECTION_OK; - ExecStatusType resultStatus = PGRES_COMMAND_OK; BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY; bool raiseInterrupts = true; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); /* set default result */ @@ -399,20 +379,20 @@ MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount, (*rowCount) = -1; (*columnCount) = -1; - connStatusType = PQstatus(connection->pgConn); + ConnStatusType connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); return CLIENT_BATCH_QUERY_FAILED; } - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (result == NULL) { return CLIENT_BATCH_QUERY_DONE; } - resultStatus = PQresultStatus(result); + ExecStatusType resultStatus = PQresultStatus(result); if (resultStatus == PGRES_TUPLES_OK) { (*queryResult) = (void **) result; @@ -457,20 +437,16 @@ MultiClientClearResult(void *queryResult) QueryStatus MultiClientQueryStatus(int32 connectionId) { - MultiConnection *connection = NULL; 
- PGresult *result = NULL; int tupleCount PG_USED_FOR_ASSERTS_ONLY = 0; bool copyResults = false; - ConnStatusType connStatusType = CONNECTION_OK; - ExecStatusType resultStatus = PGRES_COMMAND_OK; QueryStatus queryStatus = CLIENT_INVALID_QUERY; bool raiseInterrupts = true; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); - connStatusType = PQstatus(connection->pgConn); + ConnStatusType connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); @@ -482,8 +458,8 @@ MultiClientQueryStatus(int32 connectionId) * isn't ready yet (the caller didn't wait for the connection to be ready), * we will block on this call. */ - result = GetRemoteCommandResult(connection, raiseInterrupts); - resultStatus = PQresultStatus(result); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + ExecStatusType resultStatus = PQresultStatus(result); if (resultStatus == PGRES_COMMAND_OK) { @@ -536,22 +512,19 @@ MultiClientQueryStatus(int32 connectionId) CopyStatus MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnBytesReceived) { - MultiConnection *connection = NULL; char *receiveBuffer = NULL; - int consumed = 0; - int receiveLength = 0; const int asynchronous = 1; CopyStatus copyStatus = CLIENT_INVALID_COPY; Assert(connectionId != INVALID_CONNECTION_ID); - connection = ClientConnectionArray[connectionId]; + MultiConnection *connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); /* * Consume input to handle the case where previous copy operation might have * received zero bytes. 
*/ - consumed = PQconsumeInput(connection->pgConn); + int consumed = PQconsumeInput(connection->pgConn); if (consumed == 0) { ereport(WARNING, (errmsg("could not read data from worker node"))); @@ -559,11 +532,10 @@ MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnByte } /* receive copy data message in an asynchronous manner */ - receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous); + int receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous); while (receiveLength > 0) { /* received copy data; append these data to file */ - int appended = -1; errno = 0; if (returnBytesReceived) @@ -571,7 +543,7 @@ MultiClientCopyData(int32 connectionId, int32 fileDescriptor, uint64 *returnByte *returnBytesReceived += receiveLength; } - appended = write(fileDescriptor, receiveBuffer, receiveLength); + int appended = write(fileDescriptor, receiveBuffer, receiveLength); if (appended != receiveLength) { /* if write didn't set errno, assume problem is no disk space */ diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 6ff22c1ef..42c75821b 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -196,9 +196,6 @@ TupleTableSlot * ReturnTupleFromTuplestore(CitusScanState *scanState) { Tuplestorestate *tupleStore = scanState->tuplestorestate; - TupleTableSlot *resultSlot = NULL; - EState *executorState = NULL; - ScanDirection scanDirection = NoMovementScanDirection; bool forwardScanDirection = true; if (tupleStore == NULL) @@ -206,8 +203,8 @@ ReturnTupleFromTuplestore(CitusScanState *scanState) return NULL; } - executorState = ScanStateGetExecutorState(scanState); - scanDirection = executorState->es_direction; + EState *executorState = ScanStateGetExecutorState(scanState); + ScanDirection scanDirection = executorState->es_direction; Assert(ScanDirectionIsValid(scanDirection)); if 
(ScanDirectionIsBackward(scanDirection)) @@ -215,7 +212,7 @@ ReturnTupleFromTuplestore(CitusScanState *scanState) forwardScanDirection = false; } - resultSlot = scanState->customScanState.ss.ps.ps_ResultTupleSlot; + TupleTableSlot *resultSlot = scanState->customScanState.ss.ps.ps_ResultTupleSlot; tuplestore_gettupleslot(tupleStore, forwardScanDirection, false, resultSlot); return resultSlot; @@ -234,13 +231,12 @@ void LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob) { List *workerTaskList = workerJob->taskList; - TupleDesc tupleDescriptor = NULL; ListCell *workerTaskCell = NULL; bool randomAccess = true; bool interTransactions = false; char *copyFormat = "text"; - tupleDescriptor = ScanStateGetTupleDescriptor(citusScanState); + TupleDesc tupleDescriptor = ScanStateGetTupleDescriptor(citusScanState); Assert(citusScanState->tuplestorestate == NULL); citusScanState->tuplestorestate = @@ -254,11 +250,9 @@ LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob) foreach(workerTaskCell, workerTaskList) { Task *workerTask = (Task *) lfirst(workerTaskCell); - StringInfo jobDirectoryName = NULL; - StringInfo taskFilename = NULL; - jobDirectoryName = MasterJobDirectoryName(workerTask->jobId); - taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId); + StringInfo jobDirectoryName = MasterJobDirectoryName(workerTask->jobId); + StringInfo taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId); ReadFileIntoTupleStore(taskFilename->data, copyFormat, tupleDescriptor, citusScanState->tuplestorestate); @@ -277,8 +271,6 @@ void ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescriptor, Tuplestorestate *tupstore) { - CopyState copyState = NULL; - /* * Trick BeginCopyFrom into using our tuple descriptor by pretending it belongs * to a relation. 
@@ -293,26 +285,23 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript Datum *columnValues = palloc0(columnCount * sizeof(Datum)); bool *columnNulls = palloc0(columnCount * sizeof(bool)); - DefElem *copyOption = NULL; List *copyOptions = NIL; int location = -1; /* "unknown" token location */ - copyOption = makeDefElem("format", (Node *) makeString(copyFormat), location); + DefElem *copyOption = makeDefElem("format", (Node *) makeString(copyFormat), + location); copyOptions = lappend(copyOptions, copyOption); - copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL, - NULL, copyOptions); + CopyState copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL, + NULL, copyOptions); while (true) { - MemoryContext oldContext = NULL; - bool nextRowFound = false; - ResetPerTupleExprContext(executorState); - oldContext = MemoryContextSwitchTo(executorTupleContext); + MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); - nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); + bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, + columnValues, columnNulls); if (!nextRowFound) { MemoryContextSwitchTo(oldContext); @@ -355,7 +344,6 @@ SortTupleStore(CitusScanState *scanState) ListCell *targetCell = NULL; int sortKeyIndex = 0; - Tuplesortstate *tuplesortstate = NULL; /* * Iterate on the returning target list and generate the necessary information @@ -380,7 +368,7 @@ SortTupleStore(CitusScanState *scanState) sortKeyIndex++; } - tuplesortstate = + Tuplesortstate *tuplesortstate = tuplesort_begin_heap(tupleDescriptor, numberOfSortKeys, sortColIdx, sortOperators, collations, nullsFirst, work_mem, NULL, false); @@ -467,7 +455,6 @@ ExecuteQueryStringIntoDestReceiver(const char *queryString, ParamListInfo params Query * ParseQueryString(const char *queryString, Oid *paramOids, int numParams) { - Query *query = NULL; RawStmt *rawStmt = 
(RawStmt *) ParseTreeRawStmt(queryString); List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, paramOids, numParams, NULL); @@ -477,7 +464,7 @@ ParseQueryString(const char *queryString, Oid *paramOids, int numParams) ereport(ERROR, (errmsg("can only execute a single query"))); } - query = (Query *) linitial(queryTreeList); + Query *query = (Query *) linitial(queryTreeList); return query; } @@ -490,13 +477,10 @@ ParseQueryString(const char *queryString, Oid *paramOids, int numParams) void ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *dest) { - PlannedStmt *queryPlan = NULL; - int cursorOptions = 0; - - cursorOptions = CURSOR_OPT_PARALLEL_OK; + int cursorOptions = CURSOR_OPT_PARALLEL_OK; /* plan the subquery, this may be another distributed query */ - queryPlan = pg_plan_query(query, cursorOptions, params); + PlannedStmt *queryPlan = pg_plan_query(query, cursorOptions, params); ExecutePlanIntoDestReceiver(queryPlan, params, dest); } @@ -510,12 +494,11 @@ void ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params, DestReceiver *dest) { - Portal portal = NULL; int eflags = 0; long count = FETCH_ALL; /* create a new portal for executing the query */ - portal = CreateNewPortal(); + Portal portal = CreateNewPortal(); /* don't display the portal in pg_cursors, it is for internal use only */ portal->visible = false; diff --git a/src/backend/distributed/executor/multi_server_executor.c b/src/backend/distributed/executor/multi_server_executor.c index 714f87dd2..99a2638c8 100644 --- a/src/backend/distributed/executor/multi_server_executor.c +++ b/src/backend/distributed/executor/multi_server_executor.c @@ -170,7 +170,6 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus) { /* each task placement (assignment) corresponds to one worker node */ uint32 nodeCount = list_length(task->taskPlacementList); - uint32 nodeIndex = 0; TaskExecution *taskExecution = CitusMakeNode(TaskExecution); @@ -185,7 
+184,7 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus) taskExecution->connectionIdArray = palloc0(nodeCount * sizeof(int32)); taskExecution->fileDescriptorArray = palloc0(nodeCount * sizeof(int32)); - for (nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++) + for (uint32 nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++) { taskExecution->taskStatusArray[nodeIndex] = initialTaskExecStatus; taskExecution->transmitStatusArray[nodeIndex] = EXEC_TRANSMIT_UNASSIGNED; @@ -205,8 +204,7 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus) void CleanupTaskExecution(TaskExecution *taskExecution) { - uint32 nodeIndex = 0; - for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) + for (uint32 nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { int32 connectionId = taskExecution->connectionIdArray[nodeIndex]; int32 fileDescriptor = taskExecution->fileDescriptorArray[nodeIndex]; @@ -284,14 +282,12 @@ AdjustStateForFailure(TaskExecution *taskExecution) bool CheckIfSizeLimitIsExceeded(DistributedExecutionStats *executionStats) { - uint64 maxIntermediateResultInBytes = 0; - if (!SubPlanLevel || MaxIntermediateResult < 0) { return false; } - maxIntermediateResultInBytes = MaxIntermediateResult * 1024L; + uint64 maxIntermediateResultInBytes = MaxIntermediateResult * 1024L; if (executionStats->totalIntermediateResultSize < maxIntermediateResultInBytes) { return false; diff --git a/src/backend/distributed/executor/multi_task_tracker_executor.c b/src/backend/distributed/executor/multi_task_tracker_executor.c index 5c3d4c2d5..08188dac4 100644 --- a/src/backend/distributed/executor/multi_task_tracker_executor.c +++ b/src/backend/distributed/executor/multi_task_tracker_executor.c @@ -158,9 +158,7 @@ void MultiTaskTrackerExecute(Job *job) { List *jobTaskList = job->taskList; - List *taskAndExecutionList = NIL; ListCell *taskAndExecutionCell = NULL; - uint32 taskTrackerCount = 0; uint32 topLevelTaskCount = 0; uint32 
failedTaskId = 0; bool allTasksCompleted = false; @@ -170,13 +168,9 @@ MultiTaskTrackerExecute(Job *job) bool sizeLimitIsExceeded = false; DistributedExecutionStats executionStats = { 0 }; - List *workerNodeList = NIL; - HTAB *taskTrackerHash = NULL; - HTAB *transmitTrackerHash = NULL; char *extensionOwner = CitusExtensionOwnerName(); const char *taskTrackerHashName = "Task Tracker Hash"; const char *transmitTrackerHashName = "Transmit Tracker Hash"; - List *jobIdList = NIL; if (ReadFromSecondaries == USE_SECONDARY_NODES_ALWAYS) { @@ -189,7 +183,7 @@ MultiTaskTrackerExecute(Job *job) * We walk over the task tree, and create a task execution struct for each * task. We then associate the task with its execution and get back a list. */ - taskAndExecutionList = TaskAndExecutionList(jobTaskList); + List *taskAndExecutionList = TaskAndExecutionList(jobTaskList); /* * We now count the number of "top level" tasks in the query tree. Once they @@ -212,15 +206,15 @@ MultiTaskTrackerExecute(Job *job) * assigning and checking the status of tasks. The second (temporary) hash * helps us in fetching results data from worker nodes to the master node. 
*/ - workerNodeList = ActivePrimaryWorkerNodeList(NoLock); - taskTrackerCount = (uint32) list_length(workerNodeList); + List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock); + uint32 taskTrackerCount = (uint32) list_length(workerNodeList); /* connect as the current user for running queries */ - taskTrackerHash = TrackerHash(taskTrackerHashName, workerNodeList, NULL); + HTAB *taskTrackerHash = TrackerHash(taskTrackerHashName, workerNodeList, NULL); /* connect as the superuser for fetching result files */ - transmitTrackerHash = TrackerHash(transmitTrackerHashName, workerNodeList, - extensionOwner); + HTAB *transmitTrackerHash = TrackerHash(transmitTrackerHashName, workerNodeList, + extensionOwner); TrackerHashConnect(taskTrackerHash); TrackerHashConnect(transmitTrackerHash); @@ -243,7 +237,6 @@ MultiTaskTrackerExecute(Job *job) { Task *task = (Task *) lfirst(taskAndExecutionCell); TaskExecution *taskExecution = task->taskExecution; - TaskExecStatus taskExecutionStatus = 0; TaskTracker *execTaskTracker = ResolveTaskTracker(taskTrackerHash, task, taskExecution); @@ -252,8 +245,9 @@ MultiTaskTrackerExecute(Job *job) Assert(execTaskTracker != NULL); /* call the function that performs the core task execution logic */ - taskExecutionStatus = ManageTaskExecution(execTaskTracker, mapTaskTracker, - task, taskExecution); + TaskExecStatus taskExecutionStatus = ManageTaskExecution(execTaskTracker, + mapTaskTracker, + task, taskExecution); /* * If task cannot execute on this task/map tracker, we fail over all @@ -261,8 +255,6 @@ MultiTaskTrackerExecute(Job *job) */ if (taskExecutionStatus == EXEC_TASK_TRACKER_FAILED) { - List *taskList = NIL; - /* mark task tracker as failed, in case it isn't marked already */ execTaskTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; @@ -275,22 +267,20 @@ MultiTaskTrackerExecute(Job *job) task, taskExecution); transmitTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; - taskList = ConstrainedTaskList(taskAndExecutionList, 
task); + List *taskList = ConstrainedTaskList(taskAndExecutionList, task); ReassignTaskList(taskList); } else if (taskExecutionStatus == EXEC_SOURCE_TASK_TRACKER_FAILED) { - List *mapFetchTaskList = NIL; - List *mapTaskList = NIL; - /* first resolve the map task this map fetch task depends on */ Task *mapTask = (Task *) linitial(task->dependedTaskList); Assert(task->taskType == MAP_OUTPUT_FETCH_TASK); - mapFetchTaskList = UpstreamDependencyList(taskAndExecutionList, mapTask); + List *mapFetchTaskList = UpstreamDependencyList(taskAndExecutionList, + mapTask); ReassignMapFetchTaskList(mapFetchTaskList); - mapTaskList = ConstrainedTaskList(taskAndExecutionList, mapTask); + List *mapTaskList = ConstrainedTaskList(taskAndExecutionList, mapTask); ReassignTaskList(mapTaskList); } @@ -313,10 +303,7 @@ MultiTaskTrackerExecute(Job *job) { Task *task = (Task *) lfirst(taskAndExecutionCell); TaskExecution *taskExecution = task->taskExecution; - TransmitExecStatus transmitExecutionStatus = 0; - TaskTracker *execTransmitTracker = NULL; - bool transmitCompleted = false; /* * We find the tasks that appear in the top level of the query tree, @@ -328,14 +315,17 @@ MultiTaskTrackerExecute(Job *job) continue; } - execTransmitTracker = ResolveTaskTracker(transmitTrackerHash, - task, taskExecution); + TaskTracker *execTransmitTracker = ResolveTaskTracker(transmitTrackerHash, + task, taskExecution); Assert(execTransmitTracker != NULL); /* call the function that fetches results for completed SQL tasks */ - transmitExecutionStatus = ManageTransmitExecution(execTransmitTracker, - task, taskExecution, - &executionStats); + TransmitExecStatus transmitExecutionStatus = ManageTransmitExecution( + execTransmitTracker, + task, + taskExecution, + & + executionStats); /* * If we cannot transmit SQL task's results to the master, we first @@ -344,13 +334,11 @@ MultiTaskTrackerExecute(Job *job) */ if (transmitExecutionStatus == EXEC_TRANSMIT_TRACKER_FAILED) { - List *taskList = NIL; - taskTracker = 
ResolveTaskTracker(taskTrackerHash, task, taskExecution); taskTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; - taskList = ConstrainedTaskList(taskAndExecutionList, task); + List *taskList = ConstrainedTaskList(taskAndExecutionList, task); ReassignTaskList(taskList); } @@ -362,7 +350,7 @@ MultiTaskTrackerExecute(Job *job) break; } - transmitCompleted = TransmitExecutionCompleted(taskExecution); + bool transmitCompleted = TransmitExecutionCompleted(taskExecution); if (transmitCompleted) { completedTransmitCount++; @@ -430,7 +418,7 @@ MultiTaskTrackerExecute(Job *job) */ HOLD_INTERRUPTS(); - jobIdList = JobIdList(job); + List *jobIdList = JobIdList(job); TrackerCleanupResources(taskTrackerHash, transmitTrackerHash, jobIdList, taskAndExecutionList); @@ -470,7 +458,6 @@ static List * TaskAndExecutionList(List *jobTaskList) { List *taskAndExecutionList = NIL; - List *taskQueue = NIL; const int topLevelTaskHashSize = 32; int taskHashSize = list_length(jobTaskList) * topLevelTaskHashSize; HTAB *taskHash = TaskHashCreate(taskHashSize); @@ -479,11 +466,9 @@ TaskAndExecutionList(List *jobTaskList) * We walk over the task tree using breadth-first search. For the search, we * first queue top level tasks in the task tree. 
*/ - taskQueue = list_copy(jobTaskList); + List *taskQueue = list_copy(jobTaskList); while (taskQueue != NIL) { - TaskExecution *taskExecution = NULL; - List *dependendTaskList = NIL; ListCell *dependedTaskCell = NULL; /* pop first element from the task queue */ @@ -491,12 +476,12 @@ TaskAndExecutionList(List *jobTaskList) taskQueue = list_delete_first(taskQueue); /* create task execution and associate it with task */ - taskExecution = InitTaskExecution(task, EXEC_TASK_UNASSIGNED); + TaskExecution *taskExecution = InitTaskExecution(task, EXEC_TASK_UNASSIGNED); task->taskExecution = taskExecution; taskAndExecutionList = lappend(taskAndExecutionList, task); - dependendTaskList = task->dependedTaskList; + List *dependendTaskList = task->dependedTaskList; /* * Push task node's children into the task queue, if and only if @@ -552,8 +537,6 @@ TaskHashCreate(uint32 taskHashSize) { HASHCTL info; const char *taskHashName = "Task Hash"; - int hashFlags = 0; - HTAB *taskHash = NULL; /* * Can't create a hashtable of size 0. 
Normally that shouldn't happen, but @@ -569,9 +552,9 @@ TaskHashCreate(uint32 taskHashSize) info.entrysize = sizeof(TaskMapEntry); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + int hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - taskHash = hash_create(taskHashName, taskHashSize, &info, hashFlags); + HTAB *taskHash = hash_create(taskHashName, taskHashSize, &info, hashFlags); return taskHash; } @@ -584,8 +567,6 @@ TaskHashCreate(uint32 taskHashSize) static Task * TaskHashEnter(HTAB *taskHash, Task *task) { - void *hashKey = NULL; - TaskMapEntry *taskInTheHash = NULL; bool handleFound = false; TaskMapKey taskKey; @@ -595,9 +576,10 @@ TaskHashEnter(HTAB *taskHash, Task *task) taskKey.jobId = task->jobId; taskKey.taskId = task->taskId; - hashKey = (void *) &taskKey; - taskInTheHash = (TaskMapEntry *) hash_search(taskHash, hashKey, HASH_ENTER, - &handleFound); + void *hashKey = (void *) &taskKey; + TaskMapEntry *taskInTheHash = (TaskMapEntry *) hash_search(taskHash, hashKey, + HASH_ENTER, + &handleFound); /* if same node appears twice, we error-out */ if (handleFound) @@ -620,9 +602,7 @@ TaskHashEnter(HTAB *taskHash, Task *task) static Task * TaskHashLookup(HTAB *taskHash, TaskType taskType, uint64 jobId, uint32 taskId) { - TaskMapEntry *taskEntry = NULL; Task *task = NULL; - void *hashKey = NULL; bool handleFound = false; TaskMapKey taskKey; @@ -632,8 +612,9 @@ TaskHashLookup(HTAB *taskHash, TaskType taskType, uint64 jobId, uint32 taskId) taskKey.jobId = jobId; taskKey.taskId = taskId; - hashKey = (void *) &taskKey; - taskEntry = (TaskMapEntry *) hash_search(taskHash, hashKey, HASH_FIND, &handleFound); + void *hashKey = (void *) &taskKey; + TaskMapEntry *taskEntry = (TaskMapEntry *) hash_search(taskHash, hashKey, HASH_FIND, + &handleFound); if (taskEntry != NULL) { @@ -672,9 +653,8 @@ static bool TransmitExecutionCompleted(TaskExecution *taskExecution) { bool completed = false; - uint32 
nodeIndex = 0; - for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) + for (uint32 nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { TransmitExecStatus *transmitStatusArray = taskExecution->transmitStatusArray; @@ -710,15 +690,12 @@ TrackerHash(const char *taskTrackerHashName, List *workerNodeList, char *userNam char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; - TaskTracker *taskTracker = NULL; char taskStateHashName[MAXPGPATH]; - HTAB *taskStateHash = NULL; uint32 taskStateCount = 32; - int hashFlags = 0; HASHCTL info; /* insert task tracker into the tracker hash */ - taskTracker = TrackerHashEnter(taskTrackerHash, nodeName, nodePort); + TaskTracker *taskTracker = TrackerHashEnter(taskTrackerHash, nodeName, nodePort); /* for each task tracker, create hash to track its assigned tasks */ snprintf(taskStateHashName, MAXPGPATH, @@ -729,9 +706,10 @@ TrackerHash(const char *taskTrackerHashName, List *workerNodeList, char *userNam info.entrysize = sizeof(TrackerTaskState); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + int hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - taskStateHash = hash_create(taskStateHashName, taskStateCount, &info, hashFlags); + HTAB *taskStateHash = hash_create(taskStateHashName, taskStateCount, &info, + hashFlags); if (taskStateHash == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -754,18 +732,16 @@ static HTAB * TrackerHashCreate(const char *taskTrackerHashName, uint32 taskTrackerHashSize) { HASHCTL info; - int hashFlags = 0; - HTAB *taskTrackerHash = NULL; memset(&info, 0, sizeof(info)); info.keysize = WORKER_LENGTH + sizeof(uint32); info.entrysize = sizeof(TaskTracker); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + int hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - taskTrackerHash = 
hash_create(taskTrackerHashName, taskTrackerHashSize, - &info, hashFlags); + HTAB *taskTrackerHash = hash_create(taskTrackerHashName, taskTrackerHashSize, + &info, hashFlags); if (taskTrackerHash == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -784,8 +760,6 @@ TrackerHashCreate(const char *taskTrackerHashName, uint32 taskTrackerHashSize) static TaskTracker * TrackerHashEnter(HTAB *taskTrackerHash, char *nodeName, uint32 nodePort) { - TaskTracker *taskTracker = NULL; - void *hashKey = NULL; bool handleFound = false; TaskTracker taskTrackerKey; @@ -793,9 +767,9 @@ TrackerHashEnter(HTAB *taskTrackerHash, char *nodeName, uint32 nodePort) strlcpy(taskTrackerKey.workerName, nodeName, WORKER_LENGTH); taskTrackerKey.workerPort = nodePort; - hashKey = (void *) &taskTrackerKey; - taskTracker = (TaskTracker *) hash_search(taskTrackerHash, hashKey, - HASH_ENTER, &handleFound); + void *hashKey = (void *) &taskTrackerKey; + TaskTracker *taskTracker = (TaskTracker *) hash_search(taskTrackerHash, hashKey, + HASH_ENTER, &handleFound); /* if same node appears twice, we overwrite previous entry */ if (handleFound) @@ -829,15 +803,13 @@ TrackerHashConnect(HTAB *taskTrackerHash) /* loop until we tried to connect to all task trackers */ while (triedTrackerCount < taskTrackerCount) { - TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; - long sleepIntervalPerCycle = 0; /* loop over the task tracker hash, and poll all trackers again */ triedTrackerCount = 0; hash_seq_init(&status, taskTrackerHash); - taskTracker = (TaskTracker *) hash_seq_search(&status); + TaskTracker *taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { TrackerStatus trackerStatus = TrackerConnectPoll(taskTracker); @@ -851,7 +823,7 @@ TrackerHashConnect(HTAB *taskTrackerHash) } /* sleep to avoid tight loop */ - sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; + long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } } 
@@ -988,10 +960,6 @@ ResolveTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution) static TaskTracker * ResolveMapTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution) { - TaskTracker *mapTaskTracker = NULL; - Task *mapTask = NULL; - TaskExecution *mapTaskExecution = NULL; - /* we only resolve source (map) task tracker for map output fetch tasks */ if (task->taskType != MAP_OUTPUT_FETCH_TASK) { @@ -999,10 +967,11 @@ ResolveMapTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecutio } Assert(task->dependedTaskList != NIL); - mapTask = (Task *) linitial(task->dependedTaskList); - mapTaskExecution = mapTask->taskExecution; + Task *mapTask = (Task *) linitial(task->dependedTaskList); + TaskExecution *mapTaskExecution = mapTask->taskExecution; - mapTaskTracker = ResolveTaskTracker(trackerHash, mapTask, mapTaskExecution); + TaskTracker *mapTaskTracker = ResolveTaskTracker(trackerHash, mapTask, + mapTaskExecution); Assert(mapTaskTracker != NULL); return mapTaskTracker; @@ -1016,8 +985,6 @@ ResolveMapTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecutio static TaskTracker * TrackerHashLookup(HTAB *trackerHash, const char *nodeName, uint32 nodePort) { - TaskTracker *taskTracker = NULL; - void *hashKey = NULL; bool handleFound = false; TaskTracker taskTrackerKey; @@ -1025,9 +992,9 @@ TrackerHashLookup(HTAB *trackerHash, const char *nodeName, uint32 nodePort) strlcpy(taskTrackerKey.workerName, nodeName, WORKER_LENGTH); taskTrackerKey.workerPort = nodePort; - hashKey = (void *) &taskTrackerKey; - taskTracker = (TaskTracker *) hash_search(trackerHash, hashKey, - HASH_FIND, &handleFound); + void *hashKey = (void *) &taskTrackerKey; + TaskTracker *taskTracker = (TaskTracker *) hash_search(trackerHash, hashKey, + HASH_FIND, &handleFound); if (taskTracker == NULL || !handleFound) { ereport(ERROR, (errmsg("could not find task tracker for node \"%s:%u\"", @@ -1056,7 +1023,6 @@ ManageTaskExecution(TaskTracker 
*taskTracker, TaskTracker *sourceTaskTracker, { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; uint32 currentNodeIndex = taskExecution->currentNodeIndex; - uint32 nextNodeIndex = 0; TaskExecStatus currentExecutionStatus = taskStatusArray[currentNodeIndex]; TaskExecStatus nextExecutionStatus = EXEC_TASK_INVALID_FIRST; @@ -1065,9 +1031,6 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, { case EXEC_TASK_UNASSIGNED: { - bool taskExecutionsCompleted = true; - TaskType taskType = TASK_TYPE_INVALID_FIRST; - bool trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { @@ -1079,7 +1042,8 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, * We first retrieve this task's downstream dependencies, and then check * if these dependencies' executions have completed. */ - taskExecutionsCompleted = TaskExecutionsCompleted(task->dependedTaskList); + bool taskExecutionsCompleted = TaskExecutionsCompleted( + task->dependedTaskList); if (!taskExecutionsCompleted) { nextExecutionStatus = EXEC_TASK_UNASSIGNED; @@ -1087,14 +1051,14 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, } /* if map fetch task, create query string from completed map task */ - taskType = task->taskType; + TaskType taskType = task->taskType; if (taskType == MAP_OUTPUT_FETCH_TASK) { - StringInfo mapFetchTaskQueryString = NULL; Task *mapTask = (Task *) linitial(task->dependedTaskList); TaskExecution *mapTaskExecution = mapTask->taskExecution; - mapFetchTaskQueryString = MapFetchTaskQueryString(task, mapTask); + StringInfo mapFetchTaskQueryString = MapFetchTaskQueryString(task, + mapTask); task->queryString = mapFetchTaskQueryString->data; taskExecution->querySourceNodeIndex = mapTaskExecution->currentNodeIndex; } @@ -1118,8 +1082,6 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, case EXEC_TASK_QUEUED: { - TaskStatus remoteTaskStatus = TASK_STATUS_INVALID_FIRST; 
- bool trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { @@ -1127,7 +1089,7 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, break; } - remoteTaskStatus = TrackerTaskStatus(taskTracker, task); + TaskStatus remoteTaskStatus = TrackerTaskStatus(taskTracker, task); if (remoteTaskStatus == TASK_SUCCEEDED) { nextExecutionStatus = EXEC_TASK_DONE; @@ -1165,22 +1127,19 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, case EXEC_TASK_TRACKER_RETRY: { - bool trackerHealthy = false; - bool trackerConnectionUp = false; - /* * This case statement usually handles connection related issues. Some * edge cases however, like a user sending a SIGTERM to the worker node, * keep the connection open but disallow task assignments. We therefore * need to track those as intermittent tracker failures here. */ - trackerConnectionUp = TrackerConnectionUp(taskTracker); + bool trackerConnectionUp = TrackerConnectionUp(taskTracker); if (trackerConnectionUp) { taskTracker->trackerFailureCount++; } - trackerHealthy = TrackerHealthy(taskTracker); + bool trackerHealthy = TrackerHealthy(taskTracker); if (trackerHealthy) { TaskStatus remoteTaskStatus = TrackerTaskStatus(taskTracker, task); @@ -1207,7 +1166,6 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, TaskExecution *mapTaskExecution = mapTask->taskExecution; uint32 sourceNodeIndex = mapTaskExecution->currentNodeIndex; - bool sourceTrackerHealthy = false; Assert(sourceTaskTracker != NULL); Assert(task->taskType == MAP_OUTPUT_FETCH_TASK); @@ -1227,7 +1185,7 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, } } - sourceTrackerHealthy = TrackerHealthy(sourceTaskTracker); + bool sourceTrackerHealthy = TrackerHealthy(sourceTaskTracker); if (sourceTrackerHealthy) { /* @@ -1274,7 +1232,7 @@ ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, } /* update task execution's status for most 
recent task tracker */ - nextNodeIndex = taskExecution->currentNodeIndex; + uint32 nextNodeIndex = taskExecution->currentNodeIndex; taskStatusArray[nextNodeIndex] = nextExecutionStatus; return nextExecutionStatus; @@ -1295,7 +1253,6 @@ ManageTransmitExecution(TaskTracker *transmitTracker, { int32 *fileDescriptorArray = taskExecution->fileDescriptorArray; uint32 currentNodeIndex = taskExecution->currentNodeIndex; - uint32 nextNodeIndex = 0; TransmitExecStatus *transmitStatusArray = taskExecution->transmitStatusArray; TransmitExecStatus currentTransmitStatus = transmitStatusArray[currentNodeIndex]; @@ -1308,7 +1265,6 @@ ManageTransmitExecution(TaskTracker *transmitTracker, { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; TaskExecStatus currentExecutionStatus = taskStatusArray[currentNodeIndex]; - bool trackerHealthy = false; /* if top level task's in progress, nothing to do */ if (currentExecutionStatus != EXEC_TASK_DONE) @@ -1317,7 +1273,7 @@ ManageTransmitExecution(TaskTracker *transmitTracker, break; } - trackerHealthy = TrackerHealthy(transmitTracker); + bool trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_FAILED; @@ -1331,10 +1287,6 @@ ManageTransmitExecution(TaskTracker *transmitTracker, case EXEC_TRANSMIT_QUEUED: { - QueryStatus queryStatus = CLIENT_INVALID_QUERY; - int32 connectionId = INVALID_CONNECTION_ID; - TaskStatus taskStatus = TASK_STATUS_INVALID_FIRST; - bool trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { @@ -1342,7 +1294,7 @@ ManageTransmitExecution(TaskTracker *transmitTracker, break; } - taskStatus = TrackerTaskStatus(transmitTracker, task); + TaskStatus taskStatus = TrackerTaskStatus(transmitTracker, task); if (taskStatus == TASK_FILE_TRANSMIT_QUEUED) { /* remain in queued status until tracker assigns this task */ @@ -1356,12 +1308,12 @@ ManageTransmitExecution(TaskTracker *transmitTracker, } /* the open connection belongs to this task 
*/ - connectionId = TransmitTrackerConnectionId(transmitTracker, task); + int32 connectionId = TransmitTrackerConnectionId(transmitTracker, task); Assert(connectionId != INVALID_CONNECTION_ID); Assert(taskStatus == TASK_ASSIGNED); /* start copy protocol */ - queryStatus = MultiClientQueryStatus(connectionId); + QueryStatus queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_COPY) { StringInfo jobDirectoryName = MasterJobDirectoryName(task->jobId); @@ -1413,7 +1365,6 @@ ManageTransmitExecution(TaskTracker *transmitTracker, case EXEC_TRANSMIT_COPYING: { int32 fileDescriptor = fileDescriptorArray[currentNodeIndex]; - CopyStatus copyStatus = CLIENT_INVALID_COPY; int closed = -1; uint64 bytesReceived = 0; @@ -1421,8 +1372,8 @@ ManageTransmitExecution(TaskTracker *transmitTracker, int32 connectionId = TransmitTrackerConnectionId(transmitTracker, task); Assert(connectionId != INVALID_CONNECTION_ID); - copyStatus = MultiClientCopyData(connectionId, fileDescriptor, - &bytesReceived); + CopyStatus copyStatus = MultiClientCopyData(connectionId, fileDescriptor, + &bytesReceived); if (SubPlanLevel > 0) { @@ -1481,21 +1432,18 @@ ManageTransmitExecution(TaskTracker *transmitTracker, case EXEC_TRANSMIT_TRACKER_RETRY: { - bool trackerHealthy = false; - bool trackerConnectionUp = false; - /* * The task tracker proxy handles connection errors. On the off chance * that our connection is still up and the transmit tracker misbehaved, * we capture this as an intermittent tracker failure. 
*/ - trackerConnectionUp = TrackerConnectionUp(transmitTracker); + bool trackerConnectionUp = TrackerConnectionUp(transmitTracker); if (trackerConnectionUp) { transmitTracker->trackerFailureCount++; } - trackerHealthy = TrackerHealthy(transmitTracker); + bool trackerHealthy = TrackerHealthy(transmitTracker); if (trackerHealthy) { nextTransmitStatus = EXEC_TRANSMIT_UNASSIGNED; @@ -1536,7 +1484,7 @@ ManageTransmitExecution(TaskTracker *transmitTracker, } /* update file transmit status for most recent transmit tracker */ - nextNodeIndex = taskExecution->currentNodeIndex; + uint32 nextNodeIndex = taskExecution->currentNodeIndex; transmitStatusArray[nextNodeIndex] = nextTransmitStatus; return nextTransmitStatus; @@ -1583,7 +1531,6 @@ TaskExecutionsCompleted(List *taskList) static StringInfo MapFetchTaskQueryString(Task *mapFetchTask, Task *mapTask) { - StringInfo mapFetchQueryString = NULL; uint32 partitionFileId = mapFetchTask->partitionId; uint32 mergeTaskId = mapFetchTask->upstreamTaskId; @@ -1599,7 +1546,7 @@ MapFetchTaskQueryString(Task *mapFetchTask, Task *mapTask) Assert(mapFetchTask->taskType == MAP_OUTPUT_FETCH_TASK); Assert(mapTask->taskType == MAP_TASK); - mapFetchQueryString = makeStringInfo(); + StringInfo mapFetchQueryString = makeStringInfo(); appendStringInfo(mapFetchQueryString, MAP_OUTPUT_FETCH_COMMAND, mapTask->jobId, mapTask->taskId, partitionFileId, mergeTaskId, /* fetch results to merge task */ @@ -1619,8 +1566,6 @@ static void TrackerQueueSqlTask(TaskTracker *taskTracker, Task *task) { HTAB *taskStateHash = taskTracker->taskStateHash; - TrackerTaskState *taskState = NULL; - StringInfo taskAssignmentQuery = NULL; /* * We first wrap the original query string in a worker_execute_sql_task @@ -1644,9 +1589,10 @@ TrackerQueueSqlTask(TaskTracker *taskTracker, Task *task) } /* wrap a task assignment query outside the copy out query */ - taskAssignmentQuery = TaskAssignmentQuery(task, sqlTaskQueryString->data); + StringInfo taskAssignmentQuery = 
TaskAssignmentQuery(task, sqlTaskQueryString->data); - taskState = TaskStateHashEnter(taskStateHash, task->jobId, task->taskId); + TrackerTaskState *taskState = TaskStateHashEnter(taskStateHash, task->jobId, + task->taskId); taskState->status = TASK_CLIENT_SIDE_QUEUED; taskState->taskAssignmentQuery = taskAssignmentQuery; } @@ -1662,13 +1608,12 @@ static void TrackerQueueTask(TaskTracker *taskTracker, Task *task) { HTAB *taskStateHash = taskTracker->taskStateHash; - TrackerTaskState *taskState = NULL; - StringInfo taskAssignmentQuery = NULL; /* wrap a task assignment query outside the original query */ - taskAssignmentQuery = TaskAssignmentQuery(task, task->queryString); + StringInfo taskAssignmentQuery = TaskAssignmentQuery(task, task->queryString); - taskState = TaskStateHashEnter(taskStateHash, task->jobId, task->taskId); + TrackerTaskState *taskState = TaskStateHashEnter(taskStateHash, task->jobId, + task->taskId); taskState->status = TASK_CLIENT_SIDE_QUEUED; taskState->taskAssignmentQuery = taskAssignmentQuery; } @@ -1682,12 +1627,10 @@ TrackerQueueTask(TaskTracker *taskTracker, Task *task) static StringInfo TaskAssignmentQuery(Task *task, char *queryString) { - StringInfo taskAssignmentQuery = NULL; - /* quote the original query as a string literal */ char *escapedQueryString = quote_literal_cstr(queryString); - taskAssignmentQuery = makeStringInfo(); + StringInfo taskAssignmentQuery = makeStringInfo(); appendStringInfo(taskAssignmentQuery, TASK_ASSIGNMENT_QUERY, task->jobId, task->taskId, escapedQueryString); @@ -1728,17 +1671,16 @@ TrackerTaskStatus(TaskTracker *taskTracker, Task *task) static TrackerTaskState * TrackerTaskStateHashLookup(HTAB *taskStateHash, Task *task) { - TrackerTaskState *taskState = NULL; - void *hashKey = NULL; bool handleFound = false; TrackerTaskState taskStateKey; taskStateKey.jobId = task->jobId; taskStateKey.taskId = task->taskId; - hashKey = (void *) &taskStateKey; - taskState = (TrackerTaskState *) hash_search(taskStateHash, 
hashKey, - HASH_FIND, &handleFound); + void *hashKey = (void *) &taskStateKey; + TrackerTaskState *taskState = (TrackerTaskState *) hash_search(taskStateHash, hashKey, + HASH_FIND, + &handleFound); return taskState; } @@ -1768,9 +1710,9 @@ static void TrackerQueueFileTransmit(TaskTracker *transmitTracker, Task *task) { HTAB *transmitStateHash = transmitTracker->taskStateHash; - TrackerTaskState *transmitState = NULL; - transmitState = TaskStateHashEnter(transmitStateHash, task->jobId, task->taskId); + TrackerTaskState *transmitState = TaskStateHashEnter(transmitStateHash, task->jobId, + task->taskId); transmitState->status = TASK_FILE_TRANSMIT_QUEUED; } @@ -1782,17 +1724,16 @@ TrackerQueueFileTransmit(TaskTracker *transmitTracker, Task *task) static TrackerTaskState * TaskStateHashEnter(HTAB *taskStateHash, uint64 jobId, uint32 taskId) { - TrackerTaskState *taskState = NULL; - void *hashKey = NULL; bool handleFound = false; TrackerTaskState taskStateKey; taskStateKey.jobId = jobId; taskStateKey.taskId = taskId; - hashKey = (void *) &taskStateKey; - taskState = (TrackerTaskState *) hash_search(taskStateHash, hashKey, - HASH_ENTER, &handleFound); + void *hashKey = (void *) &taskStateKey; + TrackerTaskState *taskState = (TrackerTaskState *) hash_search(taskStateHash, hashKey, + HASH_ENTER, + &handleFound); /* if same task queued twice, we overwrite previous entry */ if (handleFound) @@ -1847,17 +1788,14 @@ static List * ConstrainedTaskList(List *taskAndExecutionList, Task *task) { List *constrainedTaskList = NIL; - Task *constrainingTask = NULL; - List *mergeTaskList = NIL; ListCell *mergeTaskCell = NULL; - List *upstreamTaskList = NIL; ListCell *upstreamTaskCell = NULL; /* * We first check if this task depends on any merge tasks. If it does *not*, * the task's dependency list becomes our tiny constraint group. 
*/ - mergeTaskList = ConstrainedMergeTaskList(taskAndExecutionList, task); + List *mergeTaskList = ConstrainedMergeTaskList(taskAndExecutionList, task); if (mergeTaskList == NIL) { constrainedTaskList = ConstrainedNonMergeTaskList(taskAndExecutionList, task); @@ -1882,9 +1820,10 @@ ConstrainedTaskList(List *taskAndExecutionList, Task *task) * we walk over all the tasks. If we want to optimize this later on, we can * precompute a task list that excludes map fetch tasks. */ - constrainingTask = (Task *) linitial(mergeTaskList); + Task *constrainingTask = (Task *) linitial(mergeTaskList); - upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, constrainingTask); + List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, + constrainingTask); Assert(upstreamTaskList != NIL); foreach(upstreamTaskCell, upstreamTaskList) @@ -1912,7 +1851,6 @@ ConstrainedTaskList(List *taskAndExecutionList, Task *task) static List * ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task) { - List *constrainedTaskList = NIL; Task *upstreamTask = NULL; List *dependedTaskList = NIL; @@ -1924,7 +1862,7 @@ ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task) } Assert(upstreamTask != NULL); - constrainedTaskList = list_make1(upstreamTask); + List *constrainedTaskList = list_make1(upstreamTask); constrainedTaskList = list_concat(constrainedTaskList, dependedTaskList); return constrainedTaskList; @@ -2015,7 +1953,6 @@ ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task) } else if (taskType == MERGE_TASK) { - Task *upstreamTask = NULL; List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task); /* @@ -2024,7 +1961,7 @@ ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task) * merge task besides us. 
*/ Assert(upstreamTaskList != NIL); - upstreamTask = (Task *) linitial(upstreamTaskList); + Task *upstreamTask = (Task *) linitial(upstreamTaskList); constrainedMergeTaskList = MergeTaskList(upstreamTask->dependedTaskList); } @@ -2145,16 +2082,13 @@ ReassignMapFetchTaskList(List *mapFetchTaskList) static void ManageTaskTracker(TaskTracker *taskTracker) { - bool trackerConnectionUp = false; - bool trackerHealthy = false; - - trackerHealthy = TrackerHealthy(taskTracker); + bool trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { return; } - trackerConnectionUp = TrackerConnectionUp(taskTracker); + bool trackerConnectionUp = TrackerConnectionUp(taskTracker); if (!trackerConnectionUp) { TrackerReconnectPoll(taskTracker); /* try an async reconnect */ @@ -2185,12 +2119,11 @@ ManageTaskTracker(TaskTracker *taskTracker) if (taskStatusBatchList) { int32 connectionId = taskTracker->connectionId; - StringInfo taskStatusBatchQuery = NULL; - bool querySent = false; - taskStatusBatchQuery = TaskStatusBatchQuery(taskStatusBatchList); + StringInfo taskStatusBatchQuery = TaskStatusBatchQuery(taskStatusBatchList); - querySent = MultiClientSendQuery(connectionId, taskStatusBatchQuery->data); + bool querySent = MultiClientSendQuery(connectionId, + taskStatusBatchQuery->data); if (querySent) { taskTracker->connectionBusy = true; @@ -2219,10 +2152,9 @@ ManageTaskTracker(TaskTracker *taskTracker) if (taskTracker->connectionBusy) { int32 connectionId = taskTracker->connectionId; - ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS; /* if connection is available, update task status accordingly */ - resultStatus = MultiClientResultStatus(connectionId); + ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { ReceiveTaskStatusBatchQueryResponse(taskTracker); @@ -2323,10 +2255,9 @@ AssignQueuedTasks(TaskTracker *taskTracker) int32 connectionId = taskTracker->connectionId; HASH_SEQ_STATUS status; - TrackerTaskState 
*taskState = NULL; hash_seq_init(&status, taskStateHash); - taskState = (TrackerTaskState *) hash_seq_search(&status); + TrackerTaskState *taskState = (TrackerTaskState *) hash_seq_search(&status); while (taskState != NULL) { if (taskState->status == TASK_CLIENT_SIDE_QUEUED) @@ -2359,8 +2290,6 @@ AssignQueuedTasks(TaskTracker *taskTracker) foreach(taskCell, tasksToAssignList) { - BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY; - taskState = (TrackerTaskState *) lfirst(taskCell); if (!batchSuccess) @@ -2369,8 +2298,10 @@ AssignQueuedTasks(TaskTracker *taskTracker) continue; } - queryStatus = MultiClientBatchResult(connectionId, &queryResult, - &rowCount, &columnCount); + BatchQueryStatus queryStatus = MultiClientBatchResult(connectionId, + &queryResult, + &rowCount, + &columnCount); if (queryStatus == CLIENT_BATCH_QUERY_CONTINUE) { taskState->status = TASK_ASSIGNED; @@ -2410,22 +2341,19 @@ AssignQueuedTasks(TaskTracker *taskTracker) static List * TaskStatusBatchList(TaskTracker *taskTracker) { - int32 assignedTaskCount = 0; int32 assignedTaskIndex = 0; List *assignedTaskList = taskTracker->assignedTaskList; List *taskStatusBatchList = NIL; ListCell *taskCell = NULL; - int32 currentTaskIndex = 0; - int32 lastTaskIndex = 0; - assignedTaskCount = list_length(assignedTaskList); + int32 assignedTaskCount = list_length(assignedTaskList); if (assignedTaskCount == 0) { return NIL; } - lastTaskIndex = (assignedTaskCount - 1); - currentTaskIndex = taskTracker->currentTaskIndex; + int32 lastTaskIndex = (assignedTaskCount - 1); + int32 currentTaskIndex = taskTracker->currentTaskIndex; if (currentTaskIndex >= lastTaskIndex) { currentTaskIndex = -1; @@ -2562,17 +2490,13 @@ ReceiveTaskStatusBatchQueryResponse(TaskTracker *taskTracker) static void ManageTransmitTracker(TaskTracker *transmitTracker) { - TrackerTaskState *transmitState = NULL; - bool trackerHealthy = false; - bool trackerConnectionUp = false; - - trackerHealthy = TrackerHealthy(transmitTracker); + bool 
trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { return; } - trackerConnectionUp = TrackerConnectionUp(transmitTracker); + bool trackerConnectionUp = TrackerConnectionUp(transmitTracker); if (!trackerConnectionUp) { TrackerReconnectPoll(transmitTracker); /* try an async reconnect */ @@ -2585,10 +2509,10 @@ ManageTransmitTracker(TaskTracker *transmitTracker) return; } - transmitState = NextQueuedFileTransmit(transmitTracker->taskStateHash); + TrackerTaskState *transmitState = NextQueuedFileTransmit( + transmitTracker->taskStateHash); if (transmitState != NULL) { - bool fileTransmitStarted = false; int32 connectionId = transmitTracker->connectionId; StringInfo jobDirectoryName = JobDirectoryName(transmitState->jobId); StringInfo taskFilename = TaskFilename(jobDirectoryName, transmitState->taskId); @@ -2598,7 +2522,8 @@ ManageTransmitTracker(TaskTracker *transmitTracker) appendStringInfo(fileTransmitQuery, TRANSMIT_WITH_USER_COMMAND, taskFilename->data, quote_literal_cstr(userName)); - fileTransmitStarted = MultiClientSendQuery(connectionId, fileTransmitQuery->data); + bool fileTransmitStarted = MultiClientSendQuery(connectionId, + fileTransmitQuery->data); if (fileTransmitStarted) { transmitState->status = TASK_ASSIGNED; @@ -2625,10 +2550,9 @@ static TrackerTaskState * NextQueuedFileTransmit(HTAB *taskStateHash) { HASH_SEQ_STATUS status; - TrackerTaskState *taskState = NULL; hash_seq_init(&status, taskStateHash); - taskState = (TrackerTaskState *) hash_seq_search(&status); + TrackerTaskState *taskState = (TrackerTaskState *) hash_seq_search(&status); while (taskState != NULL) { if (taskState->status == TASK_FILE_TRANSMIT_QUEUED) @@ -2653,17 +2577,15 @@ static List * JobIdList(Job *job) { List *jobIdList = NIL; - List *jobQueue = NIL; /* * We walk over the job tree using breadth-first search. For this, we first * queue the root node, and then start traversing our search space. 
*/ - jobQueue = list_make1(job); + List *jobQueue = list_make1(job); while (jobQueue != NIL) { uint64 *jobIdPointer = (uint64 *) palloc0(sizeof(uint64)); - List *jobChildrenList = NIL; Job *currJob = (Job *) linitial(jobQueue); jobQueue = list_delete_first(jobQueue); @@ -2672,7 +2594,7 @@ JobIdList(Job *job) jobIdList = lappend(jobIdList, jobIdPointer); /* prevent dependedJobList being modified on list_concat() call */ - jobChildrenList = list_copy(currJob->dependedJobList); + List *jobChildrenList = list_copy(currJob->dependedJobList); if (jobChildrenList != NIL) { jobQueue = list_concat(jobQueue, jobChildrenList); @@ -2740,11 +2662,10 @@ TrackerCleanupResources(HTAB *taskTrackerHash, HTAB *transmitTrackerHash, static void TrackerHashWaitActiveRequest(HTAB *taskTrackerHash) { - TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); - taskTracker = (TaskTracker *) hash_seq_search(&status); + TaskTracker *taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); @@ -2774,11 +2695,10 @@ TrackerHashWaitActiveRequest(HTAB *taskTrackerHash) static void TrackerHashCancelActiveRequest(HTAB *taskTrackerHash) { - TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); - taskTracker = (TaskTracker *) hash_seq_search(&status); + TaskTracker *taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); @@ -2801,13 +2721,10 @@ TrackerHashCancelActiveRequest(HTAB *taskTrackerHash) static Task * JobCleanupTask(uint64 jobId) { - Task *jobCleanupTask = NULL; - StringInfo jobCleanupQuery = NULL; - - jobCleanupQuery = makeStringInfo(); + StringInfo jobCleanupQuery = makeStringInfo(); appendStringInfo(jobCleanupQuery, JOB_CLEANUP_QUERY, jobId); - jobCleanupTask = CitusMakeNode(Task); + Task *jobCleanupTask = CitusMakeNode(Task); 
jobCleanupTask->jobId = jobId; jobCleanupTask->taskId = JOB_CLEANUP_TASK_ID; jobCleanupTask->replicationModel = REPLICATION_MODEL_INVALID; @@ -2828,17 +2745,14 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) { uint64 jobId = jobCleanupTask->jobId; List *taskTrackerList = NIL; - List *remainingTaskTrackerList = NIL; const long statusCheckInterval = 10000; /* microseconds */ bool timedOut = false; - TimestampTz startTime = 0; - TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); /* walk over task trackers and try to issue job clean up requests */ - taskTracker = (TaskTracker *) hash_seq_search(&status); + TaskTracker *taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); @@ -2849,11 +2763,10 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) /* if we have a clear connection, send cleanup job */ if (!taskTracker->connectionBusy) { - StringInfo jobCleanupQuery = NULL; - /* assign through task tracker to manage resource utilization */ - jobCleanupQuery = TaskAssignmentQuery(jobCleanupTask, - jobCleanupTask->queryString); + StringInfo jobCleanupQuery = TaskAssignmentQuery(jobCleanupTask, + jobCleanupTask-> + queryString); jobCleanupQuerySent = MultiClientSendQuery(taskTracker->connectionId, jobCleanupQuery->data); @@ -2883,7 +2796,7 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) } /* record the time when we start waiting for cleanup jobs to be sent */ - startTime = GetCurrentTimestamp(); + TimestampTz startTime = GetCurrentTimestamp(); /* * Walk over task trackers to which we sent clean up requests. Perform @@ -2893,33 +2806,27 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) * we iterate one more time after time out occurs. This is necessary to report * warning messages for timed out cleanup jobs. 
*/ - remainingTaskTrackerList = taskTrackerList; + List *remainingTaskTrackerList = taskTrackerList; while (list_length(remainingTaskTrackerList) > 0 && !timedOut) { List *activeTackTrackerList = remainingTaskTrackerList; ListCell *activeTaskTrackerCell = NULL; - TimestampTz currentTime = 0; remainingTaskTrackerList = NIL; pg_usleep(statusCheckInterval); - currentTime = GetCurrentTimestamp(); + TimestampTz currentTime = GetCurrentTimestamp(); timedOut = TimestampDifferenceExceeds(startTime, currentTime, NodeConnectionTimeout); foreach(activeTaskTrackerCell, activeTackTrackerList) { - int32 connectionId = 0; - char *nodeName = NULL; - uint32 nodePort = 0; - ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS; - taskTracker = (TaskTracker *) lfirst(activeTaskTrackerCell); - connectionId = taskTracker->connectionId; - nodeName = taskTracker->workerName; - nodePort = taskTracker->workerPort; + int32 connectionId = taskTracker->connectionId; + char *nodeName = taskTracker->workerName; + uint32 nodePort = taskTracker->workerPort; - resultStatus = MultiClientResultStatus(connectionId); + ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { QueryStatus queryStatus = MultiClientQueryStatus(connectionId); @@ -2976,11 +2883,10 @@ TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) static void TrackerHashDisconnect(HTAB *taskTrackerHash) { - TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); - taskTracker = (TaskTracker *) hash_seq_search(&status); + TaskTracker *taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { if (taskTracker->connectionId != INVALID_CONNECTION_ID) @@ -3004,7 +2910,6 @@ TupleTableSlot * TaskTrackerExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; - TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { @@ -3032,7 +2937,7 @@ 
TaskTrackerExecScan(CustomScanState *node) scanState->finishedRemoteScan = true; } - resultSlot = ReturnTupleFromTuplestore(scanState); + TupleTableSlot *resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } diff --git a/src/backend/distributed/executor/placement_access.c b/src/backend/distributed/executor/placement_access.c index 21e23f464..c3cab3f01 100644 --- a/src/backend/distributed/executor/placement_access.c +++ b/src/backend/distributed/executor/placement_access.c @@ -128,16 +128,16 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList, foreach(relationShardCell, relationShardList) { RelationShard *relationShard = (RelationShard *) lfirst(relationShardCell); - ShardPlacement *placement = NULL; - ShardPlacementAccess *placementAccess = NULL; - placement = FindShardPlacementOnGroup(groupId, relationShard->shardId); + ShardPlacement *placement = FindShardPlacementOnGroup(groupId, + relationShard->shardId); if (placement == NULL) { continue; } - placementAccess = CreatePlacementAccess(placement, accessType); + ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement, + accessType); placementAccessList = lappend(placementAccessList, placementAccess); } @@ -152,9 +152,8 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList, ShardPlacementAccess * CreatePlacementAccess(ShardPlacement *placement, ShardPlacementAccessType accessType) { - ShardPlacementAccess *placementAccess = NULL; - - placementAccess = (ShardPlacementAccess *) palloc0(sizeof(ShardPlacementAccess)); + ShardPlacementAccess *placementAccess = (ShardPlacementAccess *) palloc0( + sizeof(ShardPlacementAccess)); placementAccess->placement = placement; placementAccess->accessType = accessType; diff --git a/src/backend/distributed/executor/subplan_execution.c b/src/backend/distributed/executor/subplan_execution.c index 6f3059d10..cb687e299 100644 --- a/src/backend/distributed/executor/subplan_execution.c +++ 
b/src/backend/distributed/executor/subplan_execution.c @@ -36,7 +36,6 @@ ExecuteSubPlans(DistributedPlan *distributedPlan) uint64 planId = distributedPlan->planId; List *subPlanList = distributedPlan->subPlanList; ListCell *subPlanCell = NULL; - HTAB *intermediateResultsHash = NULL; if (subPlanList == NIL) { @@ -44,7 +43,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan) return; } - intermediateResultsHash = MakeIntermediateResultHTAB(); + HTAB *intermediateResultsHash = MakeIntermediateResultHTAB(); RecordSubplanExecutionsOnNodes(intermediateResultsHash, distributedPlan); @@ -61,9 +60,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan) DistributedSubPlan *subPlan = (DistributedSubPlan *) lfirst(subPlanCell); PlannedStmt *plannedStmt = subPlan->plan; uint32 subPlanId = subPlan->subPlanId; - DestReceiver *copyDest = NULL; ParamListInfo params = NULL; - EState *estate = NULL; bool writeLocalFile = false; char *resultId = GenerateResultId(planId, subPlanId); List *workerNodeList = @@ -94,9 +91,10 @@ ExecuteSubPlans(DistributedPlan *distributedPlan) } SubPlanLevel++; - estate = CreateExecutorState(); - copyDest = CreateRemoteFileDestReceiver(resultId, estate, workerNodeList, - writeLocalFile); + EState *estate = CreateExecutorState(); + DestReceiver *copyDest = CreateRemoteFileDestReceiver(resultId, estate, + workerNodeList, + writeLocalFile); ExecutePlanIntoDestReceiver(plannedStmt, params, copyDest); diff --git a/src/backend/distributed/master/citus_create_restore_point.c b/src/backend/distributed/master/citus_create_restore_point.c index f7d642556..babf84624 100644 --- a/src/backend/distributed/master/citus_create_restore_point.c +++ b/src/backend/distributed/master/citus_create_restore_point.c @@ -49,9 +49,6 @@ Datum citus_create_restore_point(PG_FUNCTION_ARGS) { text *restoreNameText = PG_GETARG_TEXT_P(0); - char *restoreNameString = NULL; - XLogRecPtr localRestorePoint = InvalidXLogRecPtr; - List *connectionList = NIL; CheckCitusVersion(ERROR); 
EnsureSuperUser(); @@ -74,7 +71,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS) "start."))); } - restoreNameString = text_to_cstring(restoreNameText); + char *restoreNameString = text_to_cstring(restoreNameText); if (strlen(restoreNameString) >= MAXFNAMELEN) { ereport(ERROR, @@ -87,7 +84,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS) * establish connections to all nodes before taking any locks * ShareLock prevents new nodes being added, rendering connectionList incomplete */ - connectionList = OpenConnectionsToAllWorkerNodes(ShareLock); + List *connectionList = OpenConnectionsToAllWorkerNodes(ShareLock); /* * Send a BEGIN to bust through pgbouncer. We won't actually commit since @@ -100,7 +97,7 @@ citus_create_restore_point(PG_FUNCTION_ARGS) BlockDistributedTransactions(); /* do local restore point first to bail out early if something goes wrong */ - localRestorePoint = XLogRestorePoint(restoreNameString); + XLogRecPtr localRestorePoint = XLogRestorePoint(restoreNameString); /* run pg_create_restore_point on all nodes */ CreateRemoteRestorePoints(restoreNameString, connectionList); @@ -117,19 +114,18 @@ static List * OpenConnectionsToAllWorkerNodes(LOCKMODE lockMode) { List *connectionList = NIL; - List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; int connectionFlags = FORCE_NEW_CONNECTION; - workerNodeList = ActivePrimaryWorkerNodeList(lockMode); + List *workerNodeList = ActivePrimaryWorkerNodeList(lockMode); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); - MultiConnection *connection = NULL; - connection = StartNodeConnection(connectionFlags, workerNode->workerName, - workerNode->workerPort); + MultiConnection *connection = StartNodeConnection(connectionFlags, + workerNode->workerName, + workerNode->workerPort); MarkRemoteTransactionCritical(connection); connectionList = lappend(connectionList, connection); diff --git a/src/backend/distributed/master/master_citus_tools.c 
b/src/backend/distributed/master/master_citus_tools.c index 06814c58d..83829547d 100644 --- a/src/backend/distributed/master/master_citus_tools.c +++ b/src/backend/distributed/master/master_citus_tools.c @@ -72,18 +72,10 @@ Datum master_run_on_worker(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - MemoryContext per_query_ctx = NULL; - MemoryContext oldcontext = NULL; - TupleDesc tupleDescriptor = NULL; - Tuplestorestate *tupleStore = NULL; bool parallelExecution = false; StringInfo *nodeNameArray = NULL; int *nodePortArray = NULL; StringInfo *commandStringArray = NULL; - bool *statusArray = NULL; - StringInfo *resultArray = NULL; - int commandIndex = 0; - int commandCount = 0; CheckCitusVersion(ERROR); @@ -96,14 +88,14 @@ master_run_on_worker(PG_FUNCTION_ARGS) "allowed in this context"))); } - commandCount = ParseCommandParameters(fcinfo, &nodeNameArray, &nodePortArray, - &commandStringArray, ¶llelExecution); + int commandCount = ParseCommandParameters(fcinfo, &nodeNameArray, &nodePortArray, + &commandStringArray, ¶llelExecution); - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); + MemoryContext per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + MemoryContext oldcontext = MemoryContextSwitchTo(per_query_ctx); /* get the requested return tuple description */ - tupleDescriptor = CreateTupleDescCopy(rsinfo->expectedDesc); + TupleDesc tupleDescriptor = CreateTupleDescCopy(rsinfo->expectedDesc); /* * Check to make sure we have correct tuple descriptor @@ -121,9 +113,9 @@ master_run_on_worker(PG_FUNCTION_ARGS) } /* prepare storage for status and result values */ - statusArray = palloc0(commandCount * sizeof(bool)); - resultArray = palloc0(commandCount * sizeof(StringInfo)); - for (commandIndex = 0; commandIndex < commandCount; commandIndex++) + bool *statusArray = palloc0(commandCount * sizeof(bool)); + StringInfo *resultArray = palloc0(commandCount * 
sizeof(StringInfo)); + for (int commandIndex = 0; commandIndex < commandCount; commandIndex++) { resultArray[commandIndex] = makeStringInfo(); } @@ -142,9 +134,10 @@ master_run_on_worker(PG_FUNCTION_ARGS) /* let the caller know we're sending back a tuplestore */ rsinfo->returnMode = SFRM_Materialize; - tupleStore = CreateTupleStore(tupleDescriptor, - nodeNameArray, nodePortArray, statusArray, - resultArray, commandCount); + Tuplestorestate *tupleStore = CreateTupleStore(tupleDescriptor, + nodeNameArray, nodePortArray, + statusArray, + resultArray, commandCount); rsinfo->setResult = tupleStore; rsinfo->setDesc = tupleDescriptor; @@ -170,10 +163,6 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray, Datum *nodeNameDatumArray = DeconstructArrayObject(nodeNameArrayObject); Datum *nodePortDatumArray = DeconstructArrayObject(nodePortArrayObject); Datum *commandStringDatumArray = DeconstructArrayObject(commandStringArrayObject); - int index = 0; - StringInfo *nodeNames = NULL; - int *nodePorts = NULL; - StringInfo *commandStrings = NULL; if (nodeNameCount != nodePortCount || nodeNameCount != commandStringCount) { @@ -182,11 +171,11 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray, errmsg("expected same number of node name, port, and query string"))); } - nodeNames = palloc0(nodeNameCount * sizeof(StringInfo)); - nodePorts = palloc0(nodeNameCount * sizeof(int)); - commandStrings = palloc0(nodeNameCount * sizeof(StringInfo)); + StringInfo *nodeNames = palloc0(nodeNameCount * sizeof(StringInfo)); + int *nodePorts = palloc0(nodeNameCount * sizeof(int)); + StringInfo *commandStrings = palloc0(nodeNameCount * sizeof(StringInfo)); - for (index = 0; index < nodeNameCount; index++) + for (int index = 0; index < nodeNameCount; index++) { text *nodeNameText = DatumGetTextP(nodeNameDatumArray[index]); char *nodeName = text_to_cstring(nodeNameText); @@ -224,13 +213,12 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo 
*nodeNameArray, int *nodePor bool *statusArray, StringInfo *resultStringArray, int commmandCount) { - int commandIndex = 0; MultiConnection **connectionArray = palloc0(commmandCount * sizeof(MultiConnection *)); int finishedCount = 0; /* start connections asynchronously */ - for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++) { char *nodeName = nodeNameArray[commandIndex]->data; int nodePort = nodePortArray[commandIndex]; @@ -240,7 +228,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor } /* establish connections */ - for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++) { MultiConnection *connection = connectionArray[commandIndex]; StringInfo queryResultString = resultStringArray[commandIndex]; @@ -264,9 +252,8 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor } /* send queries at once */ - for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++) { - int querySent = 0; MultiConnection *connection = connectionArray[commandIndex]; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; @@ -280,7 +267,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor continue; } - querySent = SendRemoteCommand(connection, queryString); + int querySent = SendRemoteCommand(connection, queryString); if (querySent == 0) { StoreErrorMessage(connection, queryResultString); @@ -294,20 +281,19 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor /* check for query results */ while (finishedCount < commmandCount) { - for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < 
commmandCount; commandIndex++) { MultiConnection *connection = connectionArray[commandIndex]; StringInfo queryResultString = resultStringArray[commandIndex]; bool success = false; - bool queryFinished = false; if (connection == NULL) { continue; } - queryFinished = GetConnectionStatusAndResult(connection, &success, - queryResultString); + bool queryFinished = GetConnectionStatusAndResult(connection, &success, + queryResultString); if (queryFinished) { @@ -343,9 +329,6 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, { bool finished = true; ConnStatusType connectionStatus = PQstatus(connection->pgConn); - int consumeInput = 0; - PGresult *queryResult = NULL; - bool success = false; *resultStatus = false; resetStringInfo(queryResultString); @@ -356,7 +339,7 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, return finished; } - consumeInput = PQconsumeInput(connection->pgConn); + int consumeInput = PQconsumeInput(connection->pgConn); if (consumeInput == 0) { appendStringInfo(queryResultString, "query result unavailable"); @@ -371,8 +354,8 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, } /* query result is available at this point */ - queryResult = PQgetResult(connection->pgConn); - success = EvaluateQueryResult(connection, queryResult, queryResultString); + PGresult *queryResult = PQgetResult(connection->pgConn); + bool success = EvaluateQueryResult(connection, queryResult, queryResultString); PQclear(queryResult); *resultStatus = success; @@ -449,12 +432,10 @@ StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString) char *errorMessage = PQerrorMessage(connection->pgConn); if (errorMessage != NULL) { - char *firstNewlineIndex = NULL; - /* copy the error message to a writable memory */ errorMessage = pnstrdup(errorMessage, strlen(errorMessage)); - firstNewlineIndex = strchr(errorMessage, '\n'); + char *firstNewlineIndex = strchr(errorMessage, '\n'); /* 
trim the error message at the line break */ if (firstNewlineIndex != NULL) @@ -484,17 +465,15 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commmandCount) { - int commandIndex = 0; - for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++) { char *nodeName = nodeNameArray[commandIndex]->data; int32 nodePort = nodePortArray[commandIndex]; - bool success = false; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; - success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, - queryResultString); + bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, + queryResultString); statusArray[commandIndex] = success; @@ -516,8 +495,6 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, int connectionFlags = FORCE_NEW_CONNECTION; MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); - bool success = false; - PGresult *queryResult = NULL; bool raiseInterrupts = true; if (PQstatus(connection->pgConn) != CONNECTION_OK) @@ -528,8 +505,8 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, } SendRemoteCommand(connection, queryString); - queryResult = GetRemoteCommandResult(connection, raiseInterrupts); - success = EvaluateQueryResult(connection, queryResult, queryResultString); + PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); + bool success = EvaluateQueryResult(connection, queryResult, queryResultString); PQclear(queryResult); @@ -547,13 +524,11 @@ CreateTupleStore(TupleDesc tupleDescriptor, StringInfo *resultArray, int commandCount) { Tuplestorestate *tupleStore = tuplestore_begin_heap(true, false, work_mem); - int commandIndex = 0; bool nulls[4] = { 
false, false, false, false }; - for (commandIndex = 0; commandIndex < commandCount; commandIndex++) + for (int commandIndex = 0; commandIndex < commandCount; commandIndex++) { Datum values[4]; - HeapTuple tuple = NULL; StringInfo nodeNameString = nodeNameArray[commandIndex]; StringInfo resultString = resultArray[commandIndex]; text *nodeNameText = cstring_to_text_with_len(nodeNameString->data, @@ -566,7 +541,7 @@ CreateTupleStore(TupleDesc tupleDescriptor, values[2] = BoolGetDatum(statusArray[commandIndex]); values[3] = PointerGetDatum(resultText); - tuple = heap_form_tuple(tupleDescriptor, values, nulls); + HeapTuple tuple = heap_form_tuple(tupleDescriptor, values, nulls); tuplestore_puttuple(tupleStore, tuple); heap_freetuple(tuple); diff --git a/src/backend/distributed/master/master_create_shards.c b/src/backend/distributed/master/master_create_shards.c index 91099ac58..8ba0030a2 100644 --- a/src/backend/distributed/master/master_create_shards.c +++ b/src/backend/distributed/master/master_create_shards.c @@ -106,13 +106,6 @@ void CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, int32 replicationFactor, bool useExclusiveConnections) { - char shardStorageType = 0; - List *workerNodeList = NIL; - int32 workerNodeCount = 0; - uint32 placementAttemptCount = 0; - uint64 hashTokenIncrement = 0; - List *existingShardList = NIL; - int64 shardIndex = 0; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); bool colocatedShard = false; List *insertedShardPlacements = NIL; @@ -132,7 +125,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, LockRelationOid(distributedTableId, ExclusiveLock); /* validate that shards haven't already been created for this table */ - existingShardList = LoadShardList(distributedTableId); + List *existingShardList = LoadShardList(distributedTableId); if (existingShardList != NIL) { char *tableName = get_rel_name(distributedTableId); @@ -171,16 +164,16 @@ 
CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, } /* calculate the split of the hash space */ - hashTokenIncrement = HASH_TOKEN_COUNT / shardCount; + uint64 hashTokenIncrement = HASH_TOKEN_COUNT / shardCount; /* don't allow concurrent node list changes that require an exclusive lock */ LockRelationOid(DistNodeRelationId(), RowShareLock); /* load and sort the worker node list for deterministic placement */ - workerNodeList = DistributedTablePlacementNodeList(NoLock); + List *workerNodeList = DistributedTablePlacementNodeList(NoLock); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); - workerNodeCount = list_length(workerNodeList); + int32 workerNodeCount = list_length(workerNodeList); if (replicationFactor > workerNodeCount) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -191,26 +184,23 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, } /* if we have enough nodes, add an extra placement attempt for backup */ - placementAttemptCount = (uint32) replicationFactor; + uint32 placementAttemptCount = (uint32) replicationFactor; if (workerNodeCount > replicationFactor) { placementAttemptCount++; } /* set shard storage type according to relation type */ - shardStorageType = ShardStorageType(distributedTableId); + char shardStorageType = ShardStorageType(distributedTableId); - for (shardIndex = 0; shardIndex < shardCount; shardIndex++) + for (int64 shardIndex = 0; shardIndex < shardCount; shardIndex++) { uint32 roundRobinNodeIndex = shardIndex % workerNodeCount; /* initialize the hash token space for this shard */ - text *minHashTokenText = NULL; - text *maxHashTokenText = NULL; int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement); int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1); uint64 shardId = GetNextShardId(); - List *currentInsertedShardPlacements = NIL; /* if we are at the last shard, make sure the max token value is INT_MAX */ if (shardIndex == 
(shardCount - 1)) @@ -219,8 +209,8 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, } /* insert the shard metadata row along with its min/max values */ - minHashTokenText = IntegerToText(shardMinHashToken); - maxHashTokenText = IntegerToText(shardMaxHashToken); + text *minHashTokenText = IntegerToText(shardMinHashToken); + text *maxHashTokenText = IntegerToText(shardMaxHashToken); /* * Grabbing the shard metadata lock isn't technically necessary since @@ -233,11 +223,12 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, InsertShardRow(distributedTableId, shardId, shardStorageType, minHashTokenText, maxHashTokenText); - currentInsertedShardPlacements = InsertShardPlacementRows(distributedTableId, - shardId, - workerNodeList, - roundRobinNodeIndex, - replicationFactor); + List *currentInsertedShardPlacements = InsertShardPlacementRows( + distributedTableId, + shardId, + workerNodeList, + roundRobinNodeIndex, + replicationFactor); insertedShardPlacements = list_concat(insertedShardPlacements, currentInsertedShardPlacements); } @@ -255,9 +246,6 @@ void CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool useExclusiveConnections) { - char targetShardStorageType = 0; - List *existingShardList = NIL; - List *sourceShardIntervalList = NIL; ListCell *sourceShardCell = NULL; bool colocatedShard = true; List *insertedShardPlacements = NIL; @@ -281,11 +269,11 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool LockRelationOid(sourceRelationId, AccessShareLock); /* prevent placement changes of the source relation until we colocate with them */ - sourceShardIntervalList = LoadShardIntervalList(sourceRelationId); + List *sourceShardIntervalList = LoadShardIntervalList(sourceRelationId); LockShardListMetadata(sourceShardIntervalList, ShareLock); /* validate that shards haven't already been created for this table */ - existingShardList = LoadShardList(targetRelationId); + List 
*existingShardList = LoadShardList(targetRelationId); if (existingShardList != NIL) { char *targetRelationName = get_rel_name(targetRelationId); @@ -294,7 +282,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool targetRelationName))); } - targetShardStorageType = ShardStorageType(targetRelationId); + char targetShardStorageType = ShardStorageType(targetRelationId); foreach(sourceShardCell, sourceShardIntervalList) { @@ -319,17 +307,18 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool int32 groupId = sourcePlacement->groupId; const RelayFileState shardState = FILE_FINALIZED; const uint64 shardSize = 0; - uint64 shardPlacementId = 0; - ShardPlacement *shardPlacement = NULL; /* * Optimistically add shard placement row the pg_dist_shard_placement, in case * of any error it will be roll-backed. */ - shardPlacementId = InsertShardPlacementRow(newShardId, INVALID_PLACEMENT_ID, - shardState, shardSize, groupId); + uint64 shardPlacementId = InsertShardPlacementRow(newShardId, + INVALID_PLACEMENT_ID, + shardState, shardSize, + groupId); - shardPlacement = LoadShardPlacement(newShardId, shardPlacementId); + ShardPlacement *shardPlacement = LoadShardPlacement(newShardId, + shardPlacementId); insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement); } } @@ -347,17 +336,11 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool void CreateReferenceTableShard(Oid distributedTableId) { - char shardStorageType = 0; - List *nodeList = NIL; - List *existingShardList = NIL; - uint64 shardId = INVALID_SHARD_ID; int workerStartIndex = 0; - int replicationFactor = 0; text *shardMinValue = NULL; text *shardMaxValue = NULL; bool useExclusiveConnection = false; bool colocatedShard = false; - List *insertedShardPlacements = NIL; /* * In contrast to append/range partitioned tables it makes more sense to @@ -371,10 +354,10 @@ CreateReferenceTableShard(Oid distributedTableId) LockRelationOid(distributedTableId, 
ExclusiveLock); /* set shard storage type according to relation type */ - shardStorageType = ShardStorageType(distributedTableId); + char shardStorageType = ShardStorageType(distributedTableId); /* validate that shards haven't already been created for this table */ - existingShardList = LoadShardList(distributedTableId); + List *existingShardList = LoadShardList(distributedTableId); if (existingShardList != NIL) { char *tableName = get_rel_name(distributedTableId); @@ -387,13 +370,13 @@ CreateReferenceTableShard(Oid distributedTableId) * load and sort the worker node list for deterministic placements * create_reference_table has already acquired pg_dist_node lock */ - nodeList = ReferenceTablePlacementNodeList(ShareLock); + List *nodeList = ReferenceTablePlacementNodeList(ShareLock); nodeList = SortList(nodeList, CompareWorkerNodes); - replicationFactor = ReferenceTableReplicationFactor(); + int replicationFactor = ReferenceTableReplicationFactor(); /* get the next shard id */ - shardId = GetNextShardId(); + uint64 shardId = GetNextShardId(); /* * Grabbing the shard metadata lock isn't technically necessary since @@ -406,9 +389,9 @@ CreateReferenceTableShard(Oid distributedTableId) InsertShardRow(distributedTableId, shardId, shardStorageType, shardMinValue, shardMaxValue); - insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId, - nodeList, workerStartIndex, - replicationFactor); + List *insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId, + nodeList, workerStartIndex, + replicationFactor); CreateShardsOnWorkers(distributedTableId, insertedShardPlacements, useExclusiveConnection, colocatedShard); @@ -436,11 +419,10 @@ CheckHashPartitionedTable(Oid distributedTableId) text * IntegerToText(int32 value) { - text *valueText = NULL; StringInfo valueString = makeStringInfo(); appendStringInfo(valueString, "%d", value); - valueText = cstring_to_text(valueString->data); + text *valueText = 
cstring_to_text(valueString->data); return valueText; } diff --git a/src/backend/distributed/master/master_delete_protocol.c b/src/backend/distributed/master/master_delete_protocol.c index 972271e72..f54f40f15 100644 --- a/src/backend/distributed/master/master_delete_protocol.c +++ b/src/backend/distributed/master/master_delete_protocol.c @@ -103,23 +103,10 @@ master_apply_delete_command(PG_FUNCTION_ARGS) { text *queryText = PG_GETARG_TEXT_P(0); char *queryString = text_to_cstring(queryText); - char *relationName = NULL; - char *schemaName = NULL; - Oid relationId = InvalidOid; - List *shardIntervalList = NIL; List *deletableShardIntervalList = NIL; - List *queryTreeList = NIL; - Query *deleteQuery = NULL; - Node *whereClause = NULL; - Node *deleteCriteria = NULL; - Node *queryTreeNode = NULL; - DeleteStmt *deleteStatement = NULL; - int droppedShardCount = 0; - LOCKMODE lockMode = 0; - char partitionMethod = 0; bool failOK = false; RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); - queryTreeNode = rawStmt->stmt; + Node *queryTreeNode = rawStmt->stmt; EnsureCoordinator(); CheckCitusVersion(ERROR); @@ -130,19 +117,19 @@ master_apply_delete_command(PG_FUNCTION_ARGS) ApplyLogRedaction(queryString)))); } - deleteStatement = (DeleteStmt *) queryTreeNode; + DeleteStmt *deleteStatement = (DeleteStmt *) queryTreeNode; - schemaName = deleteStatement->relation->schemaname; - relationName = deleteStatement->relation->relname; + char *schemaName = deleteStatement->relation->schemaname; + char *relationName = deleteStatement->relation->relname; /* * We take an exclusive lock while dropping shards to prevent concurrent * writes. We don't want to block SELECTs, which means queries might fail * if they access a shard that has just been dropped. 
*/ - lockMode = ExclusiveLock; + LOCKMODE lockMode = ExclusiveLock; - relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK); + Oid relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK); /* schema-prefix if it is not specified already */ if (schemaName == NULL) @@ -154,15 +141,15 @@ master_apply_delete_command(PG_FUNCTION_ARGS) CheckDistributedTable(relationId); EnsureTablePermissions(relationId, ACL_DELETE); - queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); - deleteQuery = (Query *) linitial(queryTreeList); + List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); + Query *deleteQuery = (Query *) linitial(queryTreeList); CheckTableCount(deleteQuery); /* get where clause and flatten it */ - whereClause = (Node *) deleteQuery->jointree->quals; - deleteCriteria = eval_const_expressions(NULL, whereClause); + Node *whereClause = (Node *) deleteQuery->jointree->quals; + Node *deleteCriteria = eval_const_expressions(NULL, whereClause); - partitionMethod = PartitionMethod(relationId); + char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -184,7 +171,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS) CheckDeleteCriteria(deleteCriteria); CheckPartitionColumn(relationId, deleteCriteria); - shardIntervalList = LoadShardIntervalList(relationId); + List *shardIntervalList = LoadShardIntervalList(relationId); /* drop all shards if where clause is not present */ if (deleteCriteria == NULL) @@ -199,8 +186,8 @@ master_apply_delete_command(PG_FUNCTION_ARGS) deleteCriteria); } - droppedShardCount = DropShards(relationId, schemaName, relationName, - deletableShardIntervalList); + int droppedShardCount = DropShards(relationId, schemaName, relationName, + deletableShardIntervalList); PG_RETURN_INT32(droppedShardCount); } @@ -218,8 +205,6 @@ master_drop_all_shards(PG_FUNCTION_ARGS) text 
*schemaNameText = PG_GETARG_TEXT_P(1); text *relationNameText = PG_GETARG_TEXT_P(2); - List *shardIntervalList = NIL; - int droppedShardCount = 0; char *schemaName = text_to_cstring(schemaNameText); char *relationName = text_to_cstring(relationNameText); @@ -246,9 +231,9 @@ master_drop_all_shards(PG_FUNCTION_ARGS) */ LockRelationOid(relationId, AccessExclusiveLock); - shardIntervalList = LoadShardIntervalList(relationId); - droppedShardCount = DropShards(relationId, schemaName, relationName, - shardIntervalList); + List *shardIntervalList = LoadShardIntervalList(relationId); + int droppedShardCount = DropShards(relationId, schemaName, relationName, + shardIntervalList); PG_RETURN_INT32(droppedShardCount); } @@ -265,7 +250,6 @@ Datum master_drop_sequences(PG_FUNCTION_ARGS) { ArrayType *sequenceNamesArray = PG_GETARG_ARRAYTYPE_P(0); - ArrayIterator sequenceIterator = NULL; Datum sequenceNameDatum = 0; bool isNull = false; StringInfo dropSeqCommand = makeStringInfo(); @@ -291,20 +275,17 @@ master_drop_sequences(PG_FUNCTION_ARGS) } /* iterate over sequence names to build single command to DROP them all */ - sequenceIterator = array_create_iterator(sequenceNamesArray, 0, NULL); + ArrayIterator sequenceIterator = array_create_iterator(sequenceNamesArray, 0, NULL); while (array_iterate(sequenceIterator, &sequenceNameDatum, &isNull)) { - text *sequenceNameText = NULL; - Oid sequenceOid = InvalidOid; - if (isNull) { ereport(ERROR, (errmsg("unexpected NULL sequence name"), errcode(ERRCODE_INVALID_PARAMETER_VALUE))); } - sequenceNameText = DatumGetTextP(sequenceNameDatum); - sequenceOid = ResolveRelationId(sequenceNameText, true); + text *sequenceNameText = DatumGetTextP(sequenceNameDatum); + Oid sequenceOid = ResolveRelationId(sequenceNameText, true); if (OidIsValid(sequenceOid)) { /* @@ -379,7 +360,6 @@ DropShards(Oid relationId, char *schemaName, char *relationName, List *deletableShardIntervalList) { ListCell *shardIntervalCell = NULL; - int droppedShardCount = 0; 
BeginOrContinueCoordinatedTransaction(); @@ -391,20 +371,18 @@ DropShards(Oid relationId, char *schemaName, char *relationName, foreach(shardIntervalCell, deletableShardIntervalList) { - List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; - char *quotedShardName = NULL; char *shardRelationName = pstrdup(relationName); Assert(shardInterval->relationId == relationId); /* Build shard relation name. */ AppendShardIdToName(&shardRelationName, shardId); - quotedShardName = quote_qualified_identifier(schemaName, shardRelationName); + char *quotedShardName = quote_qualified_identifier(schemaName, shardRelationName); - shardPlacementList = ShardPlacementList(shardId); + List *shardPlacementList = ShardPlacementList(shardId); foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = @@ -412,7 +390,6 @@ DropShards(Oid relationId, char *schemaName, char *relationName, char *workerName = shardPlacement->nodeName; uint32 workerPort = shardPlacement->nodePort; StringInfo workerDropQuery = makeStringInfo(); - MultiConnection *connection = NULL; uint32 connectionFlags = FOR_DDL; char storageType = shardInterval->storageType; @@ -441,8 +418,9 @@ DropShards(Oid relationId, char *schemaName, char *relationName, continue; } - connection = GetPlacementConnection(connectionFlags, shardPlacement, - NULL); + MultiConnection *connection = GetPlacementConnection(connectionFlags, + shardPlacement, + NULL); RemoteTransactionBeginIfNecessary(connection); @@ -471,7 +449,7 @@ DropShards(Oid relationId, char *schemaName, char *relationName, DeleteShardRow(shardId); } - droppedShardCount = list_length(deletableShardIntervalList); + int droppedShardCount = list_length(deletableShardIntervalList); return droppedShardCount; } @@ -573,7 +551,6 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList, Node *deleteCriteria) { List 
*dropShardIntervalList = NIL; - List *deleteCriteriaList = NIL; ListCell *shardIntervalCell = NULL; /* build the base expression for constraint */ @@ -582,7 +559,7 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList, Node *baseConstraint = BuildBaseConstraint(partitionColumn); Assert(deleteCriteria != NULL); - deleteCriteriaList = list_make1(deleteCriteria); + List *deleteCriteriaList = list_make1(deleteCriteria); /* walk over shard list and check if shards can be dropped */ foreach(shardIntervalCell, shardIntervalList) @@ -591,27 +568,23 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList, if (shardInterval->minValueExists && shardInterval->maxValueExists) { List *restrictInfoList = NIL; - bool dropShard = false; - BoolExpr *andExpr = NULL; - Expr *lessThanExpr = NULL; - Expr *greaterThanExpr = NULL; - RestrictInfo *lessThanRestrictInfo = NULL; - RestrictInfo *greaterThanRestrictInfo = NULL; /* set the min/max values in the base constraint */ UpdateConstraint(baseConstraint, shardInterval); - andExpr = (BoolExpr *) baseConstraint; - lessThanExpr = (Expr *) linitial(andExpr->args); - greaterThanExpr = (Expr *) lsecond(andExpr->args); + BoolExpr *andExpr = (BoolExpr *) baseConstraint; + Expr *lessThanExpr = (Expr *) linitial(andExpr->args); + Expr *greaterThanExpr = (Expr *) lsecond(andExpr->args); - lessThanRestrictInfo = make_simple_restrictinfo(lessThanExpr); - greaterThanRestrictInfo = make_simple_restrictinfo(greaterThanExpr); + RestrictInfo *lessThanRestrictInfo = make_simple_restrictinfo(lessThanExpr); + RestrictInfo *greaterThanRestrictInfo = make_simple_restrictinfo( + greaterThanExpr); restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo); restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo); - dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false); + bool dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, + false); if (dropShard) { 
dropShardIntervalList = lappend(dropShardIntervalList, shardInterval); diff --git a/src/backend/distributed/master/master_metadata_utility.c b/src/backend/distributed/master/master_metadata_utility.c index cbc0aeb99..1cad9aa5a 100644 --- a/src/backend/distributed/master/master_metadata_utility.c +++ b/src/backend/distributed/master/master_metadata_utility.c @@ -91,7 +91,6 @@ Datum citus_total_relation_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); - uint64 totalRelationSize = 0; char *tableSizeFunction = PG_TOTAL_RELATION_SIZE_FUNCTION; CheckCitusVersion(ERROR); @@ -101,7 +100,7 @@ citus_total_relation_size(PG_FUNCTION_ARGS) tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } - totalRelationSize = DistributedTableSize(relationId, tableSizeFunction); + uint64 totalRelationSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(totalRelationSize); } @@ -115,7 +114,6 @@ Datum citus_table_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); - uint64 tableSize = 0; char *tableSizeFunction = PG_TABLE_SIZE_FUNCTION; CheckCitusVersion(ERROR); @@ -125,7 +123,7 @@ citus_table_size(PG_FUNCTION_ARGS) tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } - tableSize = DistributedTableSize(relationId, tableSizeFunction); + uint64 tableSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(tableSize); } @@ -139,7 +137,6 @@ Datum citus_relation_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); - uint64 relationSize = 0; char *tableSizeFunction = PG_RELATION_SIZE_FUNCTION; CheckCitusVersion(ERROR); @@ -149,7 +146,7 @@ citus_relation_size(PG_FUNCTION_ARGS) tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } - relationSize = DistributedTableSize(relationId, tableSizeFunction); + uint64 relationSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(relationSize); } @@ -163,8 +160,6 @@ citus_relation_size(PG_FUNCTION_ARGS) static uint64 DistributedTableSize(Oid relationId, char *sizeQuery) { - Relation 
relation = NULL; - List *workerNodeList = NULL; ListCell *workerNodeCell = NULL; uint64 totalRelationSize = 0; @@ -175,7 +170,7 @@ DistributedTableSize(Oid relationId, char *sizeQuery) " blocks which contain multi-shard data modifications"))); } - relation = try_relation_open(relationId, AccessShareLock); + Relation relation = try_relation_open(relationId, AccessShareLock); if (relation == NULL) { @@ -185,7 +180,7 @@ DistributedTableSize(Oid relationId, char *sizeQuery) ErrorIfNotSuitableToGetSize(relationId); - workerNodeList = ActiveReadableNodeList(); + List *workerNodeList = ActiveReadableNodeList(); foreach(workerNodeCell, workerNodeList) { @@ -209,27 +204,22 @@ DistributedTableSize(Oid relationId, char *sizeQuery) static uint64 DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, char *sizeQuery) { - StringInfo tableSizeQuery = NULL; - StringInfo tableSizeStringInfo = NULL; char *workerNodeName = workerNode->workerName; uint32 workerNodePort = workerNode->workerPort; - char *tableSizeString; - uint64 tableSize = 0; - MultiConnection *connection = NULL; uint32 connectionFlag = 0; PGresult *result = NULL; - int queryResult = 0; - List *sizeList = NIL; bool raiseErrors = true; List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId); - tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(relationId, - shardIntervalsOnNode, - sizeQuery); + StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(relationId, + shardIntervalsOnNode, + sizeQuery); - connection = GetNodeConnection(connectionFlag, workerNodeName, workerNodePort); - queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, &result); + MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName, + workerNodePort); + int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, + &result); if (queryResult != 0) { @@ -237,10 +227,10 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid 
relationId, char *sizeQ errmsg("cannot get the size because of a connection error"))); } - sizeList = ReadFirstColumnAsText(result); - tableSizeStringInfo = (StringInfo) linitial(sizeList); - tableSizeString = tableSizeStringInfo->data; - tableSize = atol(tableSizeString); + List *sizeList = ReadFirstColumnAsText(result); + StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList); + char *tableSizeString = tableSizeStringInfo->data; + uint64 tableSize = atol(tableSizeString); PQclear(result); ClearResults(connection, raiseErrors); @@ -260,18 +250,17 @@ GroupShardPlacementsForTableOnGroup(Oid relationId, int32 groupId) DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId); List *resultList = NIL; - int shardIndex = 0; int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength; - for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) + for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { GroupShardPlacement *placementArray = distTableCacheEntry->arrayOfPlacementArrays[shardIndex]; int numberOfPlacements = distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex]; - int placementIndex = 0; - for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) + for (int placementIndex = 0; placementIndex < numberOfPlacements; + placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; @@ -298,24 +287,22 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) { DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId); List *shardIntervalList = NIL; - int shardIndex = 0; int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength; - for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) + for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { GroupShardPlacement *placementArray = 
distTableCacheEntry->arrayOfPlacementArrays[shardIndex]; int numberOfPlacements = distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex]; - int placementIndex = 0; - for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) + for (int placementIndex = 0; placementIndex < numberOfPlacements; + placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; uint64 shardId = placement->shardId; - bool metadataLock = false; - metadataLock = TryLockShardDistributionMetadata(shardId, ShareLock); + bool metadataLock = TryLockShardDistributionMetadata(shardId, ShareLock); /* if the lock is not acquired warn the user */ if (metadataLock == false) @@ -364,12 +351,10 @@ GenerateSizeQueryOnMultiplePlacements(Oid distributedRelationId, List *shardInte ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; char *shardName = get_rel_name(distributedRelationId); - char *shardQualifiedName = NULL; - char *quotedShardName = NULL; AppendShardIdToName(&shardName, shardId); - shardQualifiedName = quote_qualified_identifier(schemaName, shardName); - quotedShardName = quote_literal_cstr(shardQualifiedName); + char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); + char *quotedShardName = quote_literal_cstr(shardQualifiedName); appendStringInfo(selectQuery, sizeQuery, quotedShardName); appendStringInfo(selectQuery, " + "); @@ -509,12 +494,11 @@ LoadShardIntervalList(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *shardList = NIL; - int i = 0; - for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++) + for (int i = 0; i < cacheEntry->shardIntervalArrayLength; i++) { - ShardInterval *newShardInterval = NULL; - newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); + ShardInterval *newShardInterval = (ShardInterval *) palloc0( + sizeof(ShardInterval)); 
CopyShardInterval(cacheEntry->sortedShardIntervalArray[i], newShardInterval); @@ -557,9 +541,8 @@ LoadShardList(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *shardList = NIL; - int i = 0; - for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++) + for (int i = 0; i < cacheEntry->shardIntervalArrayLength; i++) { ShardInterval *currentShardInterval = cacheEntry->sortedShardIntervalArray[i]; uint64 *shardIdPointer = AllocateUint64(currentShardInterval->shardId); @@ -673,10 +656,7 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements) const int scanKeyCount = (onlyConsiderActivePlacements ? 2 : 1); const bool indexOK = false; - bool hasFinalizedPlacements = false; - HeapTuple heapTuple = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[2]; Relation pgPlacement = heap_open(DistPlacementRelationId(), @@ -690,12 +670,13 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements) BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(FILE_FINALIZED)); } - scanDescriptor = systable_beginscan(pgPlacement, - DistPlacementGroupidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgPlacement, + DistPlacementGroupidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); - hasFinalizedPlacements = HeapTupleIsValid(heapTuple); + HeapTuple heapTuple = systable_getnext(scanDescriptor); + bool hasFinalizedPlacements = HeapTupleIsValid(heapTuple); systable_endscan(scanDescriptor); heap_close(pgPlacement, NoLock); @@ -772,23 +753,21 @@ BuildShardPlacementList(ShardInterval *shardInterval) { int64 shardId = shardInterval->shardId; List *shardPlacementList = NIL; - Relation pgPlacement = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; - HeapTuple heapTuple = NULL; - pgPlacement = heap_open(DistPlacementRelationId(), 
AccessShareLock); + Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); - scanDescriptor = systable_beginscan(pgPlacement, - DistPlacementShardidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgPlacement, + DistPlacementShardidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { TupleDesc tupleDescriptor = RelationGetDescr(pgPlacement); @@ -817,23 +796,21 @@ List * AllShardPlacementsOnNodeGroup(int32 groupId) { List *shardPlacementList = NIL; - Relation pgPlacement = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; - HeapTuple heapTuple = NULL; - pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock); + Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_groupid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId)); - scanDescriptor = systable_beginscan(pgPlacement, - DistPlacementGroupidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgPlacement, + DistPlacementGroupidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { TupleDesc tupleDescriptor = RelationGetDescr(pgPlacement); @@ -861,7 +838,6 @@ AllShardPlacementsOnNodeGroup(int32 groupId) static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple) { - GroupShardPlacement *shardPlacement = NULL; bool isNullArray[Natts_pg_dist_placement]; Datum datumArray[Natts_pg_dist_placement]; @@ -877,7 
+853,7 @@ TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple) */ heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray); - shardPlacement = CitusMakeNode(GroupShardPlacement); + GroupShardPlacement *shardPlacement = CitusMakeNode(GroupShardPlacement); shardPlacement->placementId = DatumGetInt64( datumArray[Anum_pg_dist_placement_placementid - 1]); shardPlacement->shardId = DatumGetInt64( @@ -902,9 +878,6 @@ void InsertShardRow(Oid relationId, uint64 shardId, char storageType, text *shardMinValue, text *shardMaxValue) { - Relation pgDistShard = NULL; - TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_shard]; bool isNulls[Natts_pg_dist_shard]; @@ -932,10 +905,10 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType, } /* open shard relation and insert new tuple */ - pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); + Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistShard); - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistShard); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistShard, heapTuple); @@ -958,9 +931,6 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId, char shardState, uint64 shardLength, int32 groupId) { - Relation pgDistPlacement = NULL; - TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_placement]; bool isNulls[Natts_pg_dist_placement]; @@ -979,10 +949,10 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId, values[Anum_pg_dist_placement_groupid - 1] = Int32GetDatum(groupId); /* open shard placement relation and insert new tuple */ - pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); + Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); - 
tupleDescriptor = RelationGetDescr(pgDistPlacement); - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistPlacement, heapTuple); @@ -1003,15 +973,13 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod, Var *distributionColumn, uint32 colocationId, char replicationModel) { - Relation pgDistPartition = NULL; char *distributionColumnString = NULL; - HeapTuple newTuple = NULL; Datum newValues[Natts_pg_dist_partition]; bool newNulls[Natts_pg_dist_partition]; /* open system catalog and insert new tuple */ - pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); + Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); /* form new tuple for pg_dist_partition */ memset(newValues, 0, sizeof(newValues)); @@ -1038,7 +1006,8 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod, newNulls[Anum_pg_dist_partition_partkey - 1] = true; } - newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues, newNulls); + HeapTuple newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues, + newNulls); /* finally insert tuple, build index entries & register cache invalidation */ CatalogTupleInsert(pgDistPartition, newTuple); @@ -1092,21 +1061,19 @@ RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distribut void DeletePartitionRow(Oid distributedRelationId) { - Relation pgDistPartition = NULL; - HeapTuple heapTuple = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); + Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId)); 
- scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false, NULL, - scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false, + NULL, + scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for partition %d", @@ -1134,33 +1101,28 @@ DeletePartitionRow(Oid distributedRelationId) void DeleteShardRow(uint64 shardId) { - Relation pgDistShard = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; - HeapTuple heapTuple = NULL; - Form_pg_dist_shard pgDistShardForm = NULL; - Oid distributedRelationId = InvalidOid; - pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); + Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); - scanDescriptor = systable_beginscan(pgDistShard, - DistShardShardidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistShard, + DistShardShardidIndexId(), indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for shard " UINT64_FORMAT, shardId))); } - pgDistShardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); - distributedRelationId = pgDistShardForm->logicalrelid; + Form_pg_dist_shard pgDistShardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); + Oid distributedRelationId = pgDistShardForm->logicalrelid; simple_heap_delete(pgDistShard, &heapTuple->t_self); @@ -1181,35 +1143,31 @@ DeleteShardRow(uint64 shardId) void DeleteShardPlacementRow(uint64 placementId) { - Relation pgDistPlacement = NULL; 
- SysScanDesc scanDescriptor = NULL; const int scanKeyCount = 1; ScanKeyData scanKey[1]; bool indexOK = true; - HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; bool isNull = false; - uint64 shardId = 0; - pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistPlacement); + Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId)); - scanDescriptor = systable_beginscan(pgDistPlacement, - DistPlacementPlacementidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPlacement, + DistPlacementPlacementidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (heapTuple == NULL) { ereport(ERROR, (errmsg("could not find valid entry for shard placement " INT64_FORMAT, placementId))); } - shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, - tupleDescriptor, &isNull); + uint64 shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, + tupleDescriptor, &isNull); if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement || HeapTupleHasNulls(heapTuple)) { @@ -1233,29 +1191,25 @@ DeleteShardPlacementRow(uint64 placementId) void UpdateShardPlacementState(uint64 placementId, char shardState) { - Relation pgDistPlacement = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; - HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; Datum values[Natts_pg_dist_placement]; bool isnull[Natts_pg_dist_placement]; bool replace[Natts_pg_dist_placement]; - uint64 shardId = INVALID_SHARD_ID; bool colIsNull = false; - pgDistPlacement = 
heap_open(DistPlacementRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistPlacement); + Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId)); - scanDescriptor = systable_beginscan(pgDistPlacement, - DistPlacementPlacementidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPlacement, + DistPlacementPlacementidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for shard placement " @@ -1273,9 +1227,9 @@ UpdateShardPlacementState(uint64 placementId, char shardState) CatalogTupleUpdate(pgDistPlacement, &heapTuple->t_self, heapTuple); - shardId = DatumGetInt64(heap_getattr(heapTuple, - Anum_pg_dist_placement_shardid, - tupleDescriptor, &colIsNull)); + uint64 shardId = DatumGetInt64(heap_getattr(heapTuple, + Anum_pg_dist_placement_shardid, + tupleDescriptor, &colIsNull)); Assert(!colIsNull); CitusInvalidateRelcacheByShardId(shardId); @@ -1293,9 +1247,7 @@ UpdateShardPlacementState(uint64 placementId, char shardState) void EnsureTablePermissions(Oid relationId, AclMode mode) { - AclResult aclresult; - - aclresult = pg_class_aclcheck(relationId, GetUserId(), mode); + AclResult aclresult = pg_class_aclcheck(relationId, GetUserId(), mode); if (aclresult != ACLCHECK_OK) { @@ -1385,17 +1337,14 @@ EnsureSuperUser(void) char * TableOwner(Oid relationId) { - Oid userId = InvalidOid; - HeapTuple tuple; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); + HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (!HeapTupleIsValid(tuple)) { ereport(ERROR, 
(errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation with OID %u does not exist", relationId))); } - userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner; + Oid userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner; ReleaseSysCache(tuple); diff --git a/src/backend/distributed/master/master_node_protocol.c b/src/backend/distributed/master/master_node_protocol.c index 98e4b6e05..c45eea7b9 100644 --- a/src/backend/distributed/master/master_node_protocol.c +++ b/src/backend/distributed/master/master_node_protocol.c @@ -94,26 +94,20 @@ master_get_table_metadata(PG_FUNCTION_ARGS) text *relationName = PG_GETARG_TEXT_P(0); Oid relationId = ResolveRelationId(relationName, false); - DistTableCacheEntry *partitionEntry = NULL; - char *partitionKeyString = NULL; - TypeFuncClass resultTypeClass = 0; Datum partitionKeyExpr = 0; Datum partitionKey = 0; - Datum metadataDatum = 0; - HeapTuple metadataTuple = NULL; TupleDesc metadataDescriptor = NULL; - uint64 shardMaxSizeInBytes = 0; - char shardStorageType = 0; Datum values[TABLE_METADATA_FIELDS]; bool isNulls[TABLE_METADATA_FIELDS]; CheckCitusVersion(ERROR); /* find partition tuple for partitioned relation */ - partitionEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId); /* create tuple descriptor for return value */ - resultTypeClass = get_call_result_type(fcinfo, NULL, &metadataDescriptor); + TypeFuncClass resultTypeClass = get_call_result_type(fcinfo, NULL, + &metadataDescriptor); if (resultTypeClass != TYPEFUNC_COMPOSITE) { ereport(ERROR, (errmsg("return type must be a row type"))); @@ -123,7 +117,7 @@ master_get_table_metadata(PG_FUNCTION_ARGS) memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); - partitionKeyString = partitionEntry->partitionKeyString; + char *partitionKeyString = partitionEntry->partitionKeyString; /* reference tables do not have partition key */ if (partitionKeyString == NULL) @@ -140,10 +134,10 @@ 
master_get_table_metadata(PG_FUNCTION_ARGS) ObjectIdGetDatum(relationId)); } - shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; + uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; /* get storage type */ - shardStorageType = ShardStorageType(relationId); + char shardStorageType = ShardStorageType(relationId); values[0] = ObjectIdGetDatum(relationId); values[1] = shardStorageType; @@ -153,8 +147,8 @@ master_get_table_metadata(PG_FUNCTION_ARGS) values[5] = Int64GetDatum(shardMaxSizeInBytes); values[6] = Int32GetDatum(ShardPlacementPolicy); - metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls); - metadataDatum = HeapTupleGetDatum(metadataTuple); + HeapTuple metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls); + Datum metadataDatum = HeapTupleGetDatum(metadataTuple); PG_RETURN_DATUM(metadataDatum); } @@ -212,17 +206,16 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS) Oid relationId = ResolveRelationId(relationName, false); bool includeSequenceDefaults = true; - MemoryContext oldContext = NULL; - List *tableDDLEventList = NIL; /* create a function context for cross-call persistence */ functionContext = SRF_FIRSTCALL_INIT(); /* switch to memory context appropriate for multiple function calls */ - oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx); + MemoryContext oldContext = MemoryContextSwitchTo( + functionContext->multi_call_memory_ctx); /* allocate DDL statements, and then save position in DDL statements */ - tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults); + List *tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults); tableDDLEventCell = list_head(tableDDLEventList); functionContext->user_fctx = tableDDLEventCell; @@ -266,14 +259,11 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS) Datum master_get_new_shardid(PG_FUNCTION_ARGS) { - uint64 shardId = 0; - Datum shardIdDatum = 0; - EnsureCoordinator(); CheckCitusVersion(ERROR); - shardId = 
GetNextShardId(); - shardIdDatum = Int64GetDatum(shardId); + uint64 shardId = GetNextShardId(); + Datum shardIdDatum = Int64GetDatum(shardId); PG_RETURN_DATUM(shardIdDatum); } @@ -290,12 +280,8 @@ master_get_new_shardid(PG_FUNCTION_ARGS) uint64 GetNextShardId() { - text *sequenceName = NULL; - Oid sequenceId = InvalidOid; - Datum sequenceIdDatum = 0; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - Datum shardIdDatum = 0; uint64 shardId = 0; /* @@ -313,15 +299,15 @@ GetNextShardId() return shardId; } - sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME); - sequenceId = ResolveRelationId(sequenceName, false); - sequenceIdDatum = ObjectIdGetDatum(sequenceId); + text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME); + Oid sequenceId = ResolveRelationId(sequenceName, false); + Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique shardId from sequence */ - shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); + Datum shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); @@ -343,14 +329,11 @@ GetNextShardId() Datum master_get_new_placementid(PG_FUNCTION_ARGS) { - uint64 placementId = 0; - Datum placementIdDatum = 0; - EnsureCoordinator(); CheckCitusVersion(ERROR); - placementId = GetNextPlacementId(); - placementIdDatum = Int64GetDatum(placementId); + uint64 placementId = GetNextPlacementId(); + Datum placementIdDatum = Int64GetDatum(placementId); PG_RETURN_DATUM(placementIdDatum); } @@ -369,12 +352,8 @@ master_get_new_placementid(PG_FUNCTION_ARGS) uint64 GetNextPlacementId(void) { - text *sequenceName = NULL; - Oid sequenceId = InvalidOid; - Datum sequenceIdDatum = 0; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - Datum placementIdDatum = 0; uint64 placementId = 0; /* @@ -392,15 +371,15 @@ 
GetNextPlacementId(void) return placementId; } - sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME); - sequenceId = ResolveRelationId(sequenceName, false); - sequenceIdDatum = ObjectIdGetDatum(sequenceId); + text *sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME); + Oid sequenceId = ResolveRelationId(sequenceName, false); + Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique placement id from sequence */ - placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); + Datum placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); @@ -465,17 +444,16 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { - MemoryContext oldContext = NULL; - List *workerNodeList = NIL; TupleDesc tupleDescriptor = NULL; /* create a function context for cross-call persistence */ functionContext = SRF_FIRSTCALL_INIT(); /* switch to memory context appropriate for multiple function calls */ - oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx); + MemoryContext oldContext = MemoryContextSwitchTo( + functionContext->multi_call_memory_ctx); - workerNodeList = ActiveReadableWorkerNodeList(); + List *workerNodeList = ActiveReadableWorkerNodeList(); workerNodeCount = (uint32) list_length(workerNodeList); functionContext->user_fctx = workerNodeList; @@ -525,14 +503,10 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS) Oid ResolveRelationId(text *relationName, bool missingOk) { - List *relationNameList = NIL; - RangeVar *relation = NULL; - Oid relationId = InvalidOid; - /* resolve relationId from passed in schema and relation name */ - relationNameList = textToQualifiedNameList(relationName); - relation = makeRangeVarFromNameList(relationNameList); - relationId = RangeVarGetRelid(relation, 
NoLock, missingOk); + List *relationNameList = textToQualifiedNameList(relationName); + RangeVar *relation = makeRangeVarFromNameList(relationNameList); + Oid relationId = RangeVarGetRelid(relation, NoLock, missingOk); return relationId; } @@ -551,22 +525,18 @@ List * GetTableDDLEvents(Oid relationId, bool includeSequenceDefaults) { List *tableDDLEventList = NIL; - List *tableCreationCommandList = NIL; - List *indexAndConstraintCommandList = NIL; - List *replicaIdentityEvents = NIL; - List *policyCommands = NIL; - tableCreationCommandList = GetTableCreationCommands(relationId, - includeSequenceDefaults); + List *tableCreationCommandList = GetTableCreationCommands(relationId, + includeSequenceDefaults); tableDDLEventList = list_concat(tableDDLEventList, tableCreationCommandList); - indexAndConstraintCommandList = GetTableIndexAndConstraintCommands(relationId); + List *indexAndConstraintCommandList = GetTableIndexAndConstraintCommands(relationId); tableDDLEventList = list_concat(tableDDLEventList, indexAndConstraintCommandList); - replicaIdentityEvents = GetTableReplicaIdentityCommand(relationId); + List *replicaIdentityEvents = GetTableReplicaIdentityCommand(relationId); tableDDLEventList = list_concat(tableDDLEventList, replicaIdentityEvents); - policyCommands = CreatePolicyCommands(relationId); + List *policyCommands = CreatePolicyCommands(relationId); tableDDLEventList = list_concat(tableDDLEventList, policyCommands); return tableDDLEventList; @@ -581,7 +551,6 @@ static List * GetTableReplicaIdentityCommand(Oid relationId) { List *replicaIdentityCreateCommandList = NIL; - char *replicaIdentityCreateCommand = NULL; /* * We skip non-relations because postgres does not support @@ -593,7 +562,7 @@ GetTableReplicaIdentityCommand(Oid relationId) return NIL; } - replicaIdentityCreateCommand = pg_get_replica_identity_command(relationId); + char *replicaIdentityCreateCommand = pg_get_replica_identity_command(relationId); if (replicaIdentityCreateCommand) { @@ -614,10 
+583,6 @@ List * GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults) { List *tableDDLEventList = NIL; - char tableType = 0; - char *tableSchemaDef = NULL; - char *tableColumnOptionsDef = NULL; - char *tableOwnerDef = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be @@ -630,7 +595,7 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults) PushOverrideSearchPath(overridePath); /* if foreign table, fetch extension and server definitions */ - tableType = get_rel_relkind(relationId); + char tableType = get_rel_relkind(relationId); if (tableType == RELKIND_FOREIGN_TABLE) { char *extensionDef = pg_get_extensiondef_string(relationId); @@ -644,8 +609,9 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults) } /* fetch table schema and column option definitions */ - tableSchemaDef = pg_get_tableschemadef_string(relationId, includeSequenceDefaults); - tableColumnOptionsDef = pg_get_tablecolumnoptionsdef_string(relationId); + char *tableSchemaDef = pg_get_tableschemadef_string(relationId, + includeSequenceDefaults); + char *tableColumnOptionsDef = pg_get_tablecolumnoptionsdef_string(relationId); tableDDLEventList = lappend(tableDDLEventList, tableSchemaDef); if (tableColumnOptionsDef != NULL) @@ -653,7 +619,7 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults) tableDDLEventList = lappend(tableDDLEventList, tableColumnOptionsDef); } - tableOwnerDef = TableOwnerResetCommand(relationId); + char *tableOwnerDef = TableOwnerResetCommand(relationId); if (tableOwnerDef != NULL) { tableDDLEventList = lappend(tableDDLEventList, tableOwnerDef); @@ -674,11 +640,8 @@ List * GetTableIndexAndConstraintCommands(Oid relationId) { List *indexDDLEventList = NIL; - Relation pgIndex = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be @@ -691,16 
+654,16 @@ GetTableIndexAndConstraintCommands(Oid relationId) PushOverrideSearchPath(overridePath); /* open system catalog and scan all indexes that belong to this table */ - pgIndex = heap_open(IndexRelationId, AccessShareLock); + Relation pgIndex = heap_open(IndexRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); - scanDescriptor = systable_beginscan(pgIndex, - IndexIndrelidIndexId, true, /* indexOK */ - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgIndex, + IndexIndrelidIndexId, true, /* indexOK */ + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(heapTuple); @@ -824,8 +787,6 @@ WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor) { Datum values[WORKER_NODE_FIELDS]; bool isNulls[WORKER_NODE_FIELDS]; - HeapTuple workerNodeTuple = NULL; - Datum workerNodeDatum = 0; memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); @@ -833,8 +794,8 @@ WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor) values[0] = CStringGetTextDatum(workerNode->workerName); values[1] = Int64GetDatum((int64) workerNode->workerPort); - workerNodeTuple = heap_form_tuple(tupleDescriptor, values, isNulls); - workerNodeDatum = HeapTupleGetDatum(workerNodeTuple); + HeapTuple workerNodeTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + Datum workerNodeDatum = HeapTupleGetDatum(workerNodeTuple); return workerNodeDatum; } diff --git a/src/backend/distributed/master/master_repair_shards.c b/src/backend/distributed/master/master_repair_shards.c index d7c8313ef..64a381c8e 100644 --- a/src/backend/distributed/master/master_repair_shards.c +++ b/src/backend/distributed/master/master_repair_shards.c @@ -139,9 +139,6 @@ BlockWritesToShardList(List *shardList) { 
ListCell *shardCell = NULL; - bool shouldSyncMetadata = false; - ShardInterval *firstShardInterval = NULL; - Oid firstDistributedTableId = InvalidOid; foreach(shardCell, shardList) { @@ -167,10 +164,10 @@ BlockWritesToShardList(List *shardList) * Since the function assumes that the input shards are colocated, * calculating shouldSyncMetadata for a single table is sufficient. */ - firstShardInterval = (ShardInterval *) linitial(shardList); - firstDistributedTableId = firstShardInterval->relationId; + ShardInterval *firstShardInterval = (ShardInterval *) linitial(shardList); + Oid firstDistributedTableId = firstShardInterval->relationId; - shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId); + bool shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId); if (shouldSyncMetadata) { LockShardListMetadataOnWorkers(ExclusiveLock, shardList); @@ -225,13 +222,7 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, char relationKind = get_rel_relkind(distributedTableId); char *tableOwner = TableOwner(shardInterval->relationId); bool missingOk = false; - bool includeData = false; - bool partitionedTable = false; - List *ddlCommandList = NIL; - List *foreignConstraintCommandList = NIL; - List *placementList = NIL; - ShardPlacement *placement = NULL; /* prevent table from being dropped */ LockRelationOid(distributedTableId, AccessShareLock); @@ -287,13 +278,14 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, * If the shard belongs to a partitioned table, we need to load the data after * creating the partitions and the partitioning hierarcy. 
*/ - partitionedTable = PartitionedTableNoLock(distributedTableId); - includeData = !partitionedTable; + bool partitionedTable = PartitionedTableNoLock(distributedTableId); + bool includeData = !partitionedTable; /* we generate necessary commands to recreate the shard in target node */ - ddlCommandList = + List *ddlCommandList = CopyShardCommandList(shardInterval, sourceNodeName, sourceNodePort, includeData); - foreignConstraintCommandList = CopyShardForeignConstraintCommandList(shardInterval); + List *foreignConstraintCommandList = CopyShardForeignConstraintCommandList( + shardInterval); ddlCommandList = list_concat(ddlCommandList, foreignConstraintCommandList); /* @@ -305,12 +297,10 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, */ if (partitionedTable) { - List *partitionCommandList = NIL; - char *shardName = ConstructQualifiedShardName(shardInterval); StringInfo copyShardDataCommand = makeStringInfo(); - partitionCommandList = + List *partitionCommandList = CopyPartitionShardsCommandList(shardInterval, sourceNodeName, sourceNodePort); ddlCommandList = list_concat(ddlCommandList, partitionCommandList); @@ -328,9 +318,10 @@ RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, ddlCommandList); /* after successful repair, we update shard state as healthy*/ - placementList = ShardPlacementList(shardId); - placement = SearchShardPlacementInList(placementList, targetNodeName, targetNodePort, - missingOk); + List *placementList = ShardPlacementList(shardId); + ShardPlacement *placement = SearchShardPlacementInList(placementList, targetNodeName, + targetNodePort, + missingOk); UpdateShardPlacementState(placement->placementId, FILE_FINALIZED); } @@ -347,13 +338,12 @@ CopyPartitionShardsCommandList(ShardInterval *shardInterval, char *sourceNodeNam int32 sourceNodePort) { Oid distributedTableId = shardInterval->relationId; - List *partitionList = NIL; ListCell *partitionOidCell = NULL; List *ddlCommandList = 
NIL; Assert(PartitionedTableNoLock(distributedTableId)); - partitionList = PartitionList(distributedTableId); + List *partitionList = PartitionList(distributedTableId); foreach(partitionOidCell, partitionList) { Oid partitionOid = lfirst_oid(partitionOidCell); @@ -361,15 +351,13 @@ CopyPartitionShardsCommandList(ShardInterval *shardInterval, char *sourceNodeNam ColocatedShardIdInRelation(partitionOid, shardInterval->shardIndex); ShardInterval *partitionShardInterval = LoadShardInterval(partitionShardId); bool includeData = false; - List *copyCommandList = NIL; - char *attachPartitionCommand = NULL; - copyCommandList = + List *copyCommandList = CopyShardCommandList(partitionShardInterval, sourceNodeName, sourceNodePort, includeData); ddlCommandList = list_concat(ddlCommandList, copyCommandList); - attachPartitionCommand = + char *attachPartitionCommand = GenerateAttachShardPartitionCommand(partitionShardInterval); ddlCommandList = lappend(ddlCommandList, attachPartitionCommand); } @@ -387,21 +375,23 @@ EnsureShardCanBeRepaired(int64 shardId, char *sourceNodeName, int32 sourceNodePo char *targetNodeName, int32 targetNodePort) { List *shardPlacementList = ShardPlacementList(shardId); - ShardPlacement *sourcePlacement = NULL; - ShardPlacement *targetPlacement = NULL; bool missingSourceOk = false; bool missingTargetOk = false; - sourcePlacement = SearchShardPlacementInList(shardPlacementList, sourceNodeName, - sourceNodePort, missingSourceOk); + ShardPlacement *sourcePlacement = SearchShardPlacementInList(shardPlacementList, + sourceNodeName, + sourceNodePort, + missingSourceOk); if (sourcePlacement->shardState != FILE_FINALIZED) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("source placement must be in finalized state"))); } - targetPlacement = SearchShardPlacementInList(shardPlacementList, targetNodeName, - targetNodePort, missingTargetOk); + ShardPlacement *targetPlacement = SearchShardPlacementInList(shardPlacementList, + targetNodeName, + 
targetNodePort, + missingTargetOk); if (targetPlacement->shardState != FILE_INACTIVE) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -462,13 +452,11 @@ CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName, { int64 shardId = shardInterval->shardId; char *shardName = ConstructQualifiedShardName(shardInterval); - List *tableRecreationCommandList = NIL; - List *indexCommandList = NIL; List *copyShardToNodeCommandsList = NIL; StringInfo copyShardDataCommand = makeStringInfo(); Oid relationId = shardInterval->relationId; - tableRecreationCommandList = RecreateTableDDLCommandList(relationId); + List *tableRecreationCommandList = RecreateTableDDLCommandList(relationId); tableRecreationCommandList = WorkerApplyShardDDLCommandList(tableRecreationCommandList, shardId); @@ -491,7 +479,7 @@ CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName, copyShardDataCommand->data); } - indexCommandList = GetTableIndexAndConstraintCommands(relationId); + List *indexCommandList = GetTableIndexAndConstraintCommands(relationId); indexCommandList = WorkerApplyShardDDLCommandList(indexCommandList, shardId); copyShardToNodeCommandsList = list_concat(copyShardToNodeCommandsList, @@ -555,17 +543,13 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval, char *command = (char *) lfirst(commandCell); char *escapedCommand = quote_literal_cstr(command); - Oid referencedRelationId = InvalidOid; - Oid referencedSchemaId = InvalidOid; - char *referencedSchemaName = NULL; - char *escapedReferencedSchemaName = NULL; uint64 referencedShardId = INVALID_SHARD_ID; bool colocatedForeignKey = false; StringInfo applyForeignConstraintCommand = makeStringInfo(); /* we need to parse the foreign constraint command to get referencing table id */ - referencedRelationId = ForeignConstraintGetReferencedTableId(command); + Oid referencedRelationId = ForeignConstraintGetReferencedTableId(command); if (referencedRelationId == InvalidOid) { 
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -573,9 +557,9 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval, errdetail("Referenced relation cannot be found."))); } - referencedSchemaId = get_rel_namespace(referencedRelationId); - referencedSchemaName = get_namespace_name(referencedSchemaId); - escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); + Oid referencedSchemaId = get_rel_namespace(referencedRelationId); + char *referencedSchemaName = get_namespace_name(referencedSchemaId); + char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); if (PartitionMethod(referencedRelationId) == DISTRIBUTE_BY_NONE) { @@ -635,9 +619,8 @@ ConstructQualifiedShardName(ShardInterval *shardInterval) Oid schemaId = get_rel_namespace(shardInterval->relationId); char *schemaName = get_namespace_name(schemaId); char *tableName = get_rel_name(shardInterval->relationId); - char *shardName = NULL; - shardName = pstrdup(tableName); + char *shardName = pstrdup(tableName); AppendShardIdToName(&shardName, shardInterval->shardId); shardName = quote_qualified_identifier(schemaName, shardName); @@ -660,9 +643,6 @@ RecreateTableDDLCommandList(Oid relationId) relationName); StringInfo dropCommand = makeStringInfo(); - List *createCommandList = NIL; - List *dropCommandList = NIL; - List *recreateCommandList = NIL; char relationKind = get_rel_relkind(relationId); bool includeSequenceDefaults = false; @@ -684,9 +664,10 @@ RecreateTableDDLCommandList(Oid relationId) "table"))); } - dropCommandList = list_make1(dropCommand->data); - createCommandList = GetTableCreationCommands(relationId, includeSequenceDefaults); - recreateCommandList = list_concat(dropCommandList, createCommandList); + List *dropCommandList = list_make1(dropCommand->data); + List *createCommandList = GetTableCreationCommands(relationId, + includeSequenceDefaults); + List *recreateCommandList = list_concat(dropCommandList, createCommandList); return 
recreateCommandList; } diff --git a/src/backend/distributed/master/master_split_shards.c b/src/backend/distributed/master/master_split_shards.c index d7cf8f328..4ce9e8abd 100644 --- a/src/backend/distributed/master/master_split_shards.c +++ b/src/backend/distributed/master/master_split_shards.c @@ -62,16 +62,13 @@ Datum worker_hash(PG_FUNCTION_ARGS) { Datum valueDatum = PG_GETARG_DATUM(0); - Datum hashedValueDatum = 0; - TypeCacheEntry *typeEntry = NULL; - FmgrInfo *hashFunction = NULL; - Oid valueDataType = InvalidOid; CheckCitusVersion(ERROR); /* figure out hash function from the data type */ - valueDataType = get_fn_expr_argtype(fcinfo->flinfo, 0); - typeEntry = lookup_type_cache(valueDataType, TYPECACHE_HASH_PROC_FINFO); + Oid valueDataType = get_fn_expr_argtype(fcinfo->flinfo, 0); + TypeCacheEntry *typeEntry = lookup_type_cache(valueDataType, + TYPECACHE_HASH_PROC_FINFO); if (typeEntry->hash_proc_finfo.fn_oid == InvalidOid) { @@ -80,11 +77,12 @@ worker_hash(PG_FUNCTION_ARGS) errhint("Cast input to a data type with a hash function."))); } - hashFunction = palloc0(sizeof(FmgrInfo)); + FmgrInfo *hashFunction = palloc0(sizeof(FmgrInfo)); fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext); /* calculate hash value */ - hashedValueDatum = FunctionCall1Coll(hashFunction, PG_GET_COLLATION(), valueDatum); + Datum hashedValueDatum = FunctionCall1Coll(hashFunction, PG_GET_COLLATION(), + valueDatum); PG_RETURN_INT32(hashedValueDatum); } diff --git a/src/backend/distributed/master/master_stage_protocol.c b/src/backend/distributed/master/master_stage_protocol.c index c17f92a38..18f43502a 100644 --- a/src/backend/distributed/master/master_stage_protocol.c +++ b/src/backend/distributed/master/master_stage_protocol.c @@ -80,21 +80,17 @@ master_create_empty_shard(PG_FUNCTION_ARGS) { text *relationNameText = PG_GETARG_TEXT_P(0); char *relationName = text_to_cstring(relationNameText); - uint64 shardId = INVALID_SHARD_ID; uint32 attemptableNodeCount = 
0; ObjectAddress tableAddress = { 0 }; uint32 candidateNodeIndex = 0; List *candidateNodeList = NIL; - List *workerNodeList = NIL; text *nullMinValue = NULL; text *nullMaxValue = NULL; - char partitionMethod = 0; char storageType = SHARD_STORAGE_TABLE; Oid relationId = ResolveRelationId(relationNameText, false); char relationKind = get_rel_relkind(relationId); - char replicationModel = REPLICATION_MODEL_INVALID; CheckCitusVersion(ERROR); @@ -136,7 +132,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS) } } - partitionMethod = PartitionMethod(relationId); + char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH) { ereport(ERROR, (errmsg("relation \"%s\" is a hash partitioned table", @@ -152,15 +148,15 @@ master_create_empty_shard(PG_FUNCTION_ARGS) "on reference tables"))); } - replicationModel = TableReplicationModel(relationId); + char replicationModel = TableReplicationModel(relationId); EnsureReplicationSettings(relationId, replicationModel); /* generate new and unique shardId from sequence */ - shardId = GetNextShardId(); + uint64 shardId = GetNextShardId(); /* if enough live groups, add an extra candidate node as backup */ - workerNodeList = DistributedTablePlacementNodeList(NoLock); + List *workerNodeList = DistributedTablePlacementNodeList(NoLock); if (list_length(workerNodeList) > ShardReplicationFactor) { @@ -232,33 +228,20 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) char *sourceTableName = text_to_cstring(sourceTableNameText); char *sourceNodeName = text_to_cstring(sourceNodeNameText); - Oid shardSchemaOid = 0; - char *shardSchemaName = NULL; - char *shardTableName = NULL; - char *shardQualifiedName = NULL; - List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; - uint64 newShardSize = 0; - uint64 shardMaxSizeInBytes = 0; float4 shardFillLevel = 0.0; - char partitionMethod = 0; - ShardInterval *shardInterval = NULL; - Oid relationId = InvalidOid; - bool cstoreTable = false; - - char storageType = 0; 
CheckCitusVersion(ERROR); - shardInterval = LoadShardInterval(shardId); - relationId = shardInterval->relationId; + ShardInterval *shardInterval = LoadShardInterval(shardId); + Oid relationId = shardInterval->relationId; /* don't allow the table to be dropped */ LockRelationOid(relationId, AccessShareLock); - cstoreTable = CStoreTable(relationId); - storageType = shardInterval->storageType; + bool cstoreTable = CStoreTable(relationId); + char storageType = shardInterval->storageType; EnsureTablePermissions(relationId, ACL_INSERT); @@ -268,7 +251,7 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) errdetail("The underlying shard is not a regular table"))); } - partitionMethod = PartitionMethod(relationId); + char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_NONE) { ereport(ERROR, (errmsg("cannot append to shardId " UINT64_FORMAT, shardId), @@ -283,16 +266,17 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) LockShardResource(shardId, ExclusiveLock); /* get schame name of the target shard */ - shardSchemaOid = get_rel_namespace(relationId); - shardSchemaName = get_namespace_name(shardSchemaOid); + Oid shardSchemaOid = get_rel_namespace(relationId); + char *shardSchemaName = get_namespace_name(shardSchemaOid); /* Build shard table name. 
*/ - shardTableName = get_rel_name(relationId); + char *shardTableName = get_rel_name(relationId); AppendShardIdToName(&shardTableName, shardId); - shardQualifiedName = quote_qualified_identifier(shardSchemaName, shardTableName); + char *shardQualifiedName = quote_qualified_identifier(shardSchemaName, + shardTableName); - shardPlacementList = FinalizedShardPlacementList(shardId); + List *shardPlacementList = FinalizedShardPlacementList(shardId); if (shardPlacementList == NIL) { ereport(ERROR, (errmsg("could not find any shard placements for shardId " @@ -309,7 +293,6 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) MultiConnection *connection = GetPlacementConnection(FOR_DML, shardPlacement, NULL); PGresult *queryResult = NULL; - int executeResult = 0; StringInfo workerAppendQuery = makeStringInfo(); appendStringInfo(workerAppendQuery, WORKER_APPEND_TABLE_TO_SHARD, @@ -319,8 +302,9 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) RemoteTransactionBeginIfNecessary(connection); - executeResult = ExecuteOptionalRemoteCommand(connection, workerAppendQuery->data, - &queryResult); + int executeResult = ExecuteOptionalRemoteCommand(connection, + workerAppendQuery->data, + &queryResult); PQclear(queryResult); ForgetResults(connection); @@ -333,10 +317,10 @@ master_append_table_to_shard(PG_FUNCTION_ARGS) MarkFailedShardPlacements(); /* update shard statistics and get new shard size */ - newShardSize = UpdateShardStatistics(shardId); + uint64 newShardSize = UpdateShardStatistics(shardId); /* calculate ratio of current shard size compared to shard max size */ - shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; + uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; shardFillLevel = ((float4) newShardSize / (float4) shardMaxSizeInBytes); PG_RETURN_FLOAT4(shardFillLevel); @@ -351,11 +335,10 @@ Datum master_update_shard_statistics(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); - uint64 shardSize = 0; CheckCitusVersion(ERROR); - shardSize = 
UpdateShardStatistics(shardId); + uint64 shardSize = UpdateShardStatistics(shardId); PG_RETURN_INT64(shardSize); } @@ -393,7 +376,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, int attemptCount = replicationFactor; int workerNodeCount = list_length(workerNodeList); int placementsCreated = 0; - int attemptNumber = 0; List *foreignConstraintCommandList = GetTableForeignConstraintCommands(relationId); bool includeSequenceDefaults = false; List *ddlCommandList = GetTableDDLEvents(relationId, includeSequenceDefaults); @@ -406,7 +388,7 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, attemptCount++; } - for (attemptNumber = 0; attemptNumber < attemptCount; attemptNumber++) + for (int attemptNumber = 0; attemptNumber < attemptCount; attemptNumber++) { int workerNodeIndex = attemptNumber % workerNodeCount; WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex); @@ -419,7 +401,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlag, nodeName, nodePort, relationOwner, NULL); - List *commandList = NIL; if (PQstatus(connection->pgConn) != CONNECTION_OK) { @@ -429,9 +410,9 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, continue; } - commandList = WorkerCreateShardCommandList(relationId, shardIndex, shardId, - ddlCommandList, - foreignConstraintCommandList); + List *commandList = WorkerCreateShardCommandList(relationId, shardIndex, shardId, + ddlCommandList, + foreignConstraintCommandList); ExecuteCriticalRemoteCommandList(connection, commandList); @@ -463,23 +444,21 @@ InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList, int workerStartIndex, int replicationFactor) { int workerNodeCount = list_length(workerNodeList); - int attemptNumber = 0; int placementsInserted = 0; List *insertedShardPlacements = NIL; - for (attemptNumber = 0; attemptNumber < 
replicationFactor; attemptNumber++) + for (int attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++) { int workerNodeIndex = (workerStartIndex + attemptNumber) % workerNodeCount; WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex); uint32 nodeGroupId = workerNode->groupId; const RelayFileState shardState = FILE_FINALIZED; const uint64 shardSize = 0; - uint64 shardPlacementId = 0; - ShardPlacement *shardPlacement = NULL; - shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, - shardState, shardSize, nodeGroupId); - shardPlacement = LoadShardPlacement(shardId, shardPlacementId); + uint64 shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, + shardState, shardSize, + nodeGroupId); + ShardPlacement *shardPlacement = LoadShardPlacement(shardId, shardPlacementId); insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement); placementsInserted++; @@ -519,8 +498,6 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, uint64 shardId = shardPlacement->shardId; ShardInterval *shardInterval = LoadShardInterval(shardId); int shardIndex = -1; - List *commandList = NIL; - Task *task = NULL; List *relationShardList = RelationShardListForShardCreate(shardInterval); if (colocatedShard) @@ -528,11 +505,12 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, shardIndex = ShardIndex(shardInterval); } - commandList = WorkerCreateShardCommandList(distributedRelationId, shardIndex, - shardId, ddlCommandList, - foreignConstraintCommandList); + List *commandList = WorkerCreateShardCommandList(distributedRelationId, + shardIndex, + shardId, ddlCommandList, + foreignConstraintCommandList); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = INVALID_JOB_ID; task->taskId = taskId++; task->taskType = DDL_TASK; @@ -580,26 +558,23 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, static List * 
RelationShardListForShardCreate(ShardInterval *shardInterval) { - List *relationShardList = NIL; - RelationShard *relationShard = NULL; Oid relationId = shardInterval->relationId; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *referencedRelationList = cacheEntry->referencedRelationsViaForeignKey; List *referencingRelationList = cacheEntry->referencingRelationsViaForeignKey; - List *allForeignKeyRelations = NIL; int shardIndex = -1; ListCell *fkeyRelationIdCell = NULL; /* list_concat_*() modifies the first arg, so make a copy first */ - allForeignKeyRelations = list_copy(referencedRelationList); + List *allForeignKeyRelations = list_copy(referencedRelationList); allForeignKeyRelations = list_concat_unique_oid(allForeignKeyRelations, referencingRelationList); /* record the placement access of the shard itself */ - relationShard = CitusMakeNode(RelationShard); + RelationShard *relationShard = CitusMakeNode(RelationShard); relationShard->relationId = relationId; relationShard->shardId = shardInterval->shardId; - relationShardList = list_make1(relationShard); + List *relationShardList = list_make1(relationShard); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH && cacheEntry->colocationId != INVALID_COLOCATION_ID) @@ -612,7 +587,6 @@ RelationShardListForShardCreate(ShardInterval *shardInterval) foreach(fkeyRelationIdCell, allForeignKeyRelations) { Oid fkeyRelationid = lfirst_oid(fkeyRelationIdCell); - RelationShard *fkeyRelationShard = NULL; uint64 fkeyShardId = INVALID_SHARD_ID; if (!IsDistributedTable(fkeyRelationid)) @@ -645,7 +619,7 @@ RelationShardListForShardCreate(ShardInterval *shardInterval) continue; } - fkeyRelationShard = CitusMakeNode(RelationShard); + RelationShard *fkeyRelationShard = CitusMakeNode(RelationShard); fkeyRelationShard->relationId = fkeyRelationid; fkeyRelationShard->shardId = fkeyShardId; @@ -714,16 +688,12 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId, char *command = 
(char *) lfirst(foreignConstraintCommandCell); char *escapedCommand = quote_literal_cstr(command); - Oid referencedRelationId = InvalidOid; - Oid referencedSchemaId = InvalidOid; - char *referencedSchemaName = NULL; - char *escapedReferencedSchemaName = NULL; uint64 referencedShardId = INVALID_SHARD_ID; StringInfo applyForeignConstraintCommand = makeStringInfo(); /* we need to parse the foreign constraint command to get referencing table id */ - referencedRelationId = ForeignConstraintGetReferencedTableId(command); + Oid referencedRelationId = ForeignConstraintGetReferencedTableId(command); if (referencedRelationId == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -731,9 +701,9 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId, errdetail("Referenced relation cannot be found."))); } - referencedSchemaId = get_rel_namespace(referencedRelationId); - referencedSchemaName = get_namespace_name(referencedSchemaId); - escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); + Oid referencedSchemaId = get_rel_namespace(referencedRelationId); + char *referencedSchemaName = get_namespace_name(referencedSchemaId); + char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); /* * In case of self referencing shards, relation itself might not be distributed @@ -792,8 +762,6 @@ UpdateShardStatistics(int64 shardId) Oid relationId = shardInterval->relationId; char storageType = shardInterval->storageType; char partitionType = PartitionMethod(relationId); - char *shardQualifiedName = NULL; - List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; bool statsOK = false; uint64 shardSize = 0; @@ -807,9 +775,9 @@ UpdateShardStatistics(int64 shardId) AppendShardIdToName(&shardName, shardId); - shardQualifiedName = quote_qualified_identifier(schemaName, shardName); + char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); - shardPlacementList = 
FinalizedShardPlacementList(shardId); + List *shardPlacementList = FinalizedShardPlacementList(shardId); /* get shard's statistics from a shard placement */ foreach(shardPlacementCell, shardPlacementList) @@ -881,28 +849,19 @@ static bool WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, uint64 *shardSize, text **shardMinValue, text **shardMaxValue) { - char *quotedShardName = NULL; - bool cstoreTable = false; StringInfo tableSizeQuery = makeStringInfo(); const uint32 unusedTableId = 1; char partitionType = PartitionMethod(relationId); - Var *partitionColumn = NULL; - char *partitionColumnName = NULL; StringInfo partitionValueQuery = makeStringInfo(); PGresult *queryResult = NULL; const int minValueIndex = 0; const int maxValueIndex = 1; - uint64 tableSize = 0; - char *tableSizeString = NULL; char *tableSizeStringEnd = NULL; - bool minValueIsNull = false; - bool maxValueIsNull = false; int connectionFlags = 0; - int executeCommand = 0; MultiConnection *connection = GetPlacementConnection(connectionFlags, placement, NULL); @@ -911,9 +870,9 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, *shardMinValue = NULL; *shardMaxValue = NULL; - quotedShardName = quote_literal_cstr(shardName); + char *quotedShardName = quote_literal_cstr(shardName); - cstoreTable = CStoreTable(relationId); + bool cstoreTable = CStoreTable(relationId); if (cstoreTable) { appendStringInfo(tableSizeQuery, SHARD_CSTORE_TABLE_SIZE_QUERY, quotedShardName); @@ -923,14 +882,14 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, appendStringInfo(tableSizeQuery, SHARD_TABLE_SIZE_QUERY, quotedShardName); } - executeCommand = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, - &queryResult); + int executeCommand = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, + &queryResult); if (executeCommand != 0) { return false; } - tableSizeString = PQgetvalue(queryResult, 0, 0); + char *tableSizeString 
= PQgetvalue(queryResult, 0, 0); if (tableSizeString == NULL) { PQclear(queryResult); @@ -939,7 +898,7 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, } errno = 0; - tableSize = pg_strtouint64(tableSizeString, &tableSizeStringEnd, 0); + uint64 tableSize = pg_strtouint64(tableSizeString, &tableSizeStringEnd, 0); if (errno != 0 || (*tableSizeStringEnd) != '\0') { PQclear(queryResult); @@ -959,8 +918,8 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, } /* fill in the partition column name and shard name in the query. */ - partitionColumn = PartitionColumn(relationId, unusedTableId); - partitionColumnName = get_attname(relationId, partitionColumn->varattno, false); + Var *partitionColumn = PartitionColumn(relationId, unusedTableId); + char *partitionColumnName = get_attname(relationId, partitionColumn->varattno, false); appendStringInfo(partitionValueQuery, SHARD_RANGE_QUERY, partitionColumnName, partitionColumnName, shardName); @@ -971,8 +930,8 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, return false; } - minValueIsNull = PQgetisnull(queryResult, 0, minValueIndex); - maxValueIsNull = PQgetisnull(queryResult, 0, maxValueIndex); + bool minValueIsNull = PQgetisnull(queryResult, 0, minValueIndex); + bool maxValueIsNull = PQgetisnull(queryResult, 0, maxValueIndex); if (!minValueIsNull && !maxValueIsNull) { diff --git a/src/backend/distributed/master/master_truncate.c b/src/backend/distributed/master/master_truncate.c index 446a6549b..7bfa0ddac 100644 --- a/src/backend/distributed/master/master_truncate.c +++ b/src/backend/distributed/master/master_truncate.c @@ -41,21 +41,16 @@ PG_FUNCTION_INFO_V1(citus_truncate_trigger); Datum citus_truncate_trigger(PG_FUNCTION_ARGS) { - TriggerData *triggerData = NULL; - Relation truncatedRelation = NULL; - Oid relationId = InvalidOid; - char partitionMethod = 0; - if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, 
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } - triggerData = (TriggerData *) fcinfo->context; - truncatedRelation = triggerData->tg_relation; - relationId = RelationGetRelid(truncatedRelation); - partitionMethod = PartitionMethod(relationId); + TriggerData *triggerData = (TriggerData *) fcinfo->context; + Relation truncatedRelation = triggerData->tg_relation; + Oid relationId = RelationGetRelid(truncatedRelation); + char partitionMethod = PartitionMethod(relationId); if (!EnableDDLPropagation) { @@ -110,7 +105,6 @@ TruncateTaskList(Oid relationId) ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; StringInfo shardQueryString = makeStringInfo(); - Task *task = NULL; char *shardName = pstrdup(relationName); AppendShardIdToName(&shardName, shardId); @@ -118,7 +112,7 @@ TruncateTaskList(Oid relationId) appendStringInfo(shardQueryString, "TRUNCATE TABLE %s CASCADE", quote_qualified_identifier(schemaName, shardName)); - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->jobId = INVALID_JOB_ID; task->taskId = taskId++; task->taskType = DDL_TASK; diff --git a/src/backend/distributed/master/worker_node_manager.c b/src/backend/distributed/master/worker_node_manager.c index 47691a9ff..e550a4645 100644 --- a/src/backend/distributed/master/worker_node_manager.c +++ b/src/backend/distributed/master/worker_node_manager.c @@ -67,7 +67,6 @@ WorkerGetRandomCandidateNode(List *currentNodeList) WorkerNode *workerNode = NULL; bool wantSameRack = false; uint32 tryCount = WORKER_RACK_TRIES; - uint32 tryIndex = 0; uint32 currentNodeCount = list_length(currentNodeList); List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList); @@ -104,17 +103,15 @@ WorkerGetRandomCandidateNode(List *currentNodeList) * If after a predefined number of tries, we still cannot find such a node, * we simply give up and return the last worker node we found. 
*/ - for (tryIndex = 0; tryIndex < tryCount; tryIndex++) + for (uint32 tryIndex = 0; tryIndex < tryCount; tryIndex++) { WorkerNode *firstNode = (WorkerNode *) linitial(currentNodeList); char *firstRack = firstNode->workerRack; - char *workerRack = NULL; - bool sameRack = false; workerNode = FindRandomNodeFromList(candidateWorkerNodeList); - workerRack = workerNode->workerRack; + char *workerRack = workerNode->workerRack; - sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0); + bool sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0); if ((sameRack && wantSameRack) || (!sameRack && !wantSameRack)) { break; @@ -171,7 +168,6 @@ WorkerGetLocalFirstCandidateNode(List *currentNodeList) if (currentNodeCount == 0) { StringInfo clientHostStringInfo = makeStringInfo(); - char *clientHost = NULL; char *errorMessage = ClientHostAddress(clientHostStringInfo); if (errorMessage != NULL) @@ -184,7 +180,7 @@ WorkerGetLocalFirstCandidateNode(List *currentNodeList) } /* if hostname is localhost.localdomain, change it to localhost */ - clientHost = clientHostStringInfo->data; + char *clientHost = clientHostStringInfo->data; if (strncmp(clientHost, "localhost.localdomain", WORKER_LENGTH) == 0) { clientHost = pstrdup("localhost"); @@ -343,7 +339,6 @@ FilterActiveNodeListFunc(LOCKMODE lockMode, bool (*checkFunction)(WorkerNode *)) { List *workerNodeList = NIL; WorkerNode *workerNode = NULL; - HTAB *workerNodeHash = NULL; HASH_SEQ_STATUS status; Assert(checkFunction != NULL); @@ -353,7 +348,7 @@ FilterActiveNodeListFunc(LOCKMODE lockMode, bool (*checkFunction)(WorkerNode *)) LockRelationOid(DistNodeRelationId(), lockMode); } - workerNodeHash = GetWorkerNodeHash(); + HTAB *workerNodeHash = GetWorkerNodeHash(); hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) @@ -568,10 +563,9 @@ CompareWorkerNodes(const void *leftElement, const void *rightElement) { const void *leftWorker = *((const void **) leftElement); const void 
*rightWorker = *((const void **) rightElement); - int compare = 0; Size ignoredKeySize = 0; - compare = WorkerNodeCompare(leftWorker, rightWorker, ignoredKeySize); + int compare = WorkerNodeCompare(leftWorker, rightWorker, ignoredKeySize); return compare; } @@ -588,16 +582,15 @@ WorkerNodeCompare(const void *lhsKey, const void *rhsKey, Size keySize) const WorkerNode *workerLhs = (const WorkerNode *) lhsKey; const WorkerNode *workerRhs = (const WorkerNode *) rhsKey; - int nameCompare = 0; - int portCompare = 0; - nameCompare = strncmp(workerLhs->workerName, workerRhs->workerName, WORKER_LENGTH); + int nameCompare = strncmp(workerLhs->workerName, workerRhs->workerName, + WORKER_LENGTH); if (nameCompare != 0) { return nameCompare; } - portCompare = workerLhs->workerPort - workerRhs->workerPort; + int portCompare = workerLhs->workerPort - workerRhs->workerPort; return portCompare; } diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index bae589c63..3323fd8e7 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -170,9 +170,7 @@ recurse_pg_depend(const ObjectAddress *target, void (*apply)(ObjectAddressCollector *collector, Form_pg_depend row), ObjectAddressCollector *collector) { - Relation depRel = NULL; ScanKeyData key[2]; - SysScanDesc depScan = NULL; HeapTuple depTup = NULL; List *pgDependEntries = NIL; ListCell *pgDependCell = NULL; @@ -188,14 +186,15 @@ recurse_pg_depend(const ObjectAddress *target, /* * iterate the actual pg_depend catalog */ - depRel = heap_open(DependRelationId, AccessShareLock); + Relation depRel = heap_open(DependRelationId, AccessShareLock); /* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */ ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(target->classId)); ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, 
ObjectIdGetDatum(target->objectId)); - depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, key); + SysScanDesc depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, + key); while (HeapTupleIsValid(depTup = systable_getnext(depScan))) { @@ -215,9 +214,7 @@ recurse_pg_depend(const ObjectAddress *target, */ if (expand != NULL) { - List *expandedEntries = NIL; - - expandedEntries = expand(collector, target); + List *expandedEntries = expand(collector, target); pgDependEntries = list_concat(pgDependEntries, expandedEntries); } @@ -262,14 +259,13 @@ recurse_pg_depend(const ObjectAddress *target, static void InitObjectAddressCollector(ObjectAddressCollector *collector) { - int hashFlags = 0; HASHCTL info; memset(&info, 0, sizeof(info)); info.keysize = sizeof(ObjectAddress); info.entrysize = sizeof(ObjectAddress); info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); + int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); collector->dependencySet = hash_create("dependency set", 128, &info, hashFlags); collector->dependencyList = NULL; @@ -301,12 +297,12 @@ TargetObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *targ static void MarkObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *target) { - ObjectAddress *address = NULL; bool found = false; /* add to set */ - address = (ObjectAddress *) hash_search(collector->visitedObjects, target, - HASH_ENTER, &found); + ObjectAddress *address = (ObjectAddress *) hash_search(collector->visitedObjects, + target, + HASH_ENTER, &found); if (!found) { @@ -322,12 +318,12 @@ MarkObjectVisited(ObjectAddressCollector *collector, const ObjectAddress *target static void CollectObjectAddress(ObjectAddressCollector *collector, const ObjectAddress *collect) { - ObjectAddress *address = NULL; bool found = false; /* add to set */ - address = (ObjectAddress *) hash_search(collector->dependencySet, collect, - HASH_ENTER, &found); + 
ObjectAddress *address = (ObjectAddress *) hash_search(collector->dependencySet, + collect, + HASH_ENTER, &found); if (!found) { @@ -475,20 +471,19 @@ bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, ObjectAddress *extensionAddress) { - Relation depRel = NULL; ScanKeyData key[2]; - SysScanDesc depScan = NULL; HeapTuple depTup = NULL; bool result = false; - depRel = heap_open(DependRelationId, AccessShareLock); + Relation depRel = heap_open(DependRelationId, AccessShareLock); /* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */ ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(target->classId)); ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(target->objectId)); - depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, key); + SysScanDesc depScan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 2, + key); while (HeapTupleIsValid(depTup = systable_getnext(depScan))) { diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 52d395a2b..f383af634 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -139,13 +139,12 @@ MarkObjectDistributed(const ObjectAddress *distAddress) ObjectIdGetDatum(distAddress->objectId), Int32GetDatum(distAddress->objectSubId) }; - int spiStatus = 0; char *insertQuery = "INSERT INTO citus.pg_dist_object (classid, objid, objsubid) " "VALUES ($1, $2, $3) ON CONFLICT DO NOTHING"; - spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes, - paramValues); + int spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes, + paramValues); if (spiStatus < 0) { ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object"))); @@ -160,14 +159,12 @@ MarkObjectDistributed(const ObjectAddress *distAddress) bool 
CitusExtensionObject(const ObjectAddress *objectAddress) { - char *extensionName = false; - if (objectAddress->classId != ExtensionRelationId) { return false; } - extensionName = get_extension_name(objectAddress->objectId); + char *extensionName = get_extension_name(objectAddress->objectId); if (extensionName != NULL && strncasecmp(extensionName, "citus", NAMEDATALEN) == 0) { @@ -188,13 +185,10 @@ static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues) { - int spiConnected = 0; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - int spiStatus = 0; - int spiFinished = 0; - spiConnected = SPI_connect(); + int spiConnected = SPI_connect(); if (spiConnected != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); @@ -204,12 +198,12 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); - spiStatus = SPI_execute_with_args(query, paramCount, paramTypes, paramValues, - NULL, false, 0); + int spiStatus = SPI_execute_with_args(query, paramCount, paramTypes, paramValues, + NULL, false, 0); SetUserIdAndSecContext(savedUserId, savedSecurityContext); - spiFinished = SPI_finish(); + int spiFinished = SPI_finish(); if (spiFinished != SPI_OK_FINISH) { ereport(ERROR, (errmsg("could not disconnect from SPI manager"))); @@ -237,13 +231,12 @@ UnmarkObjectDistributed(const ObjectAddress *address) ObjectIdGetDatum(address->objectId), Int32GetDatum(address->objectSubId) }; - int spiStatus = 0; char *deleteQuery = "DELETE FROM citus.pg_dist_object WHERE classid = $1 AND " "objid = $2 AND objsubid = $3"; - spiStatus = ExecuteCommandAsSuperuser(deleteQuery, paramCount, paramTypes, - paramValues); + int spiStatus = ExecuteCommandAsSuperuser(deleteQuery, paramCount, paramTypes, + paramValues); if (spiStatus < 0) { ereport(ERROR, (errmsg("failed to delete 
object from citus.pg_dist_object"))); @@ -258,13 +251,10 @@ UnmarkObjectDistributed(const ObjectAddress *address) bool IsObjectDistributed(const ObjectAddress *address) { - Relation pgDistObjectRel = NULL; ScanKeyData key[3]; - SysScanDesc pgDistObjectScan = NULL; - HeapTuple pgDistObjectTup = NULL; bool result = false; - pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); + Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); /* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */ ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ, @@ -273,10 +263,11 @@ IsObjectDistributed(const ObjectAddress *address) ObjectIdGetDatum(address->objectId)); ScanKeyInit(&key[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(address->objectSubId)); - pgDistObjectScan = systable_beginscan(pgDistObjectRel, DistObjectPrimaryKeyIndexId(), - true, NULL, 3, key); + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, + DistObjectPrimaryKeyIndexId(), + true, NULL, 3, key); - pgDistObjectTup = systable_getnext(pgDistObjectScan); + HeapTuple pgDistObjectTup = systable_getnext(pgDistObjectScan); if (HeapTupleIsValid(pgDistObjectTup)) { result = true; @@ -299,14 +290,13 @@ ClusterHasDistributedFunctionWithDistArgument(void) { bool foundDistributedFunction = false; - SysScanDesc pgDistObjectScan = NULL; HeapTuple pgDistObjectTup = NULL; Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel); - pgDistObjectScan = + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false, NULL, 0, NULL); while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan))) { @@ -315,8 +305,7 @@ ClusterHasDistributedFunctionWithDistArgument(void) if (pg_dist_object->classid == ProcedureRelationId) { - bool distArgumentIsNull = false; - 
distArgumentIsNull = + bool distArgumentIsNull = heap_attisnull(pgDistObjectTup, Anum_pg_dist_object_distribution_argument_index, tupleDescriptor); @@ -345,14 +334,13 @@ ClusterHasDistributedFunctionWithDistArgument(void) List * GetDistributedObjectAddressList(void) { - Relation pgDistObjectRel = NULL; - SysScanDesc pgDistObjectScan = NULL; HeapTuple pgDistObjectTup = NULL; List *objectAddressList = NIL; - pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); - pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false, NULL, 0, - NULL); + Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false, + NULL, 0, + NULL); while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan))) { Form_pg_dist_object pg_dist_object = diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 86c0da214..18c95580a 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -278,9 +278,7 @@ EnsureModificationsCanRun(void) bool IsDistributedTable(Oid relationId) { - DistTableCacheEntry *cacheEntry = NULL; - - cacheEntry = LookupDistTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = LookupDistTableCacheEntry(relationId); /* * If extension hasn't been created, or has the wrong version and the @@ -310,8 +308,6 @@ IsDistributedTable(Oid relationId) static bool IsDistributedTableViaCatalog(Oid relationId) { - HeapTuple partitionTuple = NULL; - SysScanDesc scanDescriptor = NULL; const int scanKeyCount = 1; ScanKeyData scanKey[1]; bool indexOK = true; @@ -321,11 +317,11 @@ IsDistributedTableViaCatalog(Oid relationId) ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); - scanDescriptor = systable_beginscan(pgDistPartition, - 
DistPartitionLogicalRelidIndexId(), - indexOK, NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + DistPartitionLogicalRelidIndexId(), + indexOK, NULL, scanKeyCount, scanKey); - partitionTuple = systable_getnext(scanDescriptor); + HeapTuple partitionTuple = systable_getnext(scanDescriptor); systable_endscan(scanDescriptor); heap_close(pgDistPartition, AccessShareLock); @@ -340,21 +336,19 @@ IsDistributedTableViaCatalog(Oid relationId) List * DistributedTableList(void) { - List *distTableOidList = NIL; List *distributedTableList = NIL; ListCell *distTableOidCell = NULL; Assert(CitusHasBeenLoaded() && CheckCitusVersion(WARNING)); /* first, we need to iterate over pg_dist_partition */ - distTableOidList = DistTableOidList(); + List *distTableOidList = DistTableOidList(); foreach(distTableOidCell, distTableOidList) { - DistTableCacheEntry *cacheEntry = NULL; Oid relationId = lfirst_oid(distTableOidCell); - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); distributedTableList = lappend(distributedTableList, cacheEntry); } @@ -372,24 +366,20 @@ DistributedTableList(void) ShardInterval * LoadShardInterval(uint64 shardId) { - ShardInterval *shardInterval = NULL; - ShardInterval *sourceShardInterval = NULL; - ShardCacheEntry *shardEntry = NULL; - DistTableCacheEntry *tableEntry = NULL; + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); - shardEntry = LookupShardCacheEntry(shardId); - - tableEntry = shardEntry->tableEntry; + DistTableCacheEntry *tableEntry = shardEntry->tableEntry; Assert(tableEntry->isDistributedTable); /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); - sourceShardInterval = tableEntry->sortedShardIntervalArray[shardEntry->shardIndex]; + ShardInterval *sourceShardInterval = + tableEntry->sortedShardIntervalArray[shardEntry->shardIndex]; /* copy value to 
return */ - shardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); + ShardInterval *shardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(sourceShardInterval, shardInterval); return shardInterval; @@ -403,12 +393,9 @@ LoadShardInterval(uint64 shardId) Oid RelationIdForShard(uint64 shardId) { - ShardCacheEntry *shardEntry = NULL; - DistTableCacheEntry *tableEntry = NULL; + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); - shardEntry = LookupShardCacheEntry(shardId); - - tableEntry = shardEntry->tableEntry; + DistTableCacheEntry *tableEntry = shardEntry->tableEntry; Assert(tableEntry->isDistributedTable); @@ -439,24 +426,18 @@ ReferenceTableShardId(uint64 shardId) GroupShardPlacement * LoadGroupShardPlacement(uint64 shardId, uint64 placementId) { - ShardCacheEntry *shardEntry = NULL; - DistTableCacheEntry *tableEntry = NULL; - - GroupShardPlacement *placementArray = NULL; - int numberOfPlacements = 0; - - int i = 0; - - shardEntry = LookupShardCacheEntry(shardId); - tableEntry = shardEntry->tableEntry; + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); + DistTableCacheEntry *tableEntry = shardEntry->tableEntry; /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); - placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; - numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; + GroupShardPlacement *placementArray = + tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; + int numberOfPlacements = + tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; - for (i = 0; i < numberOfPlacements; i++) + for (int i = 0; i < numberOfPlacements; i++) { if (placementArray[i].placementId == placementId) { @@ -479,13 +460,10 @@ LoadGroupShardPlacement(uint64 shardId, uint64 placementId) ShardPlacement * LoadShardPlacement(uint64 shardId, uint64 placementId) { - ShardCacheEntry 
*shardEntry = NULL; - GroupShardPlacement *groupPlacement = NULL; - ShardPlacement *nodePlacement = NULL; - - shardEntry = LookupShardCacheEntry(shardId); - groupPlacement = LoadGroupShardPlacement(shardId, placementId); - nodePlacement = ResolveGroupShardPlacement(groupPlacement, shardEntry); + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); + GroupShardPlacement *groupPlacement = LoadGroupShardPlacement(shardId, placementId); + ShardPlacement *nodePlacement = ResolveGroupShardPlacement(groupPlacement, + shardEntry); return nodePlacement; } @@ -499,19 +477,16 @@ LoadShardPlacement(uint64 shardId, uint64 placementId) ShardPlacement * FindShardPlacementOnGroup(int32 groupId, uint64 shardId) { - ShardCacheEntry *shardEntry = NULL; - DistTableCacheEntry *tableEntry = NULL; - GroupShardPlacement *placementArray = NULL; - int numberOfPlacements = 0; ShardPlacement *placementOnNode = NULL; - int placementIndex = 0; - shardEntry = LookupShardCacheEntry(shardId); - tableEntry = shardEntry->tableEntry; - placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; - numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); + DistTableCacheEntry *tableEntry = shardEntry->tableEntry; + GroupShardPlacement *placementArray = + tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; + int numberOfPlacements = + tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; - for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) + for (int placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; @@ -583,11 +558,9 @@ ResolveGroupShardPlacement(GroupShardPlacement *groupShardPlacement, WorkerNode * LookupNodeByNodeId(uint32 nodeId) { - int workerNodeIndex = 0; - PrepareWorkerNodeCache(); - for (workerNodeIndex = 0; workerNodeIndex < 
WorkerNodeCount; workerNodeIndex++) + for (int workerNodeIndex = 0; workerNodeIndex < WorkerNodeCount; workerNodeIndex++) { WorkerNode *workerNode = WorkerNodeArray[workerNodeIndex]; if (workerNode->nodeId == nodeId) @@ -613,11 +586,10 @@ WorkerNode * LookupNodeForGroup(int32 groupId) { bool foundAnyNodes = false; - int workerNodeIndex = 0; PrepareWorkerNodeCache(); - for (workerNodeIndex = 0; workerNodeIndex < WorkerNodeCount; workerNodeIndex++) + for (int workerNodeIndex = 0; workerNodeIndex < WorkerNodeCount; workerNodeIndex++) { WorkerNode *workerNode = WorkerNodeArray[workerNodeIndex]; int32 workerNodeGroupId = workerNode->groupId; @@ -675,23 +647,20 @@ LookupNodeForGroup(int32 groupId) List * ShardPlacementList(uint64 shardId) { - ShardCacheEntry *shardEntry = NULL; - DistTableCacheEntry *tableEntry = NULL; - GroupShardPlacement *placementArray = NULL; - int numberOfPlacements = 0; List *placementList = NIL; - int i = 0; - shardEntry = LookupShardCacheEntry(shardId); - tableEntry = shardEntry->tableEntry; + ShardCacheEntry *shardEntry = LookupShardCacheEntry(shardId); + DistTableCacheEntry *tableEntry = shardEntry->tableEntry; /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); - placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; - numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; + GroupShardPlacement *placementArray = + tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; + int numberOfPlacements = + tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; - for (i = 0; i < numberOfPlacements; i++) + for (int i = 0; i < numberOfPlacements; i++) { GroupShardPlacement *groupShardPlacement = &placementArray[i]; ShardPlacement *shardPlacement = ResolveGroupShardPlacement(groupShardPlacement, @@ -718,7 +687,6 @@ ShardPlacementList(uint64 shardId) static ShardCacheEntry * LookupShardCacheEntry(int64 shardId) { - 
ShardCacheEntry *shardEntry = NULL; bool foundInCache = false; bool recheck = false; @@ -727,7 +695,8 @@ LookupShardCacheEntry(int64 shardId) InitializeCaches(); /* lookup cache entry */ - shardEntry = hash_search(DistShardCacheHash, &shardId, HASH_FIND, &foundInCache); + ShardCacheEntry *shardEntry = hash_search(DistShardCacheHash, &shardId, HASH_FIND, + &foundInCache); if (!foundInCache) { @@ -822,7 +791,6 @@ DistributedTableCacheEntry(Oid distributedRelationId) static DistTableCacheEntry * LookupDistTableCacheEntry(Oid relationId) { - DistTableCacheEntry *cacheEntry = NULL; bool foundInCache = false; void *hashKey = (void *) &relationId; @@ -865,7 +833,8 @@ LookupDistTableCacheEntry(Oid relationId) } } - cacheEntry = hash_search(DistTableCacheHash, hashKey, HASH_ENTER, &foundInCache); + DistTableCacheEntry *cacheEntry = hash_search(DistTableCacheHash, hashKey, HASH_ENTER, + &foundInCache); /* return valid matches */ if (foundInCache) @@ -916,14 +885,9 @@ LookupDistTableCacheEntry(Oid relationId) DistObjectCacheEntry * LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid) { - DistObjectCacheEntry *cacheEntry = NULL; bool foundInCache = false; DistObjectCacheEntryKey hashKey; - Relation pgDistObjectRel = NULL; - TupleDesc pgDistObjectTupleDesc = NULL; ScanKeyData pgDistObjectKey[3]; - SysScanDesc pgDistObjectScan = NULL; - HeapTuple pgDistObjectTup = NULL; memset(&hashKey, 0, sizeof(DistObjectCacheEntryKey)); hashKey.classid = classid; @@ -942,7 +906,8 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid) InitializeCaches(); - cacheEntry = hash_search(DistObjectCacheHash, &hashKey, HASH_ENTER, &foundInCache); + DistObjectCacheEntry *cacheEntry = hash_search(DistObjectCacheHash, &hashKey, + HASH_ENTER, &foundInCache); /* return valid matches */ if (foundInCache) @@ -970,8 +935,8 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid) cacheEntry->key.objid = objid; cacheEntry->key.objsubid = objsubid; - pgDistObjectRel = 
heap_open(DistObjectRelationId(), AccessShareLock); - pgDistObjectTupleDesc = RelationGetDescr(pgDistObjectRel); + Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock); + TupleDesc pgDistObjectTupleDesc = RelationGetDescr(pgDistObjectRel); ScanKeyInit(&pgDistObjectKey[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(classid)); @@ -980,9 +945,10 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid) ScanKeyInit(&pgDistObjectKey[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(objsubid)); - pgDistObjectScan = systable_beginscan(pgDistObjectRel, DistObjectPrimaryKeyIndexId(), - true, NULL, 3, pgDistObjectKey); - pgDistObjectTup = systable_getnext(pgDistObjectScan); + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, + DistObjectPrimaryKeyIndexId(), + true, NULL, 3, pgDistObjectKey); + HeapTuple pgDistObjectTup = systable_getnext(pgDistObjectScan); if (HeapTupleIsValid(pgDistObjectTup)) { @@ -1021,18 +987,12 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid) static void BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry) { - HeapTuple distPartitionTuple = NULL; - Relation pgDistPartition = NULL; - Datum partitionKeyDatum = 0; - Datum replicationModelDatum = 0; MemoryContext oldContext = NULL; - TupleDesc tupleDescriptor = NULL; - bool partitionKeyIsNull = false; Datum datumArray[Natts_pg_dist_partition]; bool isNullArray[Natts_pg_dist_partition]; - pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); - distPartitionTuple = + Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); + HeapTuple distPartitionTuple = LookupDistPartitionTuple(pgDistPartition, cacheEntry->relationId); /* not a distributed table, done */ @@ -1045,25 +1005,23 @@ BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry) cacheEntry->isDistributedTable = true; - tupleDescriptor = 
RelationGetDescr(pgDistPartition); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); heap_deform_tuple(distPartitionTuple, tupleDescriptor, datumArray, isNullArray); cacheEntry->partitionMethod = datumArray[Anum_pg_dist_partition_partmethod - 1]; - partitionKeyDatum = datumArray[Anum_pg_dist_partition_partkey - 1]; - partitionKeyIsNull = isNullArray[Anum_pg_dist_partition_partkey - 1]; + Datum partitionKeyDatum = datumArray[Anum_pg_dist_partition_partkey - 1]; + bool partitionKeyIsNull = isNullArray[Anum_pg_dist_partition_partkey - 1]; /* note that for reference tables partitionKeyisNull is true */ if (!partitionKeyIsNull) { - Node *partitionNode = NULL; - oldContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); /* get the string representation of the partition column Var */ cacheEntry->partitionKeyString = TextDatumGetCString(partitionKeyDatum); /* convert the string to a Node and ensure it is a Var */ - partitionNode = stringToNode(cacheEntry->partitionKeyString); + Node *partitionNode = stringToNode(cacheEntry->partitionKeyString); Assert(IsA(partitionNode, Var)); cacheEntry->partitionColumn = (Var *) partitionNode; @@ -1081,7 +1039,7 @@ BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry) cacheEntry->colocationId = INVALID_COLOCATION_ID; } - replicationModelDatum = datumArray[Anum_pg_dist_partition_repmodel - 1]; + Datum replicationModelDatum = datumArray[Anum_pg_dist_partition_repmodel - 1]; if (isNullArray[Anum_pg_dist_partition_repmodel - 1]) { /* @@ -1102,15 +1060,13 @@ BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry) /* we only need hash functions for hash distributed tables */ if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH) { - TypeCacheEntry *typeEntry = NULL; - FmgrInfo *hashFunction = NULL; Var *partitionColumn = cacheEntry->partitionColumn; - typeEntry = lookup_type_cache(partitionColumn->vartype, - TYPECACHE_HASH_PROC_FINFO); + TypeCacheEntry *typeEntry = lookup_type_cache(partitionColumn->vartype, + 
TYPECACHE_HASH_PROC_FINFO); - hashFunction = MemoryContextAllocZero(MetadataCacheMemoryContext, - sizeof(FmgrInfo)); + FmgrInfo *hashFunction = MemoryContextAllocZero(MetadataCacheMemoryContext, + sizeof(FmgrInfo)); fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), MetadataCacheMemoryContext); @@ -1151,9 +1107,6 @@ BuildCachedShardList(DistTableCacheEntry *cacheEntry) ShardInterval **sortedShardIntervalArray = NULL; FmgrInfo *shardIntervalCompareFunction = NULL; FmgrInfo *shardColumnCompareFunction = NULL; - List *distShardTupleList = NIL; - int shardIntervalArrayLength = 0; - int shardIndex = 0; Oid columnTypeId = InvalidOid; int32 columnTypeMod = -1; Oid intervalTypeId = InvalidOid; @@ -1166,8 +1119,8 @@ BuildCachedShardList(DistTableCacheEntry *cacheEntry) &intervalTypeId, &intervalTypeMod); - distShardTupleList = LookupDistShardTuples(cacheEntry->relationId); - shardIntervalArrayLength = list_length(distShardTupleList); + List *distShardTupleList = LookupDistShardTuples(cacheEntry->relationId); + int shardIntervalArrayLength = list_length(distShardTupleList); if (shardIntervalArrayLength > 0) { Relation distShardRelation = heap_open(DistShardRelationId(), AccessShareLock); @@ -1195,10 +1148,10 @@ BuildCachedShardList(DistTableCacheEntry *cacheEntry) distShardTupleDesc, intervalTypeId, intervalTypeMod); - ShardInterval *newShardInterval = NULL; MemoryContext oldContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); - newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); + ShardInterval *newShardInterval = (ShardInterval *) palloc0( + sizeof(ShardInterval)); CopyShardInterval(shardInterval, newShardInterval); shardIntervalArray[arrayIndex] = newShardInterval; @@ -1315,20 +1268,16 @@ BuildCachedShardList(DistTableCacheEntry *cacheEntry) cacheEntry->shardIntervalArrayLength = 0; /* maintain shardId->(table,ShardInterval) cache */ - for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) + for (int shardIndex = 0; 
shardIndex < shardIntervalArrayLength; shardIndex++) { - ShardCacheEntry *shardEntry = NULL; ShardInterval *shardInterval = sortedShardIntervalArray[shardIndex]; bool foundInCache = false; - List *placementList = NIL; - MemoryContext oldContext = NULL; ListCell *placementCell = NULL; - GroupShardPlacement *placementArray = NULL; int placementOffset = 0; - int numberOfPlacements = 0; - shardEntry = hash_search(DistShardCacheHash, &shardInterval->shardId, HASH_ENTER, - &foundInCache); + ShardCacheEntry *shardEntry = hash_search(DistShardCacheHash, + &shardInterval->shardId, HASH_ENTER, + &foundInCache); if (foundInCache) { ereport(ERROR, (errmsg("cached metadata for shard " UINT64_FORMAT @@ -1348,12 +1297,13 @@ BuildCachedShardList(DistTableCacheEntry *cacheEntry) shardEntry->tableEntry = cacheEntry; /* build list of shard placements */ - placementList = BuildShardPlacementList(shardInterval); - numberOfPlacements = list_length(placementList); + List *placementList = BuildShardPlacementList(shardInterval); + int numberOfPlacements = list_length(placementList); /* and copy that list into the cache entry */ - oldContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); - placementArray = palloc0(numberOfPlacements * sizeof(GroupShardPlacement)); + MemoryContext oldContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); + GroupShardPlacement *placementArray = palloc0(numberOfPlacements * + sizeof(GroupShardPlacement)); foreach(placementCell, placementList) { GroupShardPlacement *srcPlacement = @@ -1408,9 +1358,6 @@ bool HasUniformHashDistribution(ShardInterval **shardIntervalArray, int shardIntervalArrayLength) { - uint64 hashTokenIncrement = 0; - int shardIndex = 0; - /* if there are no shards, there is no uniform distribution */ if (shardIntervalArrayLength == 0) { @@ -1418,9 +1365,9 @@ HasUniformHashDistribution(ShardInterval **shardIntervalArray, } /* calculate the hash token increment */ - hashTokenIncrement = HASH_TOKEN_COUNT / shardIntervalArrayLength; + 
uint64 hashTokenIncrement = HASH_TOKEN_COUNT / shardIntervalArrayLength; - for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) + for (int shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { ShardInterval *shardInterval = shardIntervalArray[shardIndex]; int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement); @@ -1452,7 +1399,6 @@ static bool HasUninitializedShardInterval(ShardInterval **sortedShardIntervalArray, int shardCount) { bool hasUninitializedShardInterval = false; - ShardInterval *lastShardInterval = NULL; if (shardCount == 0) { @@ -1465,7 +1411,7 @@ HasUninitializedShardInterval(ShardInterval **sortedShardIntervalArray, int shar * Since the shard interval array is sorted, and uninitialized ones stored * in the end of the array, checking the last element is enough. */ - lastShardInterval = sortedShardIntervalArray[shardCount - 1]; + ShardInterval *lastShardInterval = sortedShardIntervalArray[shardCount - 1]; if (!lastShardInterval->minValueExists || !lastShardInterval->maxValueExists) { hasUninitializedShardInterval = true; @@ -1484,8 +1430,6 @@ HasOverlappingShardInterval(ShardInterval **shardIntervalArray, int shardIntervalArrayLength, FmgrInfo *shardIntervalSortCompareFunction) { - int shardIndex = 0; - ShardInterval *lastShardInterval = NULL; Datum comparisonDatum = 0; int comparisonResult = 0; @@ -1495,8 +1439,8 @@ HasOverlappingShardInterval(ShardInterval **shardIntervalArray, return false; } - lastShardInterval = shardIntervalArray[0]; - for (shardIndex = 1; shardIndex < shardIntervalArrayLength; shardIndex++) + ShardInterval *lastShardInterval = shardIntervalArray[0]; + for (int shardIndex = 1; shardIndex < shardIntervalArrayLength; shardIndex++) { ShardInterval *curShardInterval = shardIntervalArray[shardIndex]; @@ -1586,15 +1530,13 @@ CitusHasBeenLoaded(void) static bool CitusHasBeenLoadedInternal(void) { - Oid citusExtensionOid = InvalidOid; - if (IsBinaryUpgrade) { /* never use Citus 
logic during pg_upgrade */ return false; } - citusExtensionOid = get_extension_oid("citus", true); + Oid citusExtensionOid = get_extension_oid("citus", true); if (citusExtensionOid == InvalidOid) { /* Citus extension does not exist yet */ @@ -1654,14 +1596,12 @@ CheckCitusVersion(int elevel) bool CheckAvailableVersion(int elevel) { - char *availableVersion = NULL; - if (!EnableVersionChecks) { return true; } - availableVersion = AvailableExtensionVersion(); + char *availableVersion = AvailableExtensionVersion(); if (!MajorVersionsCompatible(availableVersion, CITUS_EXTENSIONVERSION)) { @@ -1688,12 +1628,10 @@ CheckAvailableVersion(int elevel) static bool CheckInstalledVersion(int elevel) { - char *installedVersion = NULL; - Assert(CitusHasBeenLoaded()); Assert(EnableVersionChecks); - installedVersion = InstalledExtensionVersion(); + char *installedVersion = InstalledExtensionVersion(); if (!MajorVersionsCompatible(installedVersion, CITUS_EXTENSIONVERSION)) { @@ -1761,21 +1699,17 @@ MajorVersionsCompatible(char *leftVersion, char *rightVersion) static char * AvailableExtensionVersion(void) { - ReturnSetInfo *extensionsResultSet = NULL; - TupleTableSlot *tupleTableSlot = NULL; LOCAL_FCINFO(fcinfo, 0); FmgrInfo flinfo; - EState *estate = NULL; - bool hasTuple = false; bool goForward = true; bool doCopy = false; char *availableExtensionVersion; InitializeCaches(); - estate = CreateExecutorState(); - extensionsResultSet = makeNode(ReturnSetInfo); + EState *estate = CreateExecutorState(); + ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo); extensionsResultSet->econtext = GetPerTupleExprContext(estate); extensionsResultSet->allowedModes = SFRM_Materialize; @@ -1786,25 +1720,25 @@ AvailableExtensionVersion(void) /* pg_available_extensions returns result set containing all available extensions */ (*pg_available_extensions)(fcinfo); - tupleTableSlot = MakeSingleTupleTableSlotCompat(extensionsResultSet->setDesc, - &TTSOpsMinimalTuple); - hasTuple = 
tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, doCopy, - tupleTableSlot); + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + extensionsResultSet->setDesc, + &TTSOpsMinimalTuple); + bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, + doCopy, + tupleTableSlot); while (hasTuple) { - Datum extensionNameDatum = 0; - char *extensionName = NULL; bool isNull = false; - extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull); - extensionName = NameStr(*DatumGetName(extensionNameDatum)); + Datum extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull); + char *extensionName = NameStr(*DatumGetName(extensionNameDatum)); if (strcmp(extensionName, "citus") == 0) { - MemoryContext oldMemoryContext = NULL; Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull); /* we will cache the result of citus version to prevent catalog access */ - oldMemoryContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); + MemoryContext oldMemoryContext = MemoryContextSwitchTo( + MetadataCacheMemoryContext); availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion)); @@ -1836,28 +1770,24 @@ AvailableExtensionVersion(void) static char * InstalledExtensionVersion(void) { - Relation relation = NULL; - SysScanDesc scandesc; ScanKeyData entry[1]; - HeapTuple extensionTuple = NULL; char *installedExtensionVersion = NULL; InitializeCaches(); - relation = heap_open(ExtensionRelationId, AccessShareLock); + Relation relation = heap_open(ExtensionRelationId, AccessShareLock); ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum("citus")); - scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, - NULL, 1, entry); + SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, + NULL, 1, entry); - extensionTuple = systable_getnext(scandesc); + HeapTuple extensionTuple = systable_getnext(scandesc); /* We assume that there 
can be at most one matching tuple */ if (HeapTupleIsValid(extensionTuple)) { - MemoryContext oldMemoryContext = NULL; int extensionIndex = Anum_pg_extension_extversion; TupleDesc tupleDescriptor = RelationGetDescr(relation); bool isNull = false; @@ -1872,7 +1802,8 @@ InstalledExtensionVersion(void) } /* we will cache the result of citus version to prevent catalog access */ - oldMemoryContext = MemoryContextSwitchTo(MetadataCacheMemoryContext); + MemoryContext oldMemoryContext = MemoryContextSwitchTo( + MetadataCacheMemoryContext); installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion)); @@ -2340,10 +2271,7 @@ CurrentDatabaseName(void) extern Oid CitusExtensionOwner(void) { - Relation relation = NULL; - SysScanDesc scandesc; ScanKeyData entry[1]; - HeapTuple extensionTuple = NULL; Form_pg_extension extensionForm = NULL; if (MetadataCache.extensionOwner != InvalidOid) @@ -2351,17 +2279,17 @@ CitusExtensionOwner(void) return MetadataCache.extensionOwner; } - relation = heap_open(ExtensionRelationId, AccessShareLock); + Relation relation = heap_open(ExtensionRelationId, AccessShareLock); ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum("citus")); - scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, - NULL, 1, entry); + SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, + NULL, 1, entry); - extensionTuple = systable_getnext(scandesc); + HeapTuple extensionTuple = systable_getnext(scandesc); /* We assume that there can be at most one matching tuple */ if (HeapTupleIsValid(extensionTuple)) @@ -2540,8 +2468,6 @@ Datum master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; - HeapTuple newTuple = NULL; - HeapTuple oldTuple = NULL; Oid oldLogicalRelationId = InvalidOid; Oid newLogicalRelationId = InvalidOid; @@ -2553,8 +2479,8 @@ master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS) 
CheckCitusVersion(ERROR); - newTuple = triggerData->tg_newtuple; - oldTuple = triggerData->tg_trigtuple; + HeapTuple newTuple = triggerData->tg_newtuple; + HeapTuple oldTuple = triggerData->tg_trigtuple; /* collect logicalrelid for OLD and NEW tuple */ if (oldTuple != NULL) @@ -2603,8 +2529,6 @@ Datum master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; - HeapTuple newTuple = NULL; - HeapTuple oldTuple = NULL; Oid oldLogicalRelationId = InvalidOid; Oid newLogicalRelationId = InvalidOid; @@ -2616,8 +2540,8 @@ master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); - newTuple = triggerData->tg_newtuple; - oldTuple = triggerData->tg_trigtuple; + HeapTuple newTuple = triggerData->tg_newtuple; + HeapTuple oldTuple = triggerData->tg_trigtuple; /* collect logicalrelid for OLD and NEW tuple */ if (oldTuple != NULL) @@ -2666,8 +2590,6 @@ Datum master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; - HeapTuple newTuple = NULL; - HeapTuple oldTuple = NULL; Oid oldShardId = InvalidOid; Oid newShardId = InvalidOid; @@ -2679,8 +2601,8 @@ master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); - newTuple = triggerData->tg_newtuple; - oldTuple = triggerData->tg_trigtuple; + HeapTuple newTuple = triggerData->tg_newtuple; + HeapTuple oldTuple = triggerData->tg_trigtuple; /* collect shardid for OLD and NEW tuple */ if (oldTuple != NULL) @@ -3030,15 +2952,10 @@ PrepareWorkerNodeCache(void) static void InitializeWorkerNodeCache(void) { - HTAB *newWorkerNodeHash = NULL; - List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; HASHCTL info; - int hashFlags = 0; long maxTableSize = (long) MaxWorkerNodesTracked; bool includeNodesFromOtherClusters = false; - int newWorkerNodeCount = 0; - WorkerNode **newWorkerNodeArray = NULL; int workerNodeIndex = 0; InitializeCaches(); @@ -3054,29 +2971,29 @@ 
InitializeWorkerNodeCache(void) info.hcxt = MetadataCacheMemoryContext; info.hash = WorkerNodeHashCode; info.match = WorkerNodeCompare; - hashFlags = HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE; + int hashFlags = HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE; - newWorkerNodeHash = hash_create("Worker Node Hash", maxTableSize, &info, hashFlags); + HTAB *newWorkerNodeHash = hash_create("Worker Node Hash", maxTableSize, &info, + hashFlags); /* read the list from pg_dist_node */ - workerNodeList = ReadDistNode(includeNodesFromOtherClusters); + List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); - newWorkerNodeCount = list_length(workerNodeList); - newWorkerNodeArray = MemoryContextAlloc(MetadataCacheMemoryContext, - sizeof(WorkerNode *) * newWorkerNodeCount); + int newWorkerNodeCount = list_length(workerNodeList); + WorkerNode **newWorkerNodeArray = MemoryContextAlloc(MetadataCacheMemoryContext, + sizeof(WorkerNode *) * + newWorkerNodeCount); /* iterate over the worker node list */ foreach(workerNodeCell, workerNodeList) { - WorkerNode *workerNode = NULL; WorkerNode *currentNode = lfirst(workerNodeCell); - void *hashKey = NULL; bool handleFound = false; /* search for the worker node in the hash, and then insert the values */ - hashKey = (void *) currentNode; - workerNode = (WorkerNode *) hash_search(newWorkerNodeHash, hashKey, - HASH_ENTER, &handleFound); + void *hashKey = (void *) currentNode; + WorkerNode *workerNode = (WorkerNode *) hash_search(newWorkerNodeHash, hashKey, + HASH_ENTER, &handleFound); /* fill the newly allocated workerNode in the cache */ strlcpy(workerNode->workerName, currentNode->workerName, WORKER_LENGTH); @@ -3153,14 +3070,9 @@ RegisterWorkerNodeCacheCallbacks(void) int32 GetLocalGroupId(void) { - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; - HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; int32 groupId = 0; - Relation pgDistLocalGroupId = NULL; - Oid 
localGroupTableOid = InvalidOid; InitializeCaches(); @@ -3172,21 +3084,22 @@ GetLocalGroupId(void) return LocalGroupId; } - localGroupTableOid = get_relname_relid("pg_dist_local_group", PG_CATALOG_NAMESPACE); + Oid localGroupTableOid = get_relname_relid("pg_dist_local_group", + PG_CATALOG_NAMESPACE); if (localGroupTableOid == InvalidOid) { return 0; } - pgDistLocalGroupId = heap_open(localGroupTableOid, AccessShareLock); + Relation pgDistLocalGroupId = heap_open(localGroupTableOid, AccessShareLock); - scanDescriptor = systable_beginscan(pgDistLocalGroupId, - InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistLocalGroupId, + InvalidOid, false, + NULL, scanKeyCount, scanKey); - tupleDescriptor = RelationGetDescr(pgDistLocalGroupId); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistLocalGroupId); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { @@ -3258,8 +3171,6 @@ WorkerNodeHashCode(const void *key, Size keySize) static void ResetDistTableCacheEntry(DistTableCacheEntry *cacheEntry) { - int shardIndex = 0; - if (cacheEntry->partitionKeyString != NULL) { pfree(cacheEntry->partitionKeyString); @@ -3289,7 +3200,7 @@ ResetDistTableCacheEntry(DistTableCacheEntry *cacheEntry) return; } - for (shardIndex = 0; shardIndex < cacheEntry->shardIntervalArrayLength; + for (int shardIndex = 0; shardIndex < cacheEntry->shardIntervalArrayLength; shardIndex++) { ShardInterval *shardInterval = cacheEntry->sortedShardIntervalArray[shardIndex]; @@ -3555,30 +3466,26 @@ InvalidateMetadataSystemCache(void) List * DistTableOidList(void) { - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; - HeapTuple heapTuple = NULL; List *distTableOidList = NIL; - TupleDesc tupleDescriptor = NULL; Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); - scanDescriptor = 
systable_beginscan(pgDistPartition, - InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + InvalidOid, false, + NULL, scanKeyCount, scanKey); - tupleDescriptor = RelationGetDescr(pgDistPartition); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { bool isNull = false; - Oid relationId = InvalidOid; Datum relationIdDatum = heap_getattr(heapTuple, Anum_pg_dist_partition_logicalrelid, tupleDescriptor, &isNull); - relationId = DatumGetObjectId(relationIdDatum); + Oid relationId = DatumGetObjectId(relationIdDatum); distTableOidList = lappend_oid(distTableOidList, relationId); heapTuple = systable_getnext(scanDescriptor); @@ -3630,8 +3537,6 @@ static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId) { HeapTuple distPartitionTuple = NULL; - HeapTuple currentPartitionTuple = NULL; - SysScanDesc scanDescriptor; ScanKeyData scanKey[1]; /* copy scankey to local copy, it will be modified during the scan */ @@ -3640,11 +3545,11 @@ LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId) /* set scan arguments */ scanKey[0].sk_argument = ObjectIdGetDatum(relationId); - scanDescriptor = systable_beginscan(pgDistPartition, - DistPartitionLogicalRelidIndexId(), - true, NULL, 1, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + DistPartitionLogicalRelidIndexId(), + true, NULL, 1, scanKey); - currentPartitionTuple = systable_getnext(scanDescriptor); + HeapTuple currentPartitionTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(currentPartitionTuple)) { distPartitionTuple = heap_copytuple(currentPartitionTuple); @@ -3663,13 +3568,10 @@ LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId) static List * LookupDistShardTuples(Oid relationId) { - Relation pgDistShard = NULL; 
List *distShardTupleList = NIL; - HeapTuple currentShardTuple = NULL; - SysScanDesc scanDescriptor; ScanKeyData scanKey[1]; - pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); + Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); /* copy scankey to local copy, it will be modified during the scan */ memcpy(scanKey, DistShardScanKey, sizeof(DistShardScanKey)); @@ -3677,11 +3579,11 @@ LookupDistShardTuples(Oid relationId) /* set scan arguments */ scanKey[0].sk_argument = ObjectIdGetDatum(relationId); - scanDescriptor = systable_beginscan(pgDistShard, - DistShardLogicalRelidIndexId(), true, - NULL, 1, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistShard, + DistShardLogicalRelidIndexId(), true, + NULL, 1, scanKey); - currentShardTuple = systable_getnext(scanDescriptor); + HeapTuple currentShardTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(currentShardTuple)) { HeapTuple shardTupleCopy = heap_copytuple(currentShardTuple); @@ -3706,10 +3608,8 @@ LookupDistShardTuples(Oid relationId) Oid LookupShardRelation(int64 shardId, bool missingOk) { - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; Form_pg_dist_shard shardForm = NULL; Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); Oid relationId = InvalidOid; @@ -3717,11 +3617,11 @@ LookupShardRelation(int64 shardId, bool missingOk) ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); - scanDescriptor = systable_beginscan(pgDistShard, - DistShardShardidIndexId(), true, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistShard, + DistShardShardidIndexId(), true, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple) && !missingOk) { ereport(ERROR, (errmsg("could 
not find valid entry for shard " @@ -3811,18 +3711,10 @@ TupleToShardInterval(HeapTuple heapTuple, TupleDesc tupleDescriptor, Oid intervalTypeId, int32 intervalTypeMod) { - ShardInterval *shardInterval = NULL; - bool minValueNull = false; - bool maxValueNull = false; Oid inputFunctionId = InvalidOid; Oid typeIoParam = InvalidOid; Datum datumArray[Natts_pg_dist_shard]; bool isNullArray[Natts_pg_dist_shard]; - Datum minValueTextDatum = 0; - Datum maxValueTextDatum = 0; - Oid relationId = InvalidOid; - int64 shardId = InvalidOid; - char storageType = InvalidOid; Datum minValue = 0; Datum maxValue = 0; bool minValueExists = false; @@ -3838,15 +3730,15 @@ TupleToShardInterval(HeapTuple heapTuple, TupleDesc tupleDescriptor, Oid */ heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray); - relationId = DatumGetObjectId(datumArray[Anum_pg_dist_shard_logicalrelid - - 1]); - shardId = DatumGetInt64(datumArray[Anum_pg_dist_shard_shardid - 1]); - storageType = DatumGetChar(datumArray[Anum_pg_dist_shard_shardstorage - 1]); - minValueTextDatum = datumArray[Anum_pg_dist_shard_shardminvalue - 1]; - maxValueTextDatum = datumArray[Anum_pg_dist_shard_shardmaxvalue - 1]; + Oid relationId = DatumGetObjectId(datumArray[Anum_pg_dist_shard_logicalrelid - + 1]); + int64 shardId = DatumGetInt64(datumArray[Anum_pg_dist_shard_shardid - 1]); + char storageType = DatumGetChar(datumArray[Anum_pg_dist_shard_shardstorage - 1]); + Datum minValueTextDatum = datumArray[Anum_pg_dist_shard_shardminvalue - 1]; + Datum maxValueTextDatum = datumArray[Anum_pg_dist_shard_shardmaxvalue - 1]; - minValueNull = isNullArray[Anum_pg_dist_shard_shardminvalue - 1]; - maxValueNull = isNullArray[Anum_pg_dist_shard_shardmaxvalue - 1]; + bool minValueNull = isNullArray[Anum_pg_dist_shard_shardminvalue - 1]; + bool maxValueNull = isNullArray[Anum_pg_dist_shard_shardmaxvalue - 1]; if (!minValueNull && !maxValueNull) { @@ -3869,7 +3761,7 @@ TupleToShardInterval(HeapTuple heapTuple, TupleDesc 
tupleDescriptor, Oid maxValueExists = true; } - shardInterval = CitusMakeNode(ShardInterval); + ShardInterval *shardInterval = CitusMakeNode(ShardInterval); shardInterval->relationId = relationId; shardInterval->storageType = storageType; shardInterval->valueTypeId = intervalTypeId; @@ -3970,10 +3862,8 @@ CitusInvalidateRelcacheByRelid(Oid relationId) void CitusInvalidateRelcacheByShardId(int64 shardId) { - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; Form_pg_dist_shard shardForm = NULL; Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); @@ -3987,11 +3877,11 @@ CitusInvalidateRelcacheByShardId(int64 shardId) ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); - scanDescriptor = systable_beginscan(pgDistShard, - DistShardShardidIndexId(), true, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistShard, + DistShardShardidIndexId(), true, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); @@ -4033,28 +3923,23 @@ Datum DistNodeMetadata(void) { Datum metadata = 0; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; const int scanKeyCount = 0; - HeapTuple heapTuple = NULL; - Oid metadataTableOid = InvalidOid; - Relation pgDistNodeMetadata = NULL; - TupleDesc tupleDescriptor = NULL; - metadataTableOid = get_relname_relid("pg_dist_node_metadata", - PG_CATALOG_NAMESPACE); + Oid metadataTableOid = get_relname_relid("pg_dist_node_metadata", + PG_CATALOG_NAMESPACE); if (metadataTableOid == InvalidOid) { ereport(ERROR, (errmsg("pg_dist_node_metadata was not found"))); } - pgDistNodeMetadata = heap_open(metadataTableOid, AccessShareLock); - scanDescriptor = systable_beginscan(pgDistNodeMetadata, - InvalidOid, false, 
- NULL, scanKeyCount, scanKey); - tupleDescriptor = RelationGetDescr(pgDistNodeMetadata); + Relation pgDistNodeMetadata = heap_open(metadataTableOid, AccessShareLock); + SysScanDesc scanDescriptor = systable_beginscan(pgDistNodeMetadata, + InvalidOid, false, + NULL, scanKeyCount, scanKey); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistNodeMetadata); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { bool isNull = false; diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 8f5ef4395..9b28dd3e8 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -91,7 +91,6 @@ start_metadata_sync_to_node(PG_FUNCTION_ARGS) void StartMetadatSyncToNode(char *nodeNameString, int32 nodePort) { - WorkerNode *workerNode = NULL; char *escapedNodeName = quote_literal_cstr(nodeNameString); /* fail if metadata synchronization doesn't succeed */ @@ -106,7 +105,7 @@ StartMetadatSyncToNode(char *nodeNameString, int32 nodePort) LockRelationOid(DistNodeRelationId(), ExclusiveLock); - workerNode = FindWorkerNode(nodeNameString, nodePort); + WorkerNode *workerNode = FindWorkerNode(nodeNameString, nodePort); if (workerNode == NULL) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), @@ -159,7 +158,6 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); - WorkerNode *workerNode = NULL; EnsureCoordinator(); EnsureSuperUser(); @@ -167,7 +165,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) LockRelationOid(DistNodeRelationId(), ExclusiveLock); - workerNode = FindWorkerNode(nodeNameString, nodePort); + WorkerNode *workerNode = FindWorkerNode(nodeNameString, nodePort); if (workerNode == NULL) { ereport(ERROR, 
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), @@ -297,13 +295,13 @@ bool SendOptionalCommandListToWorkerInTransaction(char *nodeName, int32 nodePort, char *nodeUser, List *commandList) { - MultiConnection *workerConnection = NULL; ListCell *commandCell = NULL; int connectionFlags = FORCE_NEW_CONNECTION; bool failed = false; - workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - nodeUser, NULL); + MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + nodeUser, NULL); RemoteTransactionBegin(workerConnection); @@ -356,14 +354,13 @@ MetadataCreateCommands(void) bool includeNodesFromOtherClusters = true; List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); ListCell *distributedTableCell = NULL; - char *nodeListInsertCommand = NULL; bool includeSequenceDefaults = true; /* make sure we have deterministic output for our tests */ workerNodeList = SortList(workerNodeList, CompareWorkerNodes); /* generate insert command for pg_dist_node table */ - nodeListInsertCommand = NodeListInsertCommand(workerNodeList); + char *nodeListInsertCommand = NodeListInsertCommand(workerNodeList); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, nodeListInsertCommand); @@ -441,26 +438,22 @@ MetadataCreateCommands(void) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); - List *shardIntervalList = NIL; - List *shardCreateCommandList = NIL; - char *metadataCommand = NULL; - char *truncateTriggerCreateCommand = NULL; Oid clusteredTableId = cacheEntry->relationId; /* add the table metadata command first*/ - metadataCommand = DistributionCreateCommand(cacheEntry); + char *metadataCommand = DistributionCreateCommand(cacheEntry); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, metadataCommand); /* add the truncate trigger command after the table became distributed */ - truncateTriggerCreateCommand = + char 
*truncateTriggerCreateCommand = TruncateTriggerCreateCommand(cacheEntry->relationId); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, truncateTriggerCreateCommand); /* add the pg_dist_shard{,placement} entries */ - shardIntervalList = LoadShardIntervalList(clusteredTableId); - shardCreateCommandList = ShardListInsertCommand(shardIntervalList); + List *shardIntervalList = LoadShardIntervalList(clusteredTableId); + List *shardCreateCommandList = ShardListInsertCommand(shardIntervalList); metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, shardCreateCommandList); @@ -481,44 +474,36 @@ GetDistributedTableDDLEvents(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); - List *shardIntervalList = NIL; List *commandList = NIL; - List *foreignConstraintCommands = NIL; - List *shardMetadataInsertCommandList = NIL; - List *sequenceDDLCommands = NIL; - List *tableDDLCommands = NIL; - char *tableOwnerResetCommand = NULL; - char *metadataCommand = NULL; - char *truncateTriggerCreateCommand = NULL; bool includeSequenceDefaults = true; /* commands to create sequences */ - sequenceDDLCommands = SequenceDDLCommandsForTable(relationId); + List *sequenceDDLCommands = SequenceDDLCommandsForTable(relationId); commandList = list_concat(commandList, sequenceDDLCommands); /* commands to create the table */ - tableDDLCommands = GetTableDDLEvents(relationId, includeSequenceDefaults); + List *tableDDLCommands = GetTableDDLEvents(relationId, includeSequenceDefaults); commandList = list_concat(commandList, tableDDLCommands); /* command to reset the table owner */ - tableOwnerResetCommand = TableOwnerResetCommand(relationId); + char *tableOwnerResetCommand = TableOwnerResetCommand(relationId); commandList = lappend(commandList, tableOwnerResetCommand); /* command to insert pg_dist_partition entry */ - metadataCommand = DistributionCreateCommand(cacheEntry); + char *metadataCommand = 
DistributionCreateCommand(cacheEntry); commandList = lappend(commandList, metadataCommand); /* commands to create the truncate trigger of the table */ - truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId); + char *truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId); commandList = lappend(commandList, truncateTriggerCreateCommand); /* commands to insert pg_dist_shard & pg_dist_placement entries */ - shardIntervalList = LoadShardIntervalList(relationId); - shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList); + List *shardIntervalList = LoadShardIntervalList(relationId); + List *shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList); commandList = list_concat(commandList, shardMetadataInsertCommandList); /* commands to create foreign key constraints */ - foreignConstraintCommands = GetTableForeignConstraintCommands(relationId); + List *foreignConstraintCommands = GetTableForeignConstraintCommands(relationId); commandList = list_concat(commandList, foreignConstraintCommands); /* commands to create partitioning hierarchy */ @@ -686,10 +671,9 @@ DistributionCreateCommand(DistTableCacheEntry *cacheEntry) char * DistributionDeleteCommand(char *schemaName, char *tableName) { - char *distributedRelationName = NULL; StringInfo deleteDistributionCommand = makeStringInfo(); - distributedRelationName = quote_qualified_identifier(schemaName, tableName); + char *distributedRelationName = quote_qualified_identifier(schemaName, tableName); appendStringInfo(deleteDistributionCommand, "SELECT worker_drop_distributed_table(%s)", @@ -850,11 +834,9 @@ ShardDeleteCommandList(ShardInterval *shardInterval) { uint64 shardId = shardInterval->shardId; List *commandList = NIL; - StringInfo deletePlacementCommand = NULL; - StringInfo deleteShardCommand = NULL; /* create command to delete shard placements */ - deletePlacementCommand = makeStringInfo(); + StringInfo deletePlacementCommand = makeStringInfo(); 
appendStringInfo(deletePlacementCommand, "DELETE FROM pg_dist_placement WHERE shardid = " UINT64_FORMAT, shardId); @@ -862,7 +844,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval) commandList = lappend(commandList, deletePlacementCommand->data); /* create command to delete shard */ - deleteShardCommand = makeStringInfo(); + StringInfo deleteShardCommand = makeStringInfo(); appendStringInfo(deleteShardCommand, "DELETE FROM pg_dist_shard WHERE shardid = " UINT64_FORMAT, shardId); @@ -1013,27 +995,23 @@ UpdateDistNodeBoolAttr(char *nodeName, int32 nodePort, int attrNum, bool value) { const bool indexOK = false; - Relation pgDistNode = NULL; - TupleDesc tupleDescriptor = NULL; ScanKeyData scanKey[2]; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; bool replace[Natts_pg_dist_node]; - pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistNode); + Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort)); - scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, - NULL, 2, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, + NULL, 2, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"", @@ -1113,18 +1091,15 @@ char * CreateSchemaDDLCommand(Oid schemaId) { char *schemaName = get_namespace_name(schemaId); - StringInfo schemaNameDef = NULL; - const char *ownerName = NULL; - const char *quotedSchemaName = NULL; 
if (strncmp(schemaName, "public", NAMEDATALEN) == 0) { return NULL; } - schemaNameDef = makeStringInfo(); - quotedSchemaName = quote_identifier(schemaName); - ownerName = quote_identifier(SchemaOwnerName(schemaId)); + StringInfo schemaNameDef = makeStringInfo(); + const char *quotedSchemaName = quote_identifier(schemaName); + const char *ownerName = quote_identifier(SchemaOwnerName(schemaId)); appendStringInfo(schemaNameDef, CREATE_SCHEMA_COMMAND, quotedSchemaName, ownerName); return schemaNameDef->data; @@ -1155,11 +1130,9 @@ TruncateTriggerCreateCommand(Oid relationId) static char * SchemaOwnerName(Oid objectId) { - HeapTuple tuple = NULL; Oid ownerId = InvalidOid; - char *ownerName = NULL; - tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objectId)); + HeapTuple tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objectId)); if (HeapTupleIsValid(tuple)) { ownerId = ((Form_pg_namespace) GETSTRUCT(tuple))->nspowner; @@ -1169,7 +1142,7 @@ SchemaOwnerName(Oid objectId) ownerId = GetUserId(); } - ownerName = GetUserNameFromId(ownerId, false); + char *ownerName = GetUserNameFromId(ownerId, false); ReleaseSysCache(tuple); @@ -1248,7 +1221,6 @@ DetachPartitionCommandList(void) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); - List *partitionList = NIL; ListCell *partitionCell = NULL; if (!PartitionedTable(cacheEntry->relationId)) @@ -1256,7 +1228,7 @@ DetachPartitionCommandList(void) continue; } - partitionList = PartitionList(cacheEntry->relationId); + List *partitionList = PartitionList(cacheEntry->relationId); foreach(partitionCell, partitionList) { Oid partitionRelationId = lfirst_oid(partitionCell); @@ -1295,7 +1267,6 @@ DetachPartitionCommandList(void) MetadataSyncResult SyncMetadataToNodes(void) { - List *workerList = NIL; ListCell *workerCell = NULL; MetadataSyncResult result = METADATA_SYNC_SUCCESS; @@ -1314,7 +1285,7 @@ SyncMetadataToNodes(void) return METADATA_SYNC_FAILED_LOCK; } - workerList = 
ActivePrimaryWorkerNodeList(NoLock); + List *workerList = ActivePrimaryWorkerNodeList(NoLock); foreach(workerCell, workerList) { diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index 0a4237bee..8625a36ca 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -128,7 +128,6 @@ master_add_node(PG_FUNCTION_ARGS) text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); - int nodeId = 0; NodeMetadata nodeMetadata = DefaultNodeMetadata(); bool nodeAlreadyExists = false; @@ -153,8 +152,8 @@ master_add_node(PG_FUNCTION_ARGS) nodeMetadata.nodeRole = PG_GETARG_OID(3); } - nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, - &nodeAlreadyExists); + int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, + &nodeAlreadyExists); /* * After adding new node, if the node did not already exist, we will activate @@ -185,15 +184,14 @@ master_add_inactive_node(PG_FUNCTION_ARGS) NodeMetadata nodeMetadata = DefaultNodeMetadata(); bool nodeAlreadyExists = false; - int nodeId = 0; nodeMetadata.groupId = PG_GETARG_INT32(2); nodeMetadata.nodeRole = PG_GETARG_OID(3); nodeMetadata.nodeCluster = NameStr(*nodeClusterName); CheckCitusVersion(ERROR); - nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, - &nodeAlreadyExists); + int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, + &nodeAlreadyExists); PG_RETURN_INT32(nodeId); } @@ -217,7 +215,6 @@ master_add_secondary_node(PG_FUNCTION_ARGS) Name nodeClusterName = PG_GETARG_NAME(4); NodeMetadata nodeMetadata = DefaultNodeMetadata(); bool nodeAlreadyExists = false; - int nodeId = 0; nodeMetadata.groupId = GroupForNode(primaryNameString, primaryPort); nodeMetadata.nodeCluster = NameStr(*nodeClusterName); @@ -226,8 +223,8 @@ master_add_secondary_node(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); - 
nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, - &nodeAlreadyExists); + int nodeId = AddNodeMetadata(nodeNameString, nodePort, &nodeMetadata, + &nodeAlreadyExists); PG_RETURN_INT32(nodeId); } @@ -307,11 +304,9 @@ master_disable_node(PG_FUNCTION_ARGS) } PG_CATCH(); { - ErrorData *edata = NULL; - /* CopyErrorData() requires (CurrentMemoryContext != ErrorContext) */ MemoryContextSwitchTo(savedContext); - edata = CopyErrorData(); + ErrorData *edata = CopyErrorData(); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("Disabling %s:%d failed", workerNode->workerName, @@ -397,14 +392,12 @@ SetUpDistributedTableDependencies(WorkerNode *newWorkerNode) static void PropagateRolesToNewNode(WorkerNode *newWorkerNode) { - List *ddlCommands = NIL; - if (!EnableAlterRolePropagation) { return; } - ddlCommands = GenerateAlterRoleIfExistsCommandAllRoles(); + List *ddlCommands = GenerateAlterRoleIfExistsCommandAllRoles(); SendCommandListToWorkerInSingleTransaction(newWorkerNode->workerName, newWorkerNode->workerPort, @@ -419,8 +412,6 @@ PropagateRolesToNewNode(WorkerNode *newWorkerNode) static WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort) { - WorkerNode *workerNode = NULL; - CheckCitusVersion(ERROR); EnsureCoordinator(); @@ -428,7 +419,7 @@ ModifiableWorkerNode(const char *nodeName, int32 nodePort) /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); - workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); + WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (workerNode == NULL) { ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort))); @@ -581,13 +572,12 @@ PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes) static int ActivateNode(char *nodeName, int nodePort) { - WorkerNode *newWorkerNode = NULL; bool isActive = true; /* take an exclusive lock on pg_dist_node to serialize 
pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); - newWorkerNode = SetNodeState(nodeName, nodePort, isActive); + WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive); PropagateRolesToNewNode(newWorkerNode); SetUpDistributedTableDependencies(newWorkerNode); @@ -621,14 +611,13 @@ master_update_node(PG_FUNCTION_ARGS) int32 lock_cooldown = PG_GETARG_INT32(4); char *newNodeNameString = text_to_cstring(newNodeName); - WorkerNode *workerNode = NULL; - WorkerNode *workerNodeWithSameAddress = NULL; List *placementList = NIL; BackgroundWorkerHandle *handle = NULL; CheckCitusVersion(ERROR); - workerNodeWithSameAddress = FindWorkerNodeAnyCluster(newNodeNameString, newNodePort); + WorkerNode *workerNodeWithSameAddress = FindWorkerNodeAnyCluster(newNodeNameString, + newNodePort); if (workerNodeWithSameAddress != NULL) { /* a node with the given hostname and port already exists in the metadata */ @@ -646,7 +635,7 @@ master_update_node(PG_FUNCTION_ARGS) } } - workerNode = LookupNodeByNodeId(nodeId); + WorkerNode *workerNode = LookupNodeByNodeId(nodeId); if (workerNode == NULL) { ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), @@ -734,25 +723,22 @@ UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort) { const bool indexOK = true; - Relation pgDistNode = NULL; - TupleDesc tupleDescriptor = NULL; ScanKeyData scanKey[1]; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; bool replace[Natts_pg_dist_node]; - pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistNode); + Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodeid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodeId)); - scanDescriptor = systable_beginscan(pgDistNode, DistNodeNodeIdIndexId(), indexOK, - 
NULL, 1, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, DistNodeNodeIdIndexId(), + indexOK, + NULL, 1, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"", @@ -791,8 +777,6 @@ Datum get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) { ShardInterval *shardInterval = NULL; - char distributionMethod = 0; - Oid relationId = InvalidOid; CheckCitusVersion(ERROR); @@ -806,7 +790,7 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) errmsg("relation cannot be NULL"))); } - relationId = PG_GETARG_OID(0); + Oid relationId = PG_GETARG_OID(0); EnsureTablePermissions(relationId, ACL_SELECT); if (!IsDistributedTable(relationId)) @@ -815,7 +799,7 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) errmsg("relation is not distributed"))); } - distributionMethod = PartitionMethod(relationId); + char distributionMethod = PartitionMethod(relationId); if (distributionMethod == DISTRIBUTE_BY_NONE) { List *shardIntervalList = LoadShardIntervalList(relationId); @@ -829,12 +813,6 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) else if (distributionMethod == DISTRIBUTE_BY_HASH || distributionMethod == DISTRIBUTE_BY_RANGE) { - Var *distributionColumn = NULL; - Oid distributionDataType = InvalidOid; - Oid inputDataType = InvalidOid; - char *distributionValueString = NULL; - Datum inputDatum = 0; - Datum distributionValueDatum = 0; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); /* if given table is not reference table, distributionValue cannot be NULL */ @@ -845,15 +823,15 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) "than reference tables."))); } - inputDatum = PG_GETARG_DATUM(1); - inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1); - distributionValueString = DatumToString(inputDatum, inputDataType); + Datum inputDatum = 
PG_GETARG_DATUM(1); + Oid inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1); + char *distributionValueString = DatumToString(inputDatum, inputDataType); - distributionColumn = DistPartitionKey(relationId); - distributionDataType = distributionColumn->vartype; + Var *distributionColumn = DistPartitionKey(relationId); + Oid distributionDataType = distributionColumn->vartype; - distributionValueDatum = StringToDatum(distributionValueString, - distributionDataType); + Datum distributionValueDatum = StringToDatum(distributionValueString, + distributionDataType); shardInterval = FindShardInterval(distributionValueDatum, cacheEntry); } @@ -881,18 +859,17 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) WorkerNode * FindWorkerNode(char *nodeName, int32 nodePort) { - WorkerNode *cachedWorkerNode = NULL; HTAB *workerNodeHash = GetWorkerNodeHash(); bool handleFound = false; - void *hashKey = NULL; WorkerNode *searchedNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); strlcpy(searchedNode->workerName, nodeName, WORKER_LENGTH); searchedNode->workerPort = nodePort; - hashKey = (void *) searchedNode; - cachedWorkerNode = (WorkerNode *) hash_search(workerNodeHash, hashKey, HASH_FIND, - &handleFound); + void *hashKey = (void *) searchedNode; + WorkerNode *cachedWorkerNode = (WorkerNode *) hash_search(workerNodeHash, hashKey, + HASH_FIND, + &handleFound); if (handleFound) { WorkerNode *workerNode = (WorkerNode *) palloc(sizeof(WorkerNode)); @@ -939,22 +916,19 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort) List * ReadDistNode(bool includeNodesFromOtherClusters) { - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; - HeapTuple heapTuple = NULL; List *workerNodeList = NIL; - TupleDesc tupleDescriptor = NULL; Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock); - scanDescriptor = systable_beginscan(pgDistNode, - InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = 
systable_beginscan(pgDistNode, + InvalidOid, false, + NULL, scanKeyCount, scanKey); - tupleDescriptor = RelationGetDescr(pgDistNode); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { WorkerNode *workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); @@ -989,7 +963,6 @@ ReadDistNode(bool includeNodesFromOtherClusters) static void RemoveNodeFromCluster(char *nodeName, int32 nodePort) { - char *nodeDeleteCommand = NULL; WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); if (NodeIsPrimary(workerNode)) @@ -1012,7 +985,7 @@ RemoveNodeFromCluster(char *nodeName, int32 nodePort) DeleteNodeRow(workerNode->workerName, nodePort); - nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId); + char *nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId); /* make sure we don't have any lingering session lifespan connections */ CloseNodeConnectionsAfterTransaction(workerNode->workerName, nodePort); @@ -1059,11 +1032,6 @@ AddNodeMetadata(char *nodeName, int32 nodePort, NodeMetadata *nodeMetadata, bool *nodeAlreadyExists) { - int nextNodeIdInt = 0; - WorkerNode *workerNode = NULL; - char *nodeDeleteCommand = NULL; - uint32 primariesWithMetadata = 0; - EnsureCoordinator(); *nodeAlreadyExists = false; @@ -1075,7 +1043,7 @@ AddNodeMetadata(char *nodeName, int32 nodePort, */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); - workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); + WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (workerNode != NULL) { /* fill return data and return */ @@ -1122,18 +1090,18 @@ AddNodeMetadata(char *nodeName, int32 nodePort, } /* generate the new node id from the sequence */ - nextNodeIdInt = GetNextNodeId(); + int nextNodeIdInt = GetNextNodeId(); InsertNodeRow(nextNodeIdInt, nodeName, nodePort, nodeMetadata); workerNode = 
FindWorkerNodeAnyCluster(nodeName, nodePort); /* send the delete command to all primary nodes with metadata */ - nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId); + char *nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId); SendCommandToWorkers(WORKERS_WITH_METADATA, nodeDeleteCommand); /* finally prepare the insert command and send it to all primary nodes */ - primariesWithMetadata = CountPrimariesWithMetadata(); + uint32 primariesWithMetadata = CountPrimariesWithMetadata(); if (primariesWithMetadata != 0) { List *workerNodeList = list_make1(workerNode); @@ -1157,7 +1125,6 @@ SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value) Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); HeapTuple heapTuple = GetNodeTuple(workerNode->workerName, workerNode->workerPort); - WorkerNode *newWorkerNode = NULL; Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; @@ -1206,7 +1173,7 @@ SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value) CitusInvalidateRelcacheByRelid(DistNodeRelationId()); CommandCounterIncrement(); - newWorkerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); + WorkerNode *newWorkerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); heap_close(pgDistNode, NoLock); @@ -1257,18 +1224,16 @@ GetNodeTuple(const char *nodeName, int32 nodePort) const bool indexOK = false; ScanKeyData scanKey[2]; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; HeapTuple nodeTuple = NULL; ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort)); - scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, + NULL, scanKeyCount, 
scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { nodeTuple = heap_copytuple(heapTuple); @@ -1298,18 +1263,16 @@ GetNextGroupId() Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - Datum groupIdDatum = 0; - int32 groupId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique shardId from sequence */ - groupIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); + Datum groupIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); - groupId = DatumGetInt32(groupIdDatum); + int32 groupId = DatumGetInt32(groupIdDatum); return groupId; } @@ -1332,18 +1295,16 @@ GetNextNodeId() Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - Datum nextNodeIdDatum; - int nextNodeId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique shardId from sequence */ - nextNodeIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); + Datum nextNodeIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); - nextNodeId = DatumGetUInt32(nextNodeIdDatum); + int nextNodeId = DatumGetUInt32(nextNodeIdDatum); return nextNodeId; } @@ -1377,9 +1338,6 @@ EnsureCoordinator(void) static void InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, NodeMetadata *nodeMetadata) { - Relation pgDistNode = NULL; - TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isNulls[Natts_pg_dist_node]; @@ -1404,10 +1362,10 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, 
NodeMetadata *nodeMeta values[Anum_pg_dist_node_shouldhaveshards - 1] = BoolGetDatum( nodeMetadata->shouldHaveShards); - pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); + Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistNode); - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistNode, heapTuple); @@ -1430,8 +1388,6 @@ DeleteNodeRow(char *nodeName, int32 nodePort) const int scanKeyCount = 2; bool indexOK = false; - HeapTuple heapTuple = NULL; - SysScanDesc heapScan = NULL; ScanKeyData scanKey[2]; Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); @@ -1447,10 +1403,10 @@ DeleteNodeRow(char *nodeName, int32 nodePort) ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(nodePort)); - heapScan = systable_beginscan(pgDistNode, InvalidOid, indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc heapScan = systable_beginscan(pgDistNode, InvalidOid, indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(heapScan); + HeapTuple heapTuple = systable_getnext(heapScan); if (!HeapTupleIsValid(heapTuple)) { @@ -1481,11 +1437,8 @@ DeleteNodeRow(char *nodeName, int32 nodePort) static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple) { - WorkerNode *workerNode = NULL; Datum datumArray[Natts_pg_dist_node]; bool isNullArray[Natts_pg_dist_node]; - char *nodeName = NULL; - char *nodeRack = NULL; Assert(!HeapTupleHasNulls(heapTuple)); @@ -1502,10 +1455,10 @@ TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple) */ heap_deform_tuple(heapTuple, tupleDescriptor, datumArray, isNullArray); - nodeName = DatumGetCString(datumArray[Anum_pg_dist_node_nodename - 1]); - nodeRack = 
DatumGetCString(datumArray[Anum_pg_dist_node_noderack - 1]); + char *nodeName = DatumGetCString(datumArray[Anum_pg_dist_node_nodename - 1]); + char *nodeRack = DatumGetCString(datumArray[Anum_pg_dist_node_noderack - 1]); - workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); + WorkerNode *workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); workerNode->nodeId = DatumGetUInt32(datumArray[Anum_pg_dist_node_nodeid - 1]); workerNode->workerPort = DatumGetUInt32(datumArray[Anum_pg_dist_node_nodeport - 1]); workerNode->groupId = DatumGetInt32(datumArray[Anum_pg_dist_node_groupid - 1]); @@ -1546,12 +1499,11 @@ StringToDatum(char *inputString, Oid dataType) Oid typIoFunc = InvalidOid; Oid typIoParam = InvalidOid; int32 typeModifier = -1; - Datum datum = 0; getTypeInputInfo(dataType, &typIoFunc, &typIoParam); getBaseTypeAndTypmod(dataType, &typeModifier); - datum = OidInputFunctionCall(typIoFunc, inputString, typIoParam, typeModifier); + Datum datum = OidInputFunctionCall(typIoFunc, inputString, typIoParam, typeModifier); return datum; } @@ -1563,12 +1515,11 @@ StringToDatum(char *inputString, Oid dataType) char * DatumToString(Datum datum, Oid dataType) { - char *outputString = NULL; Oid typIoFunc = InvalidOid; bool typIsVarlena = false; getTypeOutputInfo(dataType, &typIoFunc, &typIsVarlena); - outputString = OidOutputFunctionCall(typIoFunc, datum); + char *outputString = OidOutputFunctionCall(typIoFunc, datum); return outputString; } @@ -1582,34 +1533,29 @@ static bool UnsetMetadataSyncedForAll(void) { bool updatedAtLeastOne = false; - Relation relation = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[2]; int scanKeyCount = 2; bool indexOK = false; - HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; - CatalogIndexState indstate; /* * Concurrent master_update_node() calls might iterate and try to update * pg_dist_node in different orders. To protect against deadlock, we * get an exclusive lock here. 
*/ - relation = heap_open(DistNodeRelationId(), ExclusiveLock); - tupleDescriptor = RelationGetDescr(relation); + Relation relation = heap_open(DistNodeRelationId(), ExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(relation); ScanKeyInit(&scanKey[0], Anum_pg_dist_node_hasmetadata, BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_metadatasynced, BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true)); - indstate = CatalogOpenIndexes(relation); + CatalogIndexState indstate = CatalogOpenIndexes(relation); - scanDescriptor = systable_beginscan(relation, - InvalidOid, indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(relation, + InvalidOid, indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { updatedAtLeastOne = true; @@ -1617,7 +1563,6 @@ UnsetMetadataSyncedForAll(void) while (HeapTupleIsValid(heapTuple)) { - HeapTuple newHeapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; bool replace[Natts_pg_dist_node]; @@ -1629,8 +1574,9 @@ UnsetMetadataSyncedForAll(void) values[Anum_pg_dist_node_metadatasynced - 1] = BoolGetDatum(false); replace[Anum_pg_dist_node_metadatasynced - 1] = true; - newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, - replace); + HeapTuple newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, + isnull, + replace); CatalogTupleUpdateWithInfo(relation, &newHeapTuple->t_self, newHeapTuple, indstate); diff --git a/src/backend/distributed/planner/deparse_shard_query.c b/src/backend/distributed/planner/deparse_shard_query.c index 3f3aadf00..f00fb8f09 100644 --- a/src/backend/distributed/planner/deparse_shard_query.c +++ b/src/backend/distributed/planner/deparse_shard_query.c @@ -61,21 +61,17 @@ RebuildQueryStrings(Query *originalQuery, List *taskList) else if 
(query->commandType == CMD_INSERT && task->modifyWithSubquery) { /* for INSERT..SELECT, adjust shard names in SELECT part */ - RangeTblEntry *copiedInsertRte = NULL; - RangeTblEntry *copiedSubqueryRte = NULL; - Query *copiedSubquery = NULL; List *relationShardList = task->relationShardList; ShardInterval *shardInterval = LoadShardInterval(task->anchorShardId); - char partitionMethod = 0; query = copyObject(originalQuery); - copiedInsertRte = ExtractResultRelationRTE(query); - copiedSubqueryRte = ExtractSelectRangeTableEntry(query); - copiedSubquery = copiedSubqueryRte->subquery; + RangeTblEntry *copiedInsertRte = ExtractResultRelationRTE(query); + RangeTblEntry *copiedSubqueryRte = ExtractSelectRangeTableEntry(query); + Query *copiedSubquery = copiedSubqueryRte->subquery; /* there are no restrictions to add for reference tables */ - partitionMethod = PartitionMethod(shardInterval->relationId); + char partitionMethod = PartitionMethod(shardInterval->relationId); if (partitionMethod != DISTRIBUTE_BY_NONE) { AddShardIntervalRestrictionToSelect(copiedSubquery, shardInterval); @@ -95,14 +91,12 @@ RebuildQueryStrings(Query *originalQuery, List *taskList) else if (query->commandType == CMD_INSERT && (query->onConflict != NULL || valuesRTE != NULL)) { - RangeTblEntry *rangeTableEntry = NULL; - /* * Always an alias in UPSERTs and multi-row INSERTs to avoid * deparsing issues (e.g. RETURNING might reference the original * table name, which has been replaced by a shard name). 
*/ - rangeTableEntry = linitial(query->rtable); + RangeTblEntry *rangeTableEntry = linitial(query->rtable); if (rangeTableEntry->alias == NULL) { Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL); @@ -184,13 +178,8 @@ UpdateTaskQueryString(Query *query, Oid distributedTableId, RangeTblEntry *value bool UpdateRelationToShardNames(Node *node, List *relationShardList) { - RangeTblEntry *newRte = NULL; uint64 shardId = INVALID_SHARD_ID; Oid relationId = InvalidOid; - Oid schemaId = InvalidOid; - char *relationName = NULL; - char *schemaName = NULL; - bool replaceRteWithNullValues = false; ListCell *relationShardCell = NULL; RelationShard *relationShard = NULL; @@ -212,7 +201,7 @@ UpdateRelationToShardNames(Node *node, List *relationShardList) relationShardList); } - newRte = (RangeTblEntry *) node; + RangeTblEntry *newRte = (RangeTblEntry *) node; if (newRte->rtekind != RTE_RELATION) { @@ -238,8 +227,8 @@ UpdateRelationToShardNames(Node *node, List *relationShardList) relationShard = NULL; } - replaceRteWithNullValues = relationShard == NULL || - relationShard->shardId == INVALID_SHARD_ID; + bool replaceRteWithNullValues = relationShard == NULL || + relationShard->shardId == INVALID_SHARD_ID; if (replaceRteWithNullValues) { ConvertRteToSubqueryWithEmptyResult(newRte); @@ -249,11 +238,11 @@ UpdateRelationToShardNames(Node *node, List *relationShardList) shardId = relationShard->shardId; relationId = relationShard->relationId; - relationName = get_rel_name(relationId); + char *relationName = get_rel_name(relationId); AppendShardIdToName(&relationName, shardId); - schemaId = get_rel_namespace(relationId); - schemaName = get_namespace_name(schemaId); + Oid schemaId = get_rel_namespace(relationId); + char *schemaName = get_namespace_name(schemaId); ModifyRangeTblExtraData(newRte, CITUS_RTE_SHARD, schemaName, relationName, NIL); @@ -271,31 +260,26 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte) Relation relation = heap_open(rte->relid, NoLock); TupleDesc 
tupleDescriptor = RelationGetDescr(relation); int columnCount = tupleDescriptor->natts; - int columnIndex = 0; - Query *subquery = NULL; List *targetList = NIL; - FromExpr *joinTree = NULL; - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) { FormData_pg_attribute *attributeForm = TupleDescAttr(tupleDescriptor, columnIndex); - TargetEntry *targetEntry = NULL; - StringInfo resname = NULL; - Const *constValue = NULL; if (attributeForm->attisdropped) { continue; } - resname = makeStringInfo(); - constValue = makeNullConst(attributeForm->atttypid, attributeForm->atttypmod, - attributeForm->attcollation); + StringInfo resname = makeStringInfo(); + Const *constValue = makeNullConst(attributeForm->atttypid, + attributeForm->atttypmod, + attributeForm->attcollation); appendStringInfo(resname, "%s", attributeForm->attname.data); - targetEntry = makeNode(TargetEntry); + TargetEntry *targetEntry = makeNode(TargetEntry); targetEntry->expr = (Expr *) constValue; targetEntry->resno = columnIndex; targetEntry->resname = resname->data; @@ -305,10 +289,10 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte) heap_close(relation, NoLock); - joinTree = makeNode(FromExpr); + FromExpr *joinTree = makeNode(FromExpr); joinTree->quals = makeBoolConst(false, false); - subquery = makeNode(Query); + Query *subquery = makeNode(Query); subquery->commandType = CMD_SELECT; subquery->querySource = QSRC_ORIGINAL; subquery->canSetTag = true; diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 97d208903..0c8c99390 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -113,7 +113,6 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) PlannedStmt *result = NULL; bool needsDistributedPlanning = false; Query *originalQuery = NULL; - 
PlannerRestrictionContext *plannerRestrictionContext = NULL; bool setPartitionedTablesInherited = false; List *rangeTableList = ExtractRangeTableEntryList(parse); @@ -181,7 +180,8 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) ReplaceTableVisibleFunction((Node *) parse); /* create a restriction context and put it at the end if context list */ - plannerRestrictionContext = CreateAndPushPlannerRestrictionContext(); + PlannerRestrictionContext *plannerRestrictionContext = + CreateAndPushPlannerRestrictionContext(); PG_TRY(); { @@ -519,8 +519,6 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi Query *query, ParamListInfo boundParams, PlannerRestrictionContext *plannerRestrictionContext) { - DistributedPlan *distributedPlan = NULL; - PlannedStmt *resultPlan = NULL; bool hasUnresolvedParams = false; JoinRestrictionContext *joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext; @@ -533,7 +531,7 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi plannerRestrictionContext->joinRestrictionContext = RemoveDuplicateJoinRestrictions(joinRestrictionContext); - distributedPlan = + DistributedPlan *distributedPlan = CreateDistributedPlan(planId, originalQuery, query, boundParams, hasUnresolvedParams, plannerRestrictionContext); @@ -580,7 +578,7 @@ CreateDistributedPlannedStmt(uint64 planId, PlannedStmt *localPlan, Query *origi distributedPlan->planId = planId; /* create final plan by combining local plan with distributed plan */ - resultPlan = FinalizePlan(localPlan, distributedPlan); + PlannedStmt *resultPlan = FinalizePlan(localPlan, distributedPlan); /* * As explained above, force planning costs to be unrealistically high if @@ -617,17 +615,14 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi PlannerRestrictionContext *plannerRestrictionContext) { DistributedPlan *distributedPlan = NULL; - MultiTreeRoot *logicalPlan = 
NULL; - List *subPlanList = NIL; bool hasCtes = originalQuery->cteList != NIL; if (IsModifyCommand(originalQuery)) { - Oid targetRelationId = InvalidOid; EnsureModificationsCanRun(); - targetRelationId = ModifyQueryResultRelationId(query); + Oid targetRelationId = ModifyQueryResultRelationId(query); EnsurePartitionTableNotReplicated(targetRelationId); if (InsertSelectIntoDistributedTable(originalQuery)) @@ -722,8 +717,8 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi * Plan subqueries and CTEs that cannot be pushed down by recursively * calling the planner and return the resulting plans to subPlanList. */ - subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery, - plannerRestrictionContext); + List *subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery, + plannerRestrictionContext); /* * If subqueries were recursively planned then we need to replan the query @@ -798,8 +793,8 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi query->cteList = NIL; Assert(originalQuery->cteList == NIL); - logicalPlan = MultiLogicalPlanCreate(originalQuery, query, - plannerRestrictionContext); + MultiTreeRoot *logicalPlan = MultiLogicalPlanCreate(originalQuery, query, + plannerRestrictionContext); MultiLogicalPlanOptimize(logicalPlan); /* @@ -937,14 +932,11 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams) if (IsA(inputNode, Param)) { Param *paramToProcess = (Param *) inputNode; - ParamExternData *correspondingParameterData = NULL; int numberOfParameters = boundParams->numParams; int parameterId = paramToProcess->paramid; int16 typeLength = 0; bool typeByValue = false; Datum constValue = 0; - bool paramIsNull = false; - int parameterIndex = 0; if (paramToProcess->paramkind != PARAM_EXTERN) { @@ -957,13 +949,14 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams) } /* parameterId starts from 1 */ - parameterIndex = parameterId - 1; + int 
parameterIndex = parameterId - 1; if (parameterIndex >= numberOfParameters) { return inputNode; } - correspondingParameterData = &boundParams->params[parameterIndex]; + ParamExternData *correspondingParameterData = + &boundParams->params[parameterIndex]; if (!(correspondingParameterData->pflags & PARAM_FLAG_CONST)) { @@ -972,7 +965,7 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams) get_typlenbyval(paramToProcess->paramtype, &typeLength, &typeByValue); - paramIsNull = correspondingParameterData->isnull; + bool paramIsNull = correspondingParameterData->isnull; if (paramIsNull) { constValue = 0; @@ -1015,17 +1008,14 @@ ResolveExternalParams(Node *inputNode, ParamListInfo boundParams) DistributedPlan * GetDistributedPlan(CustomScan *customScan) { - Node *node = NULL; - DistributedPlan *distributedPlan = NULL; - Assert(list_length(customScan->custom_private) == 1); - node = (Node *) linitial(customScan->custom_private); + Node *node = (Node *) linitial(customScan->custom_private); Assert(CitusIsA(node, DistributedPlan)); CheckNodeCopyAndSerialization(node); - distributedPlan = (DistributedPlan *) node; + DistributedPlan *distributedPlan = (DistributedPlan *) node; return distributedPlan; } @@ -1040,7 +1030,6 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan) { PlannedStmt *finalPlan = NULL; CustomScan *customScan = makeNode(CustomScan); - Node *distributedPlanData = NULL; MultiExecutorType executorType = MULTI_EXECUTOR_INVALID_FIRST; if (!distributedPlan->planningError) @@ -1092,7 +1081,7 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan) distributedPlan->relationIdList = localPlan->relationOids; distributedPlan->queryId = localPlan->queryId; - distributedPlanData = (Node *) distributedPlan; + Node *distributedPlanData = (Node *) distributedPlan; customScan->custom_private = list_make1(distributedPlanData); customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN; @@ -1119,9 +1108,7 @@ static PlannedStmt * 
FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan, CustomScan *customScan) { - PlannedStmt *finalPlan = NULL; - - finalPlan = MasterNodeSelectPlan(distributedPlan, customScan); + PlannedStmt *finalPlan = MasterNodeSelectPlan(distributedPlan, customScan); finalPlan->queryId = localPlan->queryId; finalPlan->utilityStmt = localPlan->utilityStmt; @@ -1141,8 +1128,6 @@ FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan, static PlannedStmt * FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) { - PlannedStmt *routerPlan = NULL; - RangeTblEntry *remoteScanRangeTableEntry = NULL; ListCell *targetEntryCell = NULL; List *targetList = NIL; List *columnNameList = NIL; @@ -1154,9 +1139,6 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) foreach(targetEntryCell, localPlan->planTree->targetlist) { TargetEntry *targetEntry = lfirst(targetEntryCell); - TargetEntry *newTargetEntry = NULL; - Var *newVar = NULL; - Value *columnName = NULL; Assert(IsA(targetEntry, TargetEntry)); @@ -1171,7 +1153,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) } /* build target entry pointing to remote scan range table entry */ - newVar = makeVarFromTargetEntry(customScanRangeTableIndex, targetEntry); + Var *newVar = makeVarFromTargetEntry(customScanRangeTableIndex, targetEntry); if (newVar->vartype == RECORDOID) { @@ -1184,20 +1166,20 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) newVar->vartypmod = BlessRecordExpression(targetEntry->expr); } - newTargetEntry = flatCopyTargetEntry(targetEntry); + TargetEntry *newTargetEntry = flatCopyTargetEntry(targetEntry); newTargetEntry->expr = (Expr *) newVar; targetList = lappend(targetList, newTargetEntry); - columnName = makeString(targetEntry->resname); + Value *columnName = makeString(targetEntry->resname); columnNameList = lappend(columnNameList, columnName); } customScan->scan.plan.targetlist = targetList; - routerPlan 
= makeNode(PlannedStmt); + PlannedStmt *routerPlan = makeNode(PlannedStmt); routerPlan->planTree = (Plan *) customScan; - remoteScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList); + RangeTblEntry *remoteScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList); routerPlan->rtable = list_make1(remoteScanRangeTableEntry); /* add original range table list for access permission checks */ @@ -1236,11 +1218,10 @@ BlessRecordExpression(Expr *expr) */ Oid resultTypeId = InvalidOid; TupleDesc resultTupleDesc = NULL; - TypeFuncClass typeClass; /* get_expr_result_type blesses the tuple descriptor */ - typeClass = get_expr_result_type((Node *) expr, &resultTypeId, - &resultTupleDesc); + TypeFuncClass typeClass = get_expr_result_type((Node *) expr, &resultTypeId, + &resultTupleDesc); if (typeClass == TYPEFUNC_COMPOSITE) { typeMod = resultTupleDesc->tdtypmod; @@ -1368,32 +1349,27 @@ multi_join_restriction_hook(PlannerInfo *root, JoinType jointype, JoinPathExtraData *extra) { - PlannerRestrictionContext *plannerRestrictionContext = NULL; - JoinRestrictionContext *joinRestrictionContext = NULL; - JoinRestriction *joinRestriction = NULL; - MemoryContext restrictionsMemoryContext = NULL; - MemoryContext oldMemoryContext = NULL; - List *restrictInfoList = NIL; - /* * Use a memory context that's guaranteed to live long enough, could be * called in a more shorted lived one (e.g. with GEQO). 
*/ - plannerRestrictionContext = CurrentPlannerRestrictionContext(); - restrictionsMemoryContext = plannerRestrictionContext->memoryContext; - oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); + PlannerRestrictionContext *plannerRestrictionContext = + CurrentPlannerRestrictionContext(); + MemoryContext restrictionsMemoryContext = plannerRestrictionContext->memoryContext; + MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); /* * We create a copy of restrictInfoList because it may be created in a memory * context which will be deleted when we still need it, thus we create a copy * of it in our memory context. */ - restrictInfoList = copyObject(extra->restrictlist); + List *restrictInfoList = copyObject(extra->restrictlist); - joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext; + JoinRestrictionContext *joinRestrictionContext = + plannerRestrictionContext->joinRestrictionContext; Assert(joinRestrictionContext != NULL); - joinRestriction = palloc0(sizeof(JoinRestriction)); + JoinRestriction *joinRestriction = palloc0(sizeof(JoinRestriction)); joinRestriction->joinType = jointype; joinRestriction->joinRestrictInfoList = restrictInfoList; joinRestriction->plannerInfo = root; @@ -1424,14 +1400,7 @@ void multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, Index restrictionIndex, RangeTblEntry *rte) { - PlannerRestrictionContext *plannerRestrictionContext = NULL; - RelationRestrictionContext *relationRestrictionContext = NULL; - MemoryContext restrictionsMemoryContext = NULL; - MemoryContext oldMemoryContext = NULL; - RelationRestriction *relationRestriction = NULL; DistTableCacheEntry *cacheEntry = NULL; - bool distributedTable = false; - bool localTable = false; AdjustReadIntermediateResultCost(rte, relOptInfo); @@ -1444,14 +1413,15 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, * Use a memory context that's guaranteed to live long enough, could 
be * called in a shorter-lived one (e.g. with GEQO). */ - plannerRestrictionContext = CurrentPlannerRestrictionContext(); - restrictionsMemoryContext = plannerRestrictionContext->memoryContext; - oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); + PlannerRestrictionContext *plannerRestrictionContext = + CurrentPlannerRestrictionContext(); + MemoryContext restrictionsMemoryContext = plannerRestrictionContext->memoryContext; + MemoryContext oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); - distributedTable = IsDistributedTable(rte->relid); - localTable = !distributedTable; + bool distributedTable = IsDistributedTable(rte->relid); + bool localTable = !distributedTable; - relationRestriction = palloc0(sizeof(RelationRestriction)); + RelationRestriction *relationRestriction = palloc0(sizeof(RelationRestriction)); relationRestriction->index = restrictionIndex; relationRestriction->relationId = rte->relid; relationRestriction->rte = rte; @@ -1463,7 +1433,8 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, /* see comments on GetVarFromAssignedParam() */ relationRestriction->outerPlanParamsList = OuterPlanParamsList(root); - relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; + RelationRestrictionContext *relationRestrictionContext = + plannerRestrictionContext->relationRestrictionContext; relationRestrictionContext->hasDistributedRelation |= distributedTable; relationRestrictionContext->hasLocalRelation |= localTable; @@ -1644,9 +1615,8 @@ static List * OuterPlanParamsList(PlannerInfo *root) { List *planParamsList = NIL; - PlannerInfo *outerNodeRoot = NULL; - for (outerNodeRoot = root->parent_root; outerNodeRoot != NULL; + for (PlannerInfo *outerNodeRoot = root->parent_root; outerNodeRoot != NULL; outerNodeRoot = outerNodeRoot->parent_root) { RootPlanParams *rootPlanParams = palloc0(sizeof(RootPlanParams)); @@ -1729,11 +1699,9 @@ 
CreateAndPushPlannerRestrictionContext(void) static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void) { - PlannerRestrictionContext *plannerRestrictionContext = NULL; - Assert(plannerRestrictionContextList != NIL); - plannerRestrictionContext = + PlannerRestrictionContext *plannerRestrictionContext = (PlannerRestrictionContext *) linitial(plannerRestrictionContextList); if (plannerRestrictionContext == NULL) @@ -1804,7 +1772,6 @@ HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams) if (boundParams && paramId > 0 && paramId <= boundParams->numParams) { ParamExternData *externParam = NULL; - Oid paramType = InvalidOid; /* give hook a chance in case parameter is dynamic */ if (boundParams->paramFetch != NULL) @@ -1818,7 +1785,7 @@ HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams) externParam = &boundParams->params[paramId - 1]; } - paramType = externParam->ptype; + Oid paramType = externParam->ptype; if (OidIsValid(paramType)) { return false; @@ -1890,7 +1857,6 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList) foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); - DistTableCacheEntry *cacheEntry = NULL; if (rangeTableEntry->rtekind == RTE_FUNCTION) { @@ -1909,7 +1875,8 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList) continue; } - cacheEntry = DistributedTableCacheEntry(rangeTableEntry->relid); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry( + rangeTableEntry->relid); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { hasReferenceTable = true; @@ -1931,14 +1898,12 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList) static bool QueryIsNotSimpleSelect(Node *node) { - Query *query = NULL; - if (!IsA(node, Query)) { return false; } - query = (Query *) node; + Query *query = (Query *) node; return (query->commandType != CMD_SELECT) || (query->rowMarks != NIL); } @@ -1950,14 
+1915,6 @@ QueryIsNotSimpleSelect(Node *node) static bool UpdateReferenceTablesWithShard(Node *node, void *context) { - RangeTblEntry *newRte = NULL; - uint64 shardId = INVALID_SHARD_ID; - Oid relationId = InvalidOid; - Oid schemaId = InvalidOid; - char *relationName = NULL; - DistTableCacheEntry *cacheEntry = NULL; - ShardInterval *shardInterval = NULL; - if (node == NULL) { return false; @@ -1976,32 +1933,32 @@ UpdateReferenceTablesWithShard(Node *node, void *context) NULL); } - newRte = (RangeTblEntry *) node; + RangeTblEntry *newRte = (RangeTblEntry *) node; if (newRte->rtekind != RTE_RELATION) { return false; } - relationId = newRte->relid; + Oid relationId = newRte->relid; if (!IsDistributedTable(relationId)) { return false; } - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE) { return false; } - shardInterval = cacheEntry->sortedShardIntervalArray[0]; - shardId = shardInterval->shardId; + ShardInterval *shardInterval = cacheEntry->sortedShardIntervalArray[0]; + uint64 shardId = shardInterval->shardId; - relationName = get_rel_name(relationId); + char *relationName = get_rel_name(relationId); AppendShardIdToName(&relationName, shardId); - schemaId = get_rel_namespace(relationId); + Oid schemaId = get_rel_namespace(relationId); newRte->relid = get_relname_relid(relationName, schemaId); /* diff --git a/src/backend/distributed/planner/extended_op_node_utils.c b/src/backend/distributed/planner/extended_op_node_utils.c index d24be16ee..d6db5ef72 100644 --- a/src/backend/distributed/planner/extended_op_node_utils.c +++ b/src/backend/distributed/planner/extended_op_node_utils.c @@ -45,28 +45,21 @@ ExtendedOpNodeProperties BuildExtendedOpNodeProperties(MultiExtendedOp *extendedOpNode) { ExtendedOpNodeProperties extendedOpNodeProperties; - List *tableNodeList = NIL; - List *targetList = NIL; - Node *havingQual = NULL; - bool 
groupedByDisjointPartitionColumn = false; - bool repartitionSubquery = false; - bool hasNonPartitionColumnDistinctAgg = false; - bool pullDistinctColumns = false; - bool pushDownWindowFunctions = false; - tableNodeList = FindNodesOfType((MultiNode *) extendedOpNode, T_MultiTable); - groupedByDisjointPartitionColumn = GroupedByDisjointPartitionColumn(tableNodeList, - extendedOpNode); + List *tableNodeList = FindNodesOfType((MultiNode *) extendedOpNode, T_MultiTable); + bool groupedByDisjointPartitionColumn = GroupedByDisjointPartitionColumn( + tableNodeList, + extendedOpNode); - repartitionSubquery = ExtendedOpNodeContainsRepartitionSubquery(extendedOpNode); + bool repartitionSubquery = ExtendedOpNodeContainsRepartitionSubquery(extendedOpNode); - targetList = extendedOpNode->targetList; - havingQual = extendedOpNode->havingQual; - hasNonPartitionColumnDistinctAgg = + List *targetList = extendedOpNode->targetList; + Node *havingQual = extendedOpNode->havingQual; + bool hasNonPartitionColumnDistinctAgg = HasNonPartitionColumnDistinctAgg(targetList, havingQual, tableNodeList); - pullDistinctColumns = + bool pullDistinctColumns = ShouldPullDistinctColumn(repartitionSubquery, groupedByDisjointPartitionColumn, hasNonPartitionColumnDistinctAgg); @@ -75,7 +68,7 @@ BuildExtendedOpNodeProperties(MultiExtendedOp *extendedOpNode) * using hasWindowFuncs is safe for now. However, this should be fixed * when we support pull-to-master window functions. 
*/ - pushDownWindowFunctions = extendedOpNode->hasWindowFuncs; + bool pushDownWindowFunctions = extendedOpNode->hasWindowFuncs; extendedOpNodeProperties.groupedByDisjointPartitionColumn = groupedByDisjointPartitionColumn; @@ -103,14 +96,13 @@ GroupedByDisjointPartitionColumn(List *tableNodeList, MultiExtendedOp *opNode) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); Oid relationId = tableNode->relationId; - char partitionMethod = 0; if (relationId == SUBQUERY_RELATION_ID || !IsDistributedTable(relationId)) { continue; } - partitionMethod = PartitionMethod(relationId); + char partitionMethod = PartitionMethod(relationId); if (partitionMethod != DISTRIBUTE_BY_RANGE && partitionMethod != DISTRIBUTE_BY_HASH) { @@ -173,12 +165,8 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual, foreach(aggregateCheckCell, aggregateCheckList) { Node *targetNode = lfirst(aggregateCheckCell); - Aggref *targetAgg = NULL; - List *varList = NIL; ListCell *varCell = NULL; bool isPartitionColumn = false; - TargetEntry *firstTargetEntry = NULL; - Node *firstTargetExprNode = NULL; if (IsA(targetNode, Var)) { @@ -186,7 +174,7 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual, } Assert(IsA(targetNode, Aggref)); - targetAgg = (Aggref *) targetNode; + Aggref *targetAgg = (Aggref *) targetNode; if (targetAgg->aggdistinct == NIL) { continue; @@ -201,14 +189,15 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual, return true; } - firstTargetEntry = linitial_node(TargetEntry, targetAgg->args); - firstTargetExprNode = strip_implicit_coercions((Node *) firstTargetEntry->expr); + TargetEntry *firstTargetEntry = linitial_node(TargetEntry, targetAgg->args); + Node *firstTargetExprNode = strip_implicit_coercions( + (Node *) firstTargetEntry->expr); if (!IsA(firstTargetExprNode, Var)) { return true; } - varList = pull_var_clause_default((Node *) targetAgg->args); + List *varList = pull_var_clause_default((Node *) 
targetAgg->args); foreach(varCell, varList) { Node *targetVar = (Node *) lfirst(varCell); diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index 06d3f9d63..b701d2141 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -71,8 +71,6 @@ static bool DistKeyInSimpleOpExpression(Expr *clause, Var *distColumn); PlannedStmt * FastPathPlanner(Query *originalQuery, Query *parse, ParamListInfo boundParams) { - PlannedStmt *result = NULL; - /* * To support prepared statements for fast-path queries, we resolve the * external parameters at this point. Note that this is normally done by @@ -98,7 +96,7 @@ FastPathPlanner(Query *originalQuery, Query *parse, ParamListInfo boundParams) (Node *) eval_const_expressions(NULL, (Node *) parse->jointree->quals); - result = GeneratePlaceHolderPlannedStmt(originalQuery); + PlannedStmt *result = GeneratePlaceHolderPlannedStmt(originalQuery); return result; } @@ -122,7 +120,6 @@ GeneratePlaceHolderPlannedStmt(Query *parse) PlannedStmt *result = makeNode(PlannedStmt); SeqScan *seqScanNode = makeNode(SeqScan); Plan *plan = &seqScanNode->plan; - Oid relationId = InvalidOid; AssertArg(FastPathRouterQuery(parse)); @@ -143,7 +140,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse) result->rtable = copyObject(parse->rtable); result->planTree = (Plan *) plan; - relationId = ExtractFirstDistributedTableId(parse); + Oid relationId = ExtractFirstDistributedTableId(parse); result->relationOids = list_make1_oid(relationId); return result; @@ -166,12 +163,8 @@ GeneratePlaceHolderPlannedStmt(Query *parse) bool FastPathRouterQuery(Query *query) { - RangeTblEntry *rangeTableEntry = NULL; FromExpr *joinTree = query->jointree; Node *quals = NULL; - Oid distributedTableId = InvalidOid; - Var *distributionKey = NULL; - DistTableCacheEntry *cacheEntry = NULL; if (!EnableFastPathRouterPlanner) { @@ 
-201,15 +194,15 @@ FastPathRouterQuery(Query *query) return false; } - rangeTableEntry = (RangeTblEntry *) linitial(query->rtable); + RangeTblEntry *rangeTableEntry = (RangeTblEntry *) linitial(query->rtable); if (rangeTableEntry->rtekind != RTE_RELATION) { return false; } /* we don't want to deal with append/range distributed tables */ - distributedTableId = rangeTableEntry->relid; - cacheEntry = DistributedTableCacheEntry(distributedTableId); + Oid distributedTableId = rangeTableEntry->relid; + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH || cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE)) { @@ -224,7 +217,7 @@ FastPathRouterQuery(Query *query) } /* if that's a reference table, we don't need to check anything further */ - distributionKey = PartitionColumn(distributedTableId, 1); + Var *distributionKey = PartitionColumn(distributedTableId, 1); if (!distributionKey) { return true; @@ -269,11 +262,10 @@ static bool ColumnAppearsMultipleTimes(Node *quals, Var *distributionKey) { ListCell *varClauseCell = NULL; - List *varClauseList = NIL; int partitionColumnReferenceCount = 0; /* make sure partition column is used only once in the quals */ - varClauseList = pull_var_clause_default(quals); + List *varClauseList = pull_var_clause_default(quals); foreach(varClauseCell, varClauseList) { Var *column = (Var *) lfirst(varClauseCell); diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 33566054a..bacf7895c 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -98,7 +98,6 @@ contain_param_walker(Node *node, void *context) DistributedPlan * TryToDelegateFunctionCall(Query *query, bool *hasExternParam) { - FromExpr *joinTree = NULL; List *targetList = NIL; TargetEntry *targetEntry = NULL; FuncExpr *funcExpr = 
NULL; @@ -116,7 +115,6 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam) Task *task = NULL; Job *job = NULL; DistributedPlan *distributedPlan = NULL; - int32 groupId = 0; struct ParamWalkerContext walkerParamContext = { 0 }; /* set hasExternParam now in case of early exit */ @@ -128,7 +126,7 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam) return NULL; } - groupId = GetLocalGroupId(); + int32 groupId = GetLocalGroupId(); if (groupId != 0 || groupId == GROUP_ID_UPGRADING) { /* do not delegate from workers, or while upgrading */ @@ -147,7 +145,7 @@ TryToDelegateFunctionCall(Query *query, bool *hasExternParam) return NULL; } - joinTree = query->jointree; + FromExpr *joinTree = query->jointree; if (joinTree == NULL) { /* no join tree (mostly here to be defensive) */ diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 95fabdfd0..e31fa0e40 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -136,9 +136,6 @@ static bool CheckInsertSelectQuery(Query *query) { CmdType commandType = query->commandType; - List *fromList = NULL; - RangeTblRef *rangeTableReference = NULL; - RangeTblEntry *subqueryRte = NULL; if (commandType != CMD_INSERT) { @@ -150,19 +147,19 @@ CheckInsertSelectQuery(Query *query) return false; } - fromList = query->jointree->fromlist; + List *fromList = query->jointree->fromlist; if (list_length(fromList) != 1) { return false; } - rangeTableReference = linitial(fromList); + RangeTblRef *rangeTableReference = linitial(fromList); if (!IsA(rangeTableReference, RangeTblRef)) { return false; } - subqueryRte = rt_fetch(rangeTableReference->rtindex, query->rtable); + RangeTblEntry *subqueryRte = rt_fetch(rangeTableReference->rtindex, query->rtable); if (subqueryRte->rtekind != RTE_SUBQUERY) { return false; @@ -185,18 +182,15 @@ DistributedPlan * CreateInsertSelectPlan(uint64 
planId, Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { - DistributedPlan *distributedPlan = NULL; - DeferredErrorMessage *deferredError = NULL; - - deferredError = ErrorIfOnConflictNotSupported(originalQuery); + DeferredErrorMessage *deferredError = ErrorIfOnConflictNotSupported(originalQuery); if (deferredError != NULL) { /* raising the error as there is no possible solution for the unsupported on conflict statements */ RaiseDeferredError(deferredError, ERROR); } - distributedPlan = CreateDistributedInsertSelectPlan(originalQuery, - plannerRestrictionContext); + DistributedPlan *distributedPlan = CreateDistributedInsertSelectPlan(originalQuery, + plannerRestrictionContext); if (distributedPlan->planningError != NULL) { @@ -220,10 +214,8 @@ static DistributedPlan * CreateDistributedInsertSelectPlan(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { - int shardOffset = 0; List *sqlTaskList = NIL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ - Job *workerJob = NULL; uint64 jobId = INVALID_JOB_ID; DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan); RangeTblEntry *insertRte = ExtractResultRelationRTE(originalQuery); @@ -234,7 +226,6 @@ CreateDistributedInsertSelectPlan(Query *originalQuery, RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; bool allReferenceTables = relationRestrictionContext->allReferenceTables; - bool allDistributionKeysInQueryAreEqual = false; distributedPlan->modLevel = RowModifyLevelForQuery(originalQuery); @@ -251,7 +242,7 @@ CreateDistributedInsertSelectPlan(Query *originalQuery, return distributedPlan; } - allDistributionKeysInQueryAreEqual = + bool allDistributionKeysInQueryAreEqual = AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext); /* @@ -263,16 +254,16 @@ CreateDistributedInsertSelectPlan(Query *originalQuery, * the current shard boundaries. 
Finally, perform the normal shard pruning to * decide on whether to push the query to the current shard or not. */ - for (shardOffset = 0; shardOffset < shardCount; shardOffset++) + for (int shardOffset = 0; shardOffset < shardCount; shardOffset++) { ShardInterval *targetShardInterval = targetCacheEntry->sortedShardIntervalArray[shardOffset]; - Task *modifyTask = NULL; - modifyTask = RouterModifyTaskForShardInterval(originalQuery, targetShardInterval, - plannerRestrictionContext, - taskIdIndex, - allDistributionKeysInQueryAreEqual); + Task *modifyTask = RouterModifyTaskForShardInterval(originalQuery, + targetShardInterval, + plannerRestrictionContext, + taskIdIndex, + allDistributionKeysInQueryAreEqual); /* If planning returned an error, propagate it to the caller function */ /* and hand over the distributed plan */ @@ -289,7 +280,7 @@ CreateDistributedInsertSelectPlan(Query *originalQuery, } /* Create the worker job */ - workerJob = CitusMakeNode(Job); + Job *workerJob = CitusMakeNode(Job); workerJob->taskList = sqlTaskList; workerJob->subqueryPushdown = false; workerJob->dependedJobList = NIL; @@ -321,17 +312,15 @@ static DeferredErrorMessage * DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte, bool allReferenceTables) { - Query *subquery = NULL; Oid selectPartitionColumnTableId = InvalidOid; Oid targetRelationId = insertRte->relid; char targetPartitionMethod = PartitionMethod(targetRelationId); ListCell *rangeTableCell = NULL; - DeferredErrorMessage *error = NULL; /* we only do this check for INSERT ... 
SELECT queries */ AssertArg(InsertSelectIntoDistributedTable(queryTree)); - subquery = subqueryRte->subquery; + Query *subquery = subqueryRte->subquery; if (!NeedsDistributedPlanning(subquery)) { @@ -363,7 +352,7 @@ DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte, } /* we don't support LIMIT, OFFSET and WINDOW functions */ - error = MultiTaskRouterSelectQuerySupported(subquery); + DeferredErrorMessage *error = MultiTaskRouterSelectQuerySupported(subquery); if (error) { return error; @@ -442,20 +431,15 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter StringInfo queryString = makeStringInfo(); ListCell *restrictionCell = NULL; - Task *modifyTask = NULL; List *selectPlacementList = NIL; uint64 selectAnchorShardId = INVALID_SHARD_ID; List *relationShardList = NIL; List *prunedShardIntervalListList = NIL; uint64 jobId = INVALID_JOB_ID; - List *insertShardPlacementList = NULL; - List *intersectedPlacementList = NULL; - bool replacePrunedQueryWithDummy = false; bool allReferenceTables = plannerRestrictionContext->relationRestrictionContext->allReferenceTables; List *shardOpExpressions = NIL; RestrictInfo *shardRestrictionList = NULL; - DeferredErrorMessage *planningError = NULL; bool multiShardModifyQuery = false; List *relationRestrictionList = NIL; @@ -517,18 +501,21 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter } /* mark that we don't want the router planner to generate dummy hosts/queries */ - replacePrunedQueryWithDummy = false; + bool replacePrunedQueryWithDummy = false; /* * Use router planner to decide on whether we can push down the query or not. * If we can, we also rely on the side-effects that all RTEs have been updated * to point to the relevant nodes and selectPlacementList is determined. 
*/ - planningError = PlanRouterQuery(copiedSubquery, copyOfPlannerRestrictionContext, - &selectPlacementList, &selectAnchorShardId, - &relationShardList, &prunedShardIntervalListList, - replacePrunedQueryWithDummy, - &multiShardModifyQuery, NULL); + DeferredErrorMessage *planningError = PlanRouterQuery(copiedSubquery, + copyOfPlannerRestrictionContext, + &selectPlacementList, + &selectAnchorShardId, + &relationShardList, + &prunedShardIntervalListList, + replacePrunedQueryWithDummy, + &multiShardModifyQuery, NULL); Assert(!multiShardModifyQuery); @@ -552,9 +539,9 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter } /* get the placements for insert target shard and its intersection with select */ - insertShardPlacementList = FinalizedShardPlacementList(shardId); - intersectedPlacementList = IntersectPlacementList(insertShardPlacementList, - selectPlacementList); + List *insertShardPlacementList = FinalizedShardPlacementList(shardId); + List *intersectedPlacementList = IntersectPlacementList(insertShardPlacementList, + selectPlacementList); /* * If insert target does not have exactly the same placements with the select, @@ -586,7 +573,8 @@ RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInter ereport(DEBUG2, (errmsg("distributed statement: %s", ApplyLogRedaction(queryString->data)))); - modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, queryString->data); + Task *modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, + queryString->data); modifyTask->dependedTaskList = NULL; modifyTask->anchorShardId = shardId; modifyTask->taskPlacementList = insertShardPlacementList; @@ -612,21 +600,18 @@ Query * ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte) { - Query *subquery = NULL; ListCell *insertTargetEntryCell; List *newSubqueryTargetlist = NIL; List *newInsertTargetlist = NIL; int resno = 1; Index insertTableId = 1; - Oid 
insertRelationId = InvalidOid; - int subqueryTargetLength = 0; int targetEntryIndex = 0; AssertArg(InsertSelectIntoDistributedTable(originalQuery)); - subquery = subqueryRte->subquery; + Query *subquery = subqueryRte->subquery; - insertRelationId = insertRte->relid; + Oid insertRelationId = insertRte->relid; /* * We implement the following algorithm for the reoderding: @@ -642,11 +627,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, foreach(insertTargetEntryCell, originalQuery->targetList) { TargetEntry *oldInsertTargetEntry = lfirst(insertTargetEntryCell); - TargetEntry *newInsertTargetEntry = NULL; - Var *newInsertVar = NULL; TargetEntry *newSubqueryTargetEntry = NULL; - List *targetVarList = NULL; - int targetVarCount = 0; AttrNumber originalAttrNo = get_attnum(insertRelationId, oldInsertTargetEntry->resname); @@ -665,10 +646,10 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, * It is safe to pull Var clause and ignore the coercions since that * are already going to be added on the workers implicitly. 
*/ - targetVarList = pull_var_clause((Node *) oldInsertTargetEntry->expr, - PVC_RECURSE_AGGREGATES); + List *targetVarList = pull_var_clause((Node *) oldInsertTargetEntry->expr, + PVC_RECURSE_AGGREGATES); - targetVarCount = list_length(targetVarList); + int targetVarCount = list_length(targetVarList); /* a single INSERT target entry cannot have more than one Var */ Assert(targetVarCount <= 1); @@ -702,14 +683,15 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, */ Assert(!newSubqueryTargetEntry->resjunk); - newInsertVar = makeVar(insertTableId, originalAttrNo, - exprType((Node *) newSubqueryTargetEntry->expr), - exprTypmod((Node *) newSubqueryTargetEntry->expr), - exprCollation((Node *) newSubqueryTargetEntry->expr), - 0); - newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar, originalAttrNo, - oldInsertTargetEntry->resname, - oldInsertTargetEntry->resjunk); + Var *newInsertVar = makeVar(insertTableId, originalAttrNo, + exprType((Node *) newSubqueryTargetEntry->expr), + exprTypmod((Node *) newSubqueryTargetEntry->expr), + exprCollation((Node *) newSubqueryTargetEntry->expr), + 0); + TargetEntry *newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar, + originalAttrNo, + oldInsertTargetEntry->resname, + oldInsertTargetEntry->resjunk); newInsertTargetlist = lappend(newInsertTargetlist, newInsertTargetEntry); resno++; @@ -719,12 +701,11 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, * if there are any remaining target list entries (i.e., GROUP BY column not on the * target list of subquery), update the remaining resnos. 
*/ - subqueryTargetLength = list_length(subquery->targetList); + int subqueryTargetLength = list_length(subquery->targetList); for (; targetEntryIndex < subqueryTargetLength; ++targetEntryIndex) { TargetEntry *oldSubqueryTle = list_nth(subquery->targetList, targetEntryIndex); - TargetEntry *newSubqueryTargetEntry = NULL; /* * Skip non-junk entries since we've already processed them above and this @@ -735,7 +716,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, continue; } - newSubqueryTargetEntry = copyObject(oldSubqueryTle); + TargetEntry *newSubqueryTargetEntry = copyObject(oldSubqueryTle); newSubqueryTargetEntry->resno = resno; newSubqueryTargetlist = lappend(newSubqueryTargetlist, @@ -920,13 +901,8 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte, { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); List *insertTargetEntryColumnList = pull_var_clause_default((Node *) targetEntry); - Var *insertVar = NULL; - AttrNumber originalAttrNo = InvalidAttrNumber; - TargetEntry *subqueryTargetEntry = NULL; - Expr *selectTargetExpr = NULL; Oid subqueryPartitionColumnRelationId = InvalidOid; Var *subqueryPartitionColumn = NULL; - List *parentQueryList = NIL; /* * We only consider target entries that include a single column. 
Note that this @@ -941,8 +917,8 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte, continue; } - insertVar = (Var *) linitial(insertTargetEntryColumnList); - originalAttrNo = targetEntry->resno; + Var *insertVar = (Var *) linitial(insertTargetEntryColumnList); + AttrNumber originalAttrNo = targetEntry->resno; /* skip processing of target table non-partition columns */ if (originalAttrNo != insertPartitionColumn->varattno) @@ -953,11 +929,11 @@ InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte, /* INSERT query includes the partition column */ targetTableHasPartitionColumn = true; - subqueryTargetEntry = list_nth(subquery->targetList, - insertVar->varattno - 1); - selectTargetExpr = subqueryTargetEntry->expr; + TargetEntry *subqueryTargetEntry = list_nth(subquery->targetList, + insertVar->varattno - 1); + Expr *selectTargetExpr = subqueryTargetEntry->expr; - parentQueryList = list_make2(query, subquery); + List *parentQueryList = list_make2(query, subquery); FindReferencedTableColumn(selectTargetExpr, parentQueryList, subquery, &subqueryPartitionColumnRelationId, @@ -1135,7 +1111,6 @@ static DistributedPlan * CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse) { Query *insertSelectQuery = copyObject(parse); - Query *selectQuery = NULL; RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery); RangeTblEntry *insertRte = ExtractResultRelationRTE(insertSelectQuery); @@ -1152,7 +1127,7 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse) return distributedPlan; } - selectQuery = selectRte->subquery; + Query *selectQuery = selectRte->subquery; /* * Wrap the SELECT as a subquery if the INSERT...SELECT has CTEs or the SELECT @@ -1194,15 +1169,13 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse) * insertSelectSubuery and a workerJob to execute afterwards. 
*/ uint64 jobId = INVALID_JOB_ID; - Job *workerJob = NULL; - List *taskList = NIL; char *resultIdPrefix = InsertSelectResultIdPrefix(planId); /* generate tasks for the INSERT..SELECT phase */ - taskList = TwoPhaseInsertSelectTaskList(targetRelationId, insertSelectQuery, - resultIdPrefix); + List *taskList = TwoPhaseInsertSelectTaskList(targetRelationId, insertSelectQuery, + resultIdPrefix); - workerJob = CitusMakeNode(Job); + Job *workerJob = CitusMakeNode(Job); workerJob->taskList = taskList; workerJob->subqueryPushdown = false; workerJob->dependedJobList = NIL; @@ -1232,18 +1205,14 @@ CreateCoordinatorInsertSelectPlan(uint64 planId, Query *parse) static DeferredErrorMessage * CoordinatorInsertSelectSupported(Query *insertSelectQuery) { - RangeTblEntry *insertRte = NULL; - RangeTblEntry *subqueryRte = NULL; - Query *subquery = NULL; - DeferredErrorMessage *deferredError = NULL; - - deferredError = ErrorIfOnConflictNotSupported(insertSelectQuery); + DeferredErrorMessage *deferredError = ErrorIfOnConflictNotSupported( + insertSelectQuery); if (deferredError) { return deferredError; } - insertRte = ExtractResultRelationRTE(insertSelectQuery); + RangeTblEntry *insertRte = ExtractResultRelationRTE(insertSelectQuery); if (PartitionMethod(insertRte->relid) == DISTRIBUTE_BY_APPEND) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, @@ -1251,8 +1220,8 @@ CoordinatorInsertSelectSupported(Query *insertSelectQuery) "not supported", NULL, NULL); } - subqueryRte = ExtractSelectRangeTableEntry(insertSelectQuery); - subquery = (Query *) subqueryRte->subquery; + RangeTblEntry *subqueryRte = ExtractSelectRangeTableEntry(insertSelectQuery); + Query *subquery = (Query *) subqueryRte->subquery; if (NeedsDistributedPlanning(subquery) && contain_nextval_expression_walker((Node *) insertSelectQuery->targetList, NULL)) @@ -1274,25 +1243,22 @@ CoordinatorInsertSelectSupported(Query *insertSelectQuery) static Query * WrapSubquery(Query *subquery) { - Query *outerQuery = NULL; ParseState 
*pstate = make_parsestate(NULL); - Alias *selectAlias = NULL; - RangeTblEntry *newRangeTableEntry = NULL; - RangeTblRef *newRangeTableRef = NULL; ListCell *selectTargetCell = NULL; List *newTargetList = NIL; - outerQuery = makeNode(Query); + Query *outerQuery = makeNode(Query); outerQuery->commandType = CMD_SELECT; /* create range table entries */ - selectAlias = makeAlias("citus_insert_select_subquery", NIL); - newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery, - selectAlias, false, true); + Alias *selectAlias = makeAlias("citus_insert_select_subquery", NIL); + RangeTblEntry *newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery, + selectAlias, false, + true); outerQuery->rtable = list_make1(newRangeTableEntry); /* set the FROM expression to the subquery */ - newRangeTableRef = makeNode(RangeTblRef); + RangeTblRef *newRangeTableRef = makeNode(RangeTblRef); newRangeTableRef->rtindex = 1; outerQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); @@ -1300,8 +1266,6 @@ WrapSubquery(Query *subquery) foreach(selectTargetCell, subquery->targetList) { TargetEntry *selectTargetEntry = (TargetEntry *) lfirst(selectTargetCell); - Var *newSelectVar = NULL; - TargetEntry *newSelectTargetEntry = NULL; /* exactly 1 entry in FROM */ int indexInRangeTable = 1; @@ -1311,15 +1275,15 @@ WrapSubquery(Query *subquery) continue; } - newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno, - exprType((Node *) selectTargetEntry->expr), - exprTypmod((Node *) selectTargetEntry->expr), - exprCollation((Node *) selectTargetEntry->expr), 0); + Var *newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno, + exprType((Node *) selectTargetEntry->expr), + exprTypmod((Node *) selectTargetEntry->expr), + exprCollation((Node *) selectTargetEntry->expr), 0); - newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar, - selectTargetEntry->resno, - selectTargetEntry->resname, - selectTargetEntry->resjunk); + TargetEntry 
*newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar, + selectTargetEntry->resno, + selectTargetEntry->resname, + selectTargetEntry->resjunk); newTargetList = lappend(newTargetList, newSelectTargetEntry); } @@ -1352,16 +1316,13 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery, DistTableCacheEntry *targetCacheEntry = DistributedTableCacheEntry(targetRelationId); int shardCount = targetCacheEntry->shardIntervalArrayLength; - int shardOffset = 0; uint32 taskIdIndex = 1; uint64 jobId = INVALID_JOB_ID; ListCell *targetEntryCell = NULL; - Relation distributedRelation = NULL; - TupleDesc destTupleDescriptor = NULL; - distributedRelation = heap_open(targetRelationId, RowExclusiveLock); - destTupleDescriptor = RelationGetDescr(distributedRelation); + Relation distributedRelation = heap_open(targetRelationId, RowExclusiveLock); + TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation); /* * If the type of insert column and target table's column type is @@ -1388,25 +1349,22 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery, } } - for (shardOffset = 0; shardOffset < shardCount; shardOffset++) + for (int shardOffset = 0; shardOffset < shardCount; shardOffset++) { ShardInterval *targetShardInterval = targetCacheEntry->sortedShardIntervalArray[shardOffset]; uint64 shardId = targetShardInterval->shardId; List *columnAliasList = NIL; - List *insertShardPlacementList = NIL; - Query *resultSelectQuery = NULL; StringInfo queryString = makeStringInfo(); - RelationShard *relationShard = NULL; - Task *modifyTask = NULL; StringInfo resultId = makeStringInfo(); /* during COPY, the shard ID is appended to the result name */ appendStringInfo(resultId, "%s_" UINT64_FORMAT, resultIdPrefix, shardId); /* generate the query on the intermediate result */ - resultSelectQuery = BuildSubPlanResultQuery(insertSelectQuery->targetList, - columnAliasList, resultId->data); + Query *resultSelectQuery = 
BuildSubPlanResultQuery(insertSelectQuery->targetList, + columnAliasList, + resultId->data); /* put the intermediate result query in the INSERT..SELECT */ selectRte->subquery = resultSelectQuery; @@ -1431,13 +1389,14 @@ TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery, ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data))); LockShardDistributionMetadata(shardId, ShareLock); - insertShardPlacementList = FinalizedShardPlacementList(shardId); + List *insertShardPlacementList = FinalizedShardPlacementList(shardId); - relationShard = CitusMakeNode(RelationShard); + RelationShard *relationShard = CitusMakeNode(RelationShard); relationShard->relationId = targetShardInterval->relationId; relationShard->shardId = targetShardInterval->shardId; - modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, queryString->data); + Task *modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, + queryString->data); modifyTask->dependedTaskList = NULL; modifyTask->anchorShardId = shardId; modifyTask->taskPlacementList = insertShardPlacementList; diff --git a/src/backend/distributed/planner/intermediate_result_pruning.c b/src/backend/distributed/planner/intermediate_result_pruning.c index 8a0909bb1..f1b75f754 100644 --- a/src/backend/distributed/planner/intermediate_result_pruning.c +++ b/src/backend/distributed/planner/intermediate_result_pruning.c @@ -52,7 +52,6 @@ FindSubPlansUsedInNode(Node *node) { char *resultId = FindIntermediateResultIdIfExists(rangeTableEntry); - Value *resultIdValue = NULL; if (resultId == NULL) { @@ -63,7 +62,7 @@ FindSubPlansUsedInNode(Node *node) * Use a Value to be able to use list_append_unique and store * the result ID in the DistributedPlan. 
*/ - resultIdValue = makeString(resultId); + Value *resultIdValue = makeString(resultId); subPlanList = list_append_unique(subPlanList, resultIdValue); } } @@ -185,8 +184,6 @@ AppendAllAccessedWorkerNodes(List *workerNodeList, DistributedPlan *distributedP HTAB * MakeIntermediateResultHTAB() { - HTAB *intermediateResultsHash = NULL; - uint32 hashFlags = 0; HASHCTL info = { 0 }; int initialNumberOfElements = 16; @@ -194,10 +191,11 @@ MakeIntermediateResultHTAB() info.entrysize = sizeof(IntermediateResultsHashEntry); info.hash = string_hash; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - intermediateResultsHash = hash_create("Intermediate results hash", - initialNumberOfElements, &info, hashFlags); + HTAB *intermediateResultsHash = hash_create("Intermediate results hash", + initialNumberOfElements, &info, + hashFlags); return intermediateResultsHash; } @@ -243,10 +241,10 @@ FindAllWorkerNodesUsingSubplan(HTAB *intermediateResultsHash, static IntermediateResultsHashEntry * SearchIntermediateResult(HTAB *intermediateResultsHash, char *resultId) { - IntermediateResultsHashEntry *entry = NULL; bool found = false; - entry = hash_search(intermediateResultsHash, resultId, HASH_ENTER, &found); + IntermediateResultsHashEntry *entry = hash_search(intermediateResultsHash, resultId, + HASH_ENTER, &found); /* use sane defaults */ if (!found) diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index ddf1e9cef..7b13b5c58 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -343,9 +343,8 @@ ExplainTaskList(List *taskList, ExplainState *es) foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); - RemoteExplainPlan *remoteExplain = NULL; - remoteExplain = RemoteExplain(task, es); + RemoteExplainPlan *remoteExplain = RemoteExplain(task, 
es); remoteExplainList = lappend(remoteExplainList, remoteExplain); if (!ExplainAllTasks) @@ -374,14 +373,12 @@ ExplainTaskList(List *taskList, ExplainState *es) static RemoteExplainPlan * RemoteExplain(Task *task, ExplainState *es) { - StringInfo explainQuery = NULL; List *taskPlacementList = task->taskPlacementList; int placementCount = list_length(taskPlacementList); - int placementIndex = 0; - RemoteExplainPlan *remotePlan = NULL; - remotePlan = (RemoteExplainPlan *) palloc0(sizeof(RemoteExplainPlan)); - explainQuery = BuildRemoteExplainQuery(task->queryString, es); + RemoteExplainPlan *remotePlan = (RemoteExplainPlan *) palloc0( + sizeof(RemoteExplainPlan)); + StringInfo explainQuery = BuildRemoteExplainQuery(task->queryString, es); /* * Use a coordinated transaction to ensure that we open a transaction block @@ -389,17 +386,16 @@ RemoteExplain(Task *task, ExplainState *es) */ BeginOrContinueCoordinatedTransaction(); - for (placementIndex = 0; placementIndex < placementCount; placementIndex++) + for (int placementIndex = 0; placementIndex < placementCount; placementIndex++) { ShardPlacement *taskPlacement = list_nth(taskPlacementList, placementIndex); - MultiConnection *connection = NULL; PGresult *queryResult = NULL; int connectionFlags = 0; - int executeResult = 0; remotePlan->placementIndex = placementIndex; - connection = GetPlacementConnection(connectionFlags, taskPlacement, NULL); + MultiConnection *connection = GetPlacementConnection(connectionFlags, + taskPlacement, NULL); /* try other placements if we fail to connect this one */ if (PQstatus(connection->pgConn) != CONNECTION_OK) @@ -417,8 +413,8 @@ RemoteExplain(Task *task, ExplainState *es) ExecuteCriticalRemoteCommand(connection, "SAVEPOINT citus_explain_savepoint"); /* run explain query */ - executeResult = ExecuteOptionalRemoteCommand(connection, explainQuery->data, - &queryResult); + int executeResult = ExecuteOptionalRemoteCommand(connection, explainQuery->data, + &queryResult); if 
(executeResult != 0) { PQclear(queryResult); @@ -517,11 +513,9 @@ ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOutputList, foreach(explainOutputCell, explainOutputList) { StringInfo rowString = (StringInfo) lfirst(explainOutputCell); - int rowLength = 0; - char *lineStart = NULL; - rowLength = strlen(rowString->data); - lineStart = rowString->data; + int rowLength = strlen(rowString->data); + char *lineStart = rowString->data; /* parse the lines in the remote EXPLAIN for proper indentation */ while (lineStart < rowString->data + rowLength) @@ -646,14 +640,13 @@ ExplainOneQuery(Query *query, int cursorOptions, } else { - PlannedStmt *plan; instr_time planstart, planduration; INSTR_TIME_SET_CURRENT(planstart); /* plan the query */ - plan = pg_plan_query(query, cursorOptions, params); + PlannedStmt *plan = pg_plan_query(query, cursorOptions, params); INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c index e54bd0967..57e54aeba 100644 --- a/src/backend/distributed/planner/multi_join_order.c +++ b/src/backend/distributed/planner/multi_join_order.c @@ -116,18 +116,16 @@ JoinExprList(FromExpr *fromExpr) if (joinList != NIL) { /* multiple nodes in from clause, add an explicit join between them */ - JoinExpr *newJoinExpr = NULL; - RangeTblRef *nextRangeTableRef = NULL; int nextRangeTableIndex = 0; /* find the left most range table in this node */ ExtractLeftMostRangeTableIndex((Node *) fromExpr, &nextRangeTableIndex); - nextRangeTableRef = makeNode(RangeTblRef); + RangeTblRef *nextRangeTableRef = makeNode(RangeTblRef); nextRangeTableRef->rtindex = nextRangeTableIndex; /* join the previous node with nextRangeTableRef */ - newJoinExpr = makeNode(JoinExpr); + JoinExpr *newJoinExpr = makeNode(JoinExpr); newJoinExpr->jointype = JOIN_INNER; newJoinExpr->rarg = (Node *) nextRangeTableRef; newJoinExpr->quals = 
NULL; @@ -261,18 +259,16 @@ JoinOnColumns(Var *currentColumn, Var *candidateColumn, List *joinClauseList) List * JoinOrderList(List *tableEntryList, List *joinClauseList) { - List *bestJoinOrder = NIL; List *candidateJoinOrderList = NIL; ListCell *tableEntryCell = NULL; foreach(tableEntryCell, tableEntryList) { TableEntry *startingTable = (TableEntry *) lfirst(tableEntryCell); - List *candidateJoinOrder = NIL; /* each candidate join order starts with a different table */ - candidateJoinOrder = JoinOrderForTable(startingTable, tableEntryList, - joinClauseList); + List *candidateJoinOrder = JoinOrderForTable(startingTable, tableEntryList, + joinClauseList); if (candidateJoinOrder != NULL) { @@ -289,7 +285,7 @@ JoinOrderList(List *tableEntryList, List *joinClauseList) "equal operator"))); } - bestJoinOrder = BestJoinOrder(candidateJoinOrderList); + List *bestJoinOrder = BestJoinOrder(candidateJoinOrderList); /* if logging is enabled, print join order */ if (LogMultiJoinOrder) @@ -312,10 +308,7 @@ JoinOrderList(List *tableEntryList, List *joinClauseList) static List * JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClauseList) { - JoinOrderNode *currentJoinNode = NULL; JoinRuleType firstJoinRule = JOIN_RULE_INVALID_FIRST; - List *joinOrderList = NIL; - List *joinedTableList = NIL; int joinedTableCount = 1; int totalTableCount = list_length(tableEntryList); @@ -331,20 +324,19 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause firstTable); /* add first node to the join order */ - joinOrderList = list_make1(firstJoinNode); - joinedTableList = list_make1(firstTable); - currentJoinNode = firstJoinNode; + List *joinOrderList = list_make1(firstJoinNode); + List *joinedTableList = list_make1(firstTable); + JoinOrderNode *currentJoinNode = firstJoinNode; /* loop until we join all remaining tables */ while (joinedTableCount < totalTableCount) { - List *pendingTableList = NIL; ListCell *pendingTableCell = NULL; 
JoinOrderNode *nextJoinNode = NULL; - TableEntry *nextJoinedTable = NULL; JoinRuleType nextJoinRuleType = JOIN_RULE_LAST; - pendingTableList = TableEntryListDifference(tableEntryList, joinedTableList); + List *pendingTableList = TableEntryListDifference(tableEntryList, + joinedTableList); /* * Iterate over all pending tables, and find the next best table to @@ -354,13 +346,13 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause foreach(pendingTableCell, pendingTableList) { TableEntry *pendingTable = (TableEntry *) lfirst(pendingTableCell); - JoinOrderNode *pendingJoinNode = NULL; - JoinRuleType pendingJoinRuleType = JOIN_RULE_LAST; JoinType joinType = JOIN_INNER; /* evaluate all join rules for this pending table */ - pendingJoinNode = EvaluateJoinRules(joinedTableList, currentJoinNode, - pendingTable, joinClauseList, joinType); + JoinOrderNode *pendingJoinNode = EvaluateJoinRules(joinedTableList, + currentJoinNode, + pendingTable, + joinClauseList, joinType); if (pendingJoinNode == NULL) { @@ -369,7 +361,7 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause } /* if this rule is better than previous ones, keep it */ - pendingJoinRuleType = pendingJoinNode->joinRuleType; + JoinRuleType pendingJoinRuleType = pendingJoinNode->joinRuleType; if (pendingJoinRuleType < nextJoinRuleType) { nextJoinNode = pendingJoinNode; @@ -387,7 +379,7 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause } Assert(nextJoinNode != NULL); - nextJoinedTable = nextJoinNode->tableEntry; + TableEntry *nextJoinedTable = nextJoinNode->tableEntry; /* add next node to the join order */ joinOrderList = lappend(joinOrderList, nextJoinNode); @@ -411,8 +403,6 @@ JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClause static List * BestJoinOrder(List *candidateJoinOrders) { - List *bestJoinOrder = NULL; - uint32 ruleTypeIndex = 0; uint32 highestValidIndex = JOIN_RULE_LAST - 1; uint32 
candidateCount PG_USED_FOR_ASSERTS_ONLY = 0; @@ -429,7 +419,7 @@ BestJoinOrder(List *candidateJoinOrders) * have 3 or more, if there isn't a join order with fewer DPs; and so * forth. */ - for (ruleTypeIndex = highestValidIndex; ruleTypeIndex > 0; ruleTypeIndex--) + for (uint32 ruleTypeIndex = highestValidIndex; ruleTypeIndex > 0; ruleTypeIndex--) { JoinRuleType ruleType = (JoinRuleType) ruleTypeIndex; @@ -451,7 +441,7 @@ BestJoinOrder(List *candidateJoinOrders) * If there still is a tie, we pick the join order whose relation appeared * earliest in the query's range table entry list. */ - bestJoinOrder = (List *) linitial(candidateJoinOrders); + List *bestJoinOrder = (List *) linitial(candidateJoinOrders); return bestJoinOrder; } @@ -662,24 +652,21 @@ EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; - uint32 candidateTableId = 0; - List *joinedTableIdList = NIL; - List *applicableJoinClauses = NIL; uint32 lowestValidIndex = JOIN_RULE_INVALID_FIRST + 1; uint32 highestValidIndex = JOIN_RULE_LAST - 1; - uint32 ruleIndex = 0; /* * We first find all applicable join clauses between already joined tables * and the candidate table. 
*/ - joinedTableIdList = RangeTableIdList(joinedTableList); - candidateTableId = candidateTable->rangeTableId; - applicableJoinClauses = ApplicableJoinClauses(joinedTableIdList, candidateTableId, - joinClauseList); + List *joinedTableIdList = RangeTableIdList(joinedTableList); + uint32 candidateTableId = candidateTable->rangeTableId; + List *applicableJoinClauses = ApplicableJoinClauses(joinedTableIdList, + candidateTableId, + joinClauseList); /* we then evaluate all join rules in order */ - for (ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++) + for (uint32 ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++) { JoinRuleType ruleType = (JoinRuleType) ruleIndex; RuleEvalFunction ruleEvalFunction = JoinRuleEvalFunction(ruleType); @@ -737,7 +724,6 @@ static RuleEvalFunction JoinRuleEvalFunction(JoinRuleType ruleType) { static bool ruleEvalFunctionsInitialized = false; - RuleEvalFunction ruleEvalFunction = NULL; if (!ruleEvalFunctionsInitialized) { @@ -751,7 +737,7 @@ JoinRuleEvalFunction(JoinRuleType ruleType) ruleEvalFunctionsInitialized = true; } - ruleEvalFunction = RuleEvalFunctionArray[ruleType]; + RuleEvalFunction ruleEvalFunction = RuleEvalFunctionArray[ruleType]; Assert(ruleEvalFunction != NULL); return ruleEvalFunction; @@ -763,7 +749,6 @@ static char * JoinRuleName(JoinRuleType ruleType) { static bool ruleNamesInitialized = false; - char *ruleName = NULL; if (!ruleNamesInitialized) { @@ -780,7 +765,7 @@ JoinRuleName(JoinRuleType ruleType) ruleNamesInitialized = true; } - ruleName = RuleNameArray[ruleType]; + char *ruleName = RuleNameArray[ruleType]; Assert(ruleName != NULL); return ruleName; @@ -857,7 +842,6 @@ static JoinOrderNode * LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *applicableJoinClauses, JoinType joinType) { - JoinOrderNode *nextJoinNode = NULL; Oid relationId = candidateTable->relationId; uint32 tableId = candidateTable->rangeTableId; Var *candidatePartitionColumn = 
PartitionColumn(relationId, tableId); @@ -865,8 +849,6 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, char candidatePartitionMethod = PartitionMethod(relationId); char currentPartitionMethod = currentJoinNode->partitionMethod; TableEntry *currentAnchorTable = currentJoinNode->anchorTable; - bool joinOnPartitionColumns = false; - bool coPartitionedTables = false; /* * If we previously dual-hash re-partitioned the tables for a join or made cartesian @@ -883,26 +865,27 @@ LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, return NULL; } - joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn, - candidatePartitionColumn, - applicableJoinClauses); + bool joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn, + candidatePartitionColumn, + applicableJoinClauses); if (!joinOnPartitionColumns) { return NULL; } /* shard interval lists must have 1-1 matching for local joins */ - coPartitionedTables = CoPartitionedTables(currentAnchorTable->relationId, relationId); + bool coPartitionedTables = CoPartitionedTables(currentAnchorTable->relationId, + relationId); if (!coPartitionedTables) { return NULL; } - nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN, - currentPartitionColumn, - currentPartitionMethod, - currentAnchorTable); + JoinOrderNode *nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN, + currentPartitionColumn, + currentPartitionMethod, + currentAnchorTable); return nextJoinNode; @@ -925,7 +908,6 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, TableEntry *currentAnchorTable = currentJoinNode->anchorTable; JoinRuleType currentJoinRuleType = currentJoinNode->joinRuleType; - OpExpr *joinClause = NULL; Oid relationId = candidateTable->relationId; uint32 tableId = candidateTable->rangeTableId; @@ -948,7 +930,7 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, return NULL; } - joinClause = + OpExpr 
*joinClause = SinglePartitionJoinClause(currentPartitionColumn, applicableJoinClauses); if (joinClause != NULL) { diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 7b7061dbd..1c7d77922 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -312,19 +312,8 @@ static bool HasOrderByHllType(List *sortClauseList, List *targetList); void MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) { - bool hasOrderByHllType = false; - List *selectNodeList = NIL; - List *projectNodeList = NIL; - List *collectNodeList = NIL; - List *extendedOpNodeList = NIL; - List *tableNodeList = NIL; ListCell *collectNodeCell = NULL; ListCell *tableNodeCell = NULL; - MultiProject *projectNode = NULL; - MultiExtendedOp *extendedOpNode = NULL; - MultiExtendedOp *masterExtendedOpNode = NULL; - MultiExtendedOp *workerExtendedOpNode = NULL; - ExtendedOpNodeProperties extendedOpNodeProperties; MultiNode *logicalPlanNode = (MultiNode *) multiLogicalPlan; /* check that we can optimize aggregates in the plan */ @@ -336,7 +325,7 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) * exist, we modify the tree in place to swap the original select node with * And and Or nodes. We then push down the And select node if it exists. 
*/ - selectNodeList = FindNodesOfType(logicalPlanNode, T_MultiSelect); + List *selectNodeList = FindNodesOfType(logicalPlanNode, T_MultiSelect); if (selectNodeList != NIL) { MultiSelect *selectNode = (MultiSelect *) linitial(selectNodeList); @@ -365,12 +354,12 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) } /* push down the multi project node */ - projectNodeList = FindNodesOfType(logicalPlanNode, T_MultiProject); - projectNode = (MultiProject *) linitial(projectNodeList); + List *projectNodeList = FindNodesOfType(logicalPlanNode, T_MultiProject); + MultiProject *projectNode = (MultiProject *) linitial(projectNodeList); PushDownNodeLoop((MultiUnaryNode *) projectNode); /* pull up collect nodes and merge duplicate collects */ - collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect); + List *collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect); foreach(collectNodeCell, collectNodeList) { MultiCollect *collectNode = (MultiCollect *) lfirst(collectNodeCell); @@ -385,19 +374,20 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) * clause list to the worker operator node. We then push the worker operator * node below the collect node. 
*/ - extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); - extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); + List *extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); + MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); - extendedOpNodeProperties = BuildExtendedOpNodeProperties(extendedOpNode); + ExtendedOpNodeProperties extendedOpNodeProperties = BuildExtendedOpNodeProperties( + extendedOpNode); - masterExtendedOpNode = + MultiExtendedOp *masterExtendedOpNode = MasterExtendedOpNode(extendedOpNode, &extendedOpNodeProperties); - workerExtendedOpNode = + MultiExtendedOp *workerExtendedOpNode = WorkerExtendedOpNode(extendedOpNode, &extendedOpNodeProperties); ApplyExtendedOpNodes(extendedOpNode, masterExtendedOpNode, workerExtendedOpNode); - tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); + List *tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); @@ -414,8 +404,8 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) * clause's sortop oid, so we can't push an order by on the hll data type to * the worker node. We check that here and error out if necessary. 
*/ - hasOrderByHllType = HasOrderByHllType(workerExtendedOpNode->sortClauseList, - workerExtendedOpNode->targetList); + bool hasOrderByHllType = HasOrderByHllType(workerExtendedOpNode->sortClauseList, + workerExtendedOpNode->targetList); if (hasOrderByHllType) { ereport(ERROR, (errmsg("cannot approximate count(distinct) and order by it"), @@ -597,7 +587,6 @@ PushDownNodeLoop(MultiUnaryNode *currentNode) static void PullUpCollectLoop(MultiCollect *collectNode) { - MultiNode *childNode = NULL; MultiUnaryNode *currentNode = (MultiUnaryNode *) collectNode; PullUpStatus pullUpStatus = CanPullUp(currentNode); @@ -611,7 +600,7 @@ PullUpCollectLoop(MultiCollect *collectNode) * After pulling up the collect node, if we find that our child node is also * a collect, we merge the two collect nodes together by removing this node. */ - childNode = currentNode->childNode; + MultiNode *childNode = currentNode->childNode; if (CitusIsA(childNode, MultiCollect)) { RemoveUnaryNode(currentNode); @@ -753,8 +742,8 @@ CanPullUp(MultiUnaryNode *childNode) * Evaluate if parent can be pushed down below the child node, since it * is equivalent to pulling up the child above its parent. 
*/ - PushDownStatus parentPushDownStatus = PUSH_DOWN_INVALID_FIRST; - parentPushDownStatus = Commutative((MultiUnaryNode *) parentNode, childNode); + PushDownStatus parentPushDownStatus = Commutative((MultiUnaryNode *) parentNode, + childNode); if (parentPushDownStatus == PUSH_DOWN_VALID) { @@ -932,8 +921,6 @@ SelectClauseTableIdList(List *selectClauseList) { Node *selectClause = (Node *) lfirst(selectClauseCell); List *selectColumnList = pull_var_clause_default(selectClause); - Var *selectColumn = NULL; - int selectColumnTableId = 0; if (list_length(selectColumnList) == 0) { @@ -941,8 +928,8 @@ SelectClauseTableIdList(List *selectClauseList) continue; } - selectColumn = (Var *) linitial(selectColumnList); - selectColumnTableId = (int) selectColumn->varno; + Var *selectColumn = (Var *) linitial(selectColumnList); + int selectColumnTableId = (int) selectColumn->varno; tableIdList = lappend_int(tableIdList, selectColumnTableId); } @@ -1014,9 +1001,9 @@ GenerateNode(MultiUnaryNode *currentNode, MultiNode *childNode) { MultiSelect *selectNode = (MultiSelect *) currentNode; List *selectClauseList = copyObject(selectNode->selectClauseList); - List *newSelectClauseList = NIL; - newSelectClauseList = TableIdListSelectClauses(tableIdList, selectClauseList); + List *newSelectClauseList = TableIdListSelectClauses(tableIdList, + selectClauseList); if (newSelectClauseList != NIL) { MultiSelect *newSelectNode = CitusMakeNode(MultiSelect); @@ -1370,7 +1357,6 @@ static MultiExtendedOp * MasterExtendedOpNode(MultiExtendedOp *originalOpNode, ExtendedOpNodeProperties *extendedOpNodeProperties) { - MultiExtendedOp *masterExtendedOpNode = NULL; List *targetEntryList = originalOpNode->targetList; List *newTargetEntryList = NIL; ListCell *targetEntryCell = NULL; @@ -1433,7 +1419,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode, newHavingQual = MasterAggregateMutator(originalHavingQual, walkerContext); } - masterExtendedOpNode = CitusMakeNode(MultiExtendedOp); + MultiExtendedOp 
*masterExtendedOpNode = CitusMakeNode(MultiExtendedOp); masterExtendedOpNode->targetList = newTargetEntryList; masterExtendedOpNode->groupClauseList = originalOpNode->groupClauseList; masterExtendedOpNode->sortClauseList = originalOpNode->sortClauseList; @@ -1510,7 +1496,6 @@ MasterAggregateExpression(Aggref *originalAggregate, { AggregateType aggregateType = GetAggregateType(originalAggregate->aggfnoid); Expr *newMasterExpression = NULL; - Expr *typeConvertedExpression = NULL; const uint32 masterTableId = 1; /* one table on the master node */ const Index columnLevelsUp = 0; /* normal column */ const AttrNumber argumentId = 1; /* our aggregates have single arguments */ @@ -1576,9 +1561,6 @@ MasterAggregateExpression(Aggref *originalAggregate, const int argCount = 1; const int defaultTypeMod = -1; - TargetEntry *hllTargetEntry = NULL; - Aggref *unionAggregate = NULL; - FuncExpr *cardinalityExpression = NULL; /* extract schema name of hll */ Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, false); @@ -1598,9 +1580,10 @@ MasterAggregateExpression(Aggref *originalAggregate, hllTypeCollationId, columnLevelsUp); walkerContext->columnId++; - hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, NULL, false); + TargetEntry *hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, + NULL, false); - unionAggregate = makeNode(Aggref); + Aggref *unionAggregate = makeNode(Aggref); unionAggregate->aggfnoid = unionFunctionId; unionAggregate->aggtype = hllType; unionAggregate->args = list_make1(hllTargetEntry); @@ -1610,7 +1593,7 @@ MasterAggregateExpression(Aggref *originalAggregate, unionAggregate->aggargtypes = list_make1_oid(unionAggregate->aggtype); unionAggregate->aggsplit = AGGSPLIT_SIMPLE; - cardinalityExpression = makeNode(FuncExpr); + FuncExpr *cardinalityExpression = makeNode(FuncExpr); cardinalityExpression->funcid = cardinalityFunctionId; cardinalityExpression->funcresulttype = cardinalityReturnType; cardinalityExpression->args = 
list_make1(unionAggregate); @@ -1647,12 +1630,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * Count aggregates are handled in two steps. First, worker nodes report * their count results. Then, the master node sums up these results. */ - Var *column = NULL; - TargetEntry *columnTargetEntry = NULL; - CoerceViaIO *coerceExpr = NULL; - Const *zeroConst = NULL; - List *coalesceArgs = NULL; - CoalesceExpr *coalesceExpr = NULL; /* worker aggregate and original aggregate have the same return type */ Oid workerReturnType = exprType((Node *) originalAggregate); @@ -1673,16 +1650,17 @@ MasterAggregateExpression(Aggref *originalAggregate, newMasterAggregate->aggargtypes = list_make1_oid(newMasterAggregate->aggtype); newMasterAggregate->aggsplit = AGGSPLIT_SIMPLE; - column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, - workerReturnTypeMod, workerCollationId, columnLevelsUp); + Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, + workerReturnTypeMod, workerCollationId, columnLevelsUp); walkerContext->columnId++; /* aggref expects its arguments to be wrapped in target entries */ - columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false); + TargetEntry *columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, + NULL, false); newMasterAggregate->args = list_make1(columnTargetEntry); /* cast numeric sum result to bigint (count's return type) */ - coerceExpr = makeNode(CoerceViaIO); + CoerceViaIO *coerceExpr = makeNode(CoerceViaIO); coerceExpr->arg = (Expr *) newMasterAggregate; coerceExpr->resulttype = INT8OID; coerceExpr->resultcollid = InvalidOid; @@ -1690,10 +1668,10 @@ MasterAggregateExpression(Aggref *originalAggregate, coerceExpr->location = -1; /* convert NULL to 0 in case of no rows */ - zeroConst = MakeIntegerConstInt64(0); - coalesceArgs = list_make2(coerceExpr, zeroConst); + Const *zeroConst = MakeIntegerConstInt64(0); + List *coalesceArgs = list_make2(coerceExpr, zeroConst); - 
coalesceExpr = makeNode(CoalesceExpr); + CoalesceExpr *coalesceExpr = makeNode(CoalesceExpr); coalesceExpr->coalescetype = INT8OID; coalesceExpr->coalescecollid = InvalidOid; coalesceExpr->args = coalesceArgs; @@ -1713,10 +1691,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * the arrays or jsons on the master and compute the array_cat_agg() * or jsonb_cat_agg() aggregate on them to get the final array or json. */ - Var *column = NULL; - TargetEntry *catAggArgument = NULL; - Aggref *newMasterAggregate = NULL; - Oid aggregateFunctionId = InvalidOid; const char *catAggregateName = NULL; Oid catInputType = InvalidOid; @@ -1753,17 +1727,18 @@ MasterAggregateExpression(Aggref *originalAggregate, Assert(catAggregateName != NULL); Assert(catInputType != InvalidOid); - aggregateFunctionId = AggregateFunctionOid(catAggregateName, - catInputType); + Oid aggregateFunctionId = AggregateFunctionOid(catAggregateName, + catInputType); /* create argument for the array_cat_agg() or jsonb_cat_agg() aggregate */ - column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, - workerReturnTypeMod, workerCollationId, columnLevelsUp); - catAggArgument = makeTargetEntry((Expr *) column, argumentId, NULL, false); + Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, + workerReturnTypeMod, workerCollationId, columnLevelsUp); + TargetEntry *catAggArgument = makeTargetEntry((Expr *) column, argumentId, NULL, + false); walkerContext->columnId++; /* construct the master array_cat_agg() or jsonb_cat_agg() expression */ - newMasterAggregate = copyObject(originalAggregate); + Aggref *newMasterAggregate = copyObject(originalAggregate); newMasterAggregate->aggfnoid = aggregateFunctionId; newMasterAggregate->args = list_make1(catAggArgument); newMasterAggregate->aggfilter = NULL; @@ -1781,8 +1756,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * to apply in the master after running the original aggregate in * workers. 
*/ - TargetEntry *hllTargetEntry = NULL; - Aggref *unionAggregate = NULL; Oid hllType = exprType((Node *) originalAggregate); Oid unionFunctionId = AggregateFunctionOid(HLL_UNION_AGGREGATE_NAME, hllType); @@ -1793,9 +1766,10 @@ MasterAggregateExpression(Aggref *originalAggregate, hllReturnTypeMod, hllTypeCollationId, columnLevelsUp); walkerContext->columnId++; - hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, NULL, false); + TargetEntry *hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, + NULL, false); - unionAggregate = makeNode(Aggref); + Aggref *unionAggregate = makeNode(Aggref); unionAggregate->aggfnoid = unionFunctionId; unionAggregate->aggtype = hllType; unionAggregate->args = list_make1(hllTargetEntry); @@ -1816,8 +1790,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * Then, we gather the Top-Ns on the master and take the union of all * to get the final topn. */ - TargetEntry *topNTargetEntry = NULL; - Aggref *unionAggregate = NULL; /* worker aggregate and original aggregate have same return type */ Oid topnType = exprType((Node *) originalAggregate); @@ -1831,10 +1803,11 @@ MasterAggregateExpression(Aggref *originalAggregate, topnReturnTypeMod, topnTypeCollationId, columnLevelsUp); walkerContext->columnId++; - topNTargetEntry = makeTargetEntry((Expr *) topnColumn, argumentId, NULL, false); + TargetEntry *topNTargetEntry = makeTargetEntry((Expr *) topnColumn, argumentId, + NULL, false); /* construct the master topn_union_agg() expression */ - unionAggregate = makeNode(Aggref); + Aggref *unionAggregate = makeNode(Aggref); unionAggregate->aggfnoid = unionFunctionId; unionAggregate->aggtype = topnType; unionAggregate->args = list_make1(topNTargetEntry); @@ -1869,32 +1842,30 @@ MasterAggregateExpression(Aggref *originalAggregate, if (combine != InvalidOid) { - Const *aggOidParam = NULL; - Var *column = NULL; - Const *nullTag = NULL; - List *aggArguments = NIL; - Aggref *newMasterAggregate = NULL; Oid coordCombineId = 
CoordCombineAggOid(); Oid workerReturnType = CSTRINGOID; int32 workerReturnTypeMod = -1; Oid workerCollationId = InvalidOid; Oid resultType = exprType((Node *) originalAggregate); - aggOidParam = makeConst(OIDOID, -1, InvalidOid, sizeof(Oid), - ObjectIdGetDatum(originalAggregate->aggfnoid), - false, true); - column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, - workerReturnTypeMod, workerCollationId, columnLevelsUp); + Const *aggOidParam = makeConst(OIDOID, -1, InvalidOid, sizeof(Oid), + ObjectIdGetDatum(originalAggregate->aggfnoid), + false, true); + Var *column = makeVar(masterTableId, walkerContext->columnId, + workerReturnType, + workerReturnTypeMod, workerCollationId, columnLevelsUp); walkerContext->columnId++; - nullTag = makeNullConst(resultType, -1, InvalidOid); + Const *nullTag = makeNullConst(resultType, -1, InvalidOid); - aggArguments = list_make3(makeTargetEntry((Expr *) aggOidParam, 1, NULL, - false), - makeTargetEntry((Expr *) column, 2, NULL, false), - makeTargetEntry((Expr *) nullTag, 3, NULL, false)); + List *aggArguments = list_make3(makeTargetEntry((Expr *) aggOidParam, 1, NULL, + false), + makeTargetEntry((Expr *) column, 2, NULL, + false), + makeTargetEntry((Expr *) nullTag, 3, NULL, + false)); /* coord_combine_agg(agg, workercol) */ - newMasterAggregate = makeNode(Aggref); + Aggref *newMasterAggregate = makeNode(Aggref); newMasterAggregate->aggfnoid = coordCombineId; newMasterAggregate->aggtype = originalAggregate->aggtype; newMasterAggregate->args = aggArguments; @@ -1918,9 +1889,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * All other aggregates are handled as they are. These include sum, min, * and max. 
*/ - Var *column = NULL; - TargetEntry *columnTargetEntry = NULL; - Aggref *newMasterAggregate = NULL; /* worker aggregate and original aggregate have the same return type */ Oid workerReturnType = exprType((Node *) originalAggregate); @@ -1940,18 +1908,19 @@ MasterAggregateExpression(Aggref *originalAggregate, { masterReturnType = workerReturnType; } - newMasterAggregate = copyObject(originalAggregate); + Aggref *newMasterAggregate = copyObject(originalAggregate); newMasterAggregate->aggdistinct = NULL; newMasterAggregate->aggfnoid = aggregateFunctionId; newMasterAggregate->aggtype = masterReturnType; newMasterAggregate->aggfilter = NULL; - column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, - workerReturnTypeMod, workerCollationId, columnLevelsUp); + Var *column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, + workerReturnTypeMod, workerCollationId, columnLevelsUp); walkerContext->columnId++; /* aggref expects its arguments to be wrapped in target entries */ - columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false); + TargetEntry *columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, + NULL, false); newMasterAggregate->args = list_make1(columnTargetEntry); newMasterExpression = (Expr *) newMasterAggregate; @@ -1964,8 +1933,8 @@ MasterAggregateExpression(Aggref *originalAggregate, * type as the original aggregate. We need this since functions like sorting * and grouping have already been chosen based on the original type. 
*/ - typeConvertedExpression = AddTypeConversion((Node *) originalAggregate, - (Node *) newMasterExpression); + Expr *typeConvertedExpression = AddTypeConversion((Node *) originalAggregate, + (Node *) newMasterExpression); if (typeConvertedExpression != NULL) { newMasterExpression = typeConvertedExpression; @@ -1999,22 +1968,15 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, Oid sumTypeCollationId = get_typcollation(sumAggregateType); Oid countTypeCollationId = get_typcollation(countAggregateType); - Var *firstColumn = NULL; - Var *secondColumn = NULL; - TargetEntry *firstTargetEntry = NULL; - TargetEntry *secondTargetEntry = NULL; - Aggref *firstSum = NULL; - Aggref *secondSum = NULL; - List *operatorNameList = NIL; - Expr *opExpr = NULL; /* create the first argument for sum(column1) */ - firstColumn = makeVar(masterTableId, (*columnId), sumAggregateType, - defaultTypeMod, sumTypeCollationId, defaultLevelsUp); - firstTargetEntry = makeTargetEntry((Expr *) firstColumn, argumentId, NULL, false); + Var *firstColumn = makeVar(masterTableId, (*columnId), sumAggregateType, + defaultTypeMod, sumTypeCollationId, defaultLevelsUp); + TargetEntry *firstTargetEntry = makeTargetEntry((Expr *) firstColumn, argumentId, + NULL, false); (*columnId)++; - firstSum = makeNode(Aggref); + Aggref *firstSum = makeNode(Aggref); firstSum->aggfnoid = AggregateFunctionOid(sumAggregateName, sumAggregateType); firstSum->aggtype = get_func_rettype(firstSum->aggfnoid); firstSum->args = list_make1(firstTargetEntry); @@ -2024,12 +1986,13 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, firstSum->aggsplit = AGGSPLIT_SIMPLE; /* create the second argument for sum(column2) */ - secondColumn = makeVar(masterTableId, (*columnId), countAggregateType, - defaultTypeMod, countTypeCollationId, defaultLevelsUp); - secondTargetEntry = makeTargetEntry((Expr *) secondColumn, argumentId, NULL, false); + Var *secondColumn = makeVar(masterTableId, (*columnId), 
countAggregateType, + defaultTypeMod, countTypeCollationId, defaultLevelsUp); + TargetEntry *secondTargetEntry = makeTargetEntry((Expr *) secondColumn, argumentId, + NULL, false); (*columnId)++; - secondSum = makeNode(Aggref); + Aggref *secondSum = makeNode(Aggref); secondSum->aggfnoid = AggregateFunctionOid(sumAggregateName, countAggregateType); secondSum->aggtype = get_func_rettype(secondSum->aggfnoid); secondSum->args = list_make1(secondTargetEntry); @@ -2042,9 +2005,10 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, * Build the division operator between these two aggregates. This function * will convert the types of the aggregates if necessary. */ - operatorNameList = list_make1(makeString(DIVISION_OPER_NAME)); - opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL, - -1); + List *operatorNameList = list_make1(makeString(DIVISION_OPER_NAME)); + Expr *opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, + NULL, + -1); return opExpr; } @@ -2061,7 +2025,6 @@ AddTypeConversion(Node *originalAggregate, Node *newExpression) Oid newTypeId = exprType(newExpression); Oid originalTypeId = exprType(originalAggregate); int32 originalTypeMod = exprTypmod(originalAggregate); - Node *typeConvertedExpression = NULL; /* nothing to do if the two types are the same */ if (originalTypeId == newTypeId) @@ -2070,10 +2033,10 @@ AddTypeConversion(Node *originalAggregate, Node *newExpression) } /* otherwise, add a type conversion function */ - typeConvertedExpression = coerce_to_target_type(NULL, newExpression, newTypeId, - originalTypeId, originalTypeMod, - COERCION_EXPLICIT, - COERCE_EXPLICIT_CAST, -1); + Node *typeConvertedExpression = coerce_to_target_type(NULL, newExpression, newTypeId, + originalTypeId, originalTypeMod, + COERCION_EXPLICIT, + COERCE_EXPLICIT_CAST, -1); Assert(typeConvertedExpression != NULL); return (Expr *) typeConvertedExpression; } @@ -2090,10 +2053,7 @@ static MultiExtendedOp 
* WorkerExtendedOpNode(MultiExtendedOp *originalOpNode, ExtendedOpNodeProperties *extendedOpNodeProperties) { - MultiExtendedOp *workerExtendedOpNode = NULL; - Index nextSortGroupRefIndex = 0; bool distinctPreventsLimitPushdown = false; - bool groupByExtended = false; bool groupedByDisjointPartitionColumn = extendedOpNodeProperties->groupedByDisjointPartitionColumn; @@ -2125,7 +2085,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode, memset(&queryOrderByLimit, 0, sizeof(queryGroupClause)); /* calculate the next sort group index based on the original target list */ - nextSortGroupRefIndex = GetNextSortGroupRef(originalTargetEntryList); + Index nextSortGroupRefIndex = GetNextSortGroupRef(originalTargetEntryList); /* targetProjectionNumber starts from 1 */ queryTargetList.targetProjectionNumber = 1; @@ -2167,7 +2127,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode, * (1) Creating a new group by clause during aggregate mutation, or * (2) Distinct clause is not pushed down */ - groupByExtended = + bool groupByExtended = list_length(queryGroupClause.groupClauseList) > originalGroupClauseLength; if (!groupByExtended && !distinctPreventsLimitPushdown) { @@ -2188,7 +2148,7 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode, } /* finally, fill the extended op node with the data we gathered */ - workerExtendedOpNode = CitusMakeNode(MultiExtendedOp); + MultiExtendedOp *workerExtendedOpNode = CitusMakeNode(MultiExtendedOp); workerExtendedOpNode->targetList = queryTargetList.targetEntryList; workerExtendedOpNode->groupClauseList = queryGroupClause.groupClauseList; @@ -2303,9 +2263,7 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual, QueryTargetList *queryTargetList, QueryGroupClause *queryGroupClause) { - List *newExpressionList = NIL; TargetEntry *targetEntry = NULL; - WorkerAggregateWalkerContext *workerAggContext = NULL; if (originalHavingQual == NULL) { @@ -2314,13 +2272,14 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual, 
*workerHavingQual = NULL; - workerAggContext = palloc0(sizeof(WorkerAggregateWalkerContext)); + WorkerAggregateWalkerContext *workerAggContext = palloc0( + sizeof(WorkerAggregateWalkerContext)); workerAggContext->expressionList = NIL; workerAggContext->pullDistinctColumns = extendedOpNodeProperties->pullDistinctColumns; workerAggContext->createGroupByClause = false; WorkerAggregateWalker(originalHavingQual, workerAggContext); - newExpressionList = workerAggContext->expressionList; + List *newExpressionList = workerAggContext->expressionList; ExpandWorkerTargetEntry(newExpressionList, targetEntry, workerAggContext->createGroupByClause, @@ -2385,7 +2344,6 @@ ProcessDistinctClauseForWorkerQuery(List *distinctClause, bool hasDistinctOn, bool *distinctPreventsLimitPushdown) { bool distinctClauseSupersetofGroupClause = false; - bool shouldPushdownDistinct = false; if (distinctClause == NIL) { @@ -2419,8 +2377,8 @@ ProcessDistinctClauseForWorkerQuery(List *distinctClause, bool hasDistinctOn, * distinct pushdown if distinct clause is missing some entries that * group by clause has. */ - shouldPushdownDistinct = !queryHasAggregates && - distinctClauseSupersetofGroupClause; + bool shouldPushdownDistinct = !queryHasAggregates && + distinctClauseSupersetofGroupClause; if (shouldPushdownDistinct) { queryDistinctClause->workerDistinctClause = distinctClause; @@ -2524,8 +2482,6 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference, QueryOrderByLimit *queryOrderByLimit, QueryTargetList *queryTargetList) { - List *newTargetEntryListForSortClauses = NIL; - queryOrderByLimit->workerLimitCount = WorkerLimitCount(originalLimitCount, limitOffset, orderByLimitReference); @@ -2539,7 +2495,7 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference, * TODO: Do we really need to add the target entries if we're not pushing * down ORDER BY? 
*/ - newTargetEntryListForSortClauses = + List *newTargetEntryListForSortClauses = GenerateNewTargetEntriesForSortClauses(originalTargetList, queryOrderByLimit->workerSortClauseList, &(queryTargetList->targetProjectionNumber), @@ -2634,10 +2590,9 @@ ExpandWorkerTargetEntry(List *expressionList, TargetEntry *originalTargetEntry, foreach(newExpressionCell, expressionList) { Expr *newExpression = (Expr *) lfirst(newExpressionCell); - TargetEntry *newTargetEntry = NULL; /* generate and add the new target entry to the target list */ - newTargetEntry = + TargetEntry *newTargetEntry = GenerateWorkerTargetEntry(originalTargetEntry, newExpression, queryTargetList->targetProjectionNumber); (queryTargetList->targetProjectionNumber)++; @@ -2749,14 +2704,12 @@ AppendTargetEntryToGroupClause(TargetEntry *targetEntry, QueryGroupClause *queryGroupClause) { Expr *targetExpr PG_USED_FOR_ASSERTS_ONLY = targetEntry->expr; - Var *targetColumn = NULL; - SortGroupClause *groupByClause = NULL; /* we currently only support appending Var target entries */ AssertArg(IsA(targetExpr, Var)); - targetColumn = (Var *) targetEntry->expr; - groupByClause = CreateSortGroupClause(targetColumn); + Var *targetColumn = (Var *) targetEntry->expr; + SortGroupClause *groupByClause = CreateSortGroupClause(targetColumn); /* the target entry should have an index */ targetEntry->ressortgroupref = *queryGroupClause->nextSortGroupRefIndex; @@ -2854,10 +2807,6 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, const int hashArgumentCount = 2; const int addArgumentCount = 2; - TargetEntry *hashedColumnArgument = NULL; - TargetEntry *storageSizeArgument = NULL; - List *addAggregateArgumentList = NIL; - Aggref *addAggregateFunction = NULL; /* init hll_hash() related variables */ Oid argumentType = AggregateArgumentType(originalAggregate); @@ -2888,13 +2837,14 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, hashFunction->args = list_make1(argumentExpression); /* construct hll_add_agg() expression 
*/ - hashedColumnArgument = makeTargetEntry((Expr *) hashFunction, - firstArgumentId, NULL, false); - storageSizeArgument = makeTargetEntry((Expr *) logOfStorageSizeConst, - secondArgumentId, NULL, false); - addAggregateArgumentList = list_make2(hashedColumnArgument, storageSizeArgument); + TargetEntry *hashedColumnArgument = makeTargetEntry((Expr *) hashFunction, + firstArgumentId, NULL, false); + TargetEntry *storageSizeArgument = makeTargetEntry((Expr *) logOfStorageSizeConst, + secondArgumentId, NULL, false); + List *addAggregateArgumentList = list_make2(hashedColumnArgument, + storageSizeArgument); - addAggregateFunction = makeNode(Aggref); + Aggref *addAggregateFunction = makeNode(Aggref); addAggregateFunction->aggfnoid = addFunctionId; addAggregateFunction->aggtype = hllType; addAggregateFunction->args = addAggregateArgumentList; @@ -2964,17 +2914,15 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, if (combine != InvalidOid) { - Const *aggOidParam = NULL; - Aggref *newWorkerAggregate = NULL; - List *aggArguments = NIL; ListCell *originalAggArgCell; Oid workerPartialId = WorkerPartialAggOid(); - aggOidParam = makeConst(REGPROCEDUREOID, -1, InvalidOid, sizeof(Oid), - ObjectIdGetDatum(originalAggregate->aggfnoid), false, - true); - aggArguments = list_make1(makeTargetEntry((Expr *) aggOidParam, 1, NULL, - false)); + Const *aggOidParam = makeConst(REGPROCEDUREOID, -1, InvalidOid, sizeof(Oid), + ObjectIdGetDatum(originalAggregate->aggfnoid), + false, + true); + List *aggArguments = list_make1(makeTargetEntry((Expr *) aggOidParam, 1, NULL, + false)); foreach(originalAggArgCell, originalAggregate->args) { TargetEntry *arg = lfirst(originalAggArgCell); @@ -2984,7 +2932,7 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, } /* worker_partial_agg(agg, ...args) */ - newWorkerAggregate = makeNode(Aggref); + Aggref *newWorkerAggregate = makeNode(Aggref); newWorkerAggregate->aggfnoid = workerPartialId; newWorkerAggregate->aggtype = CSTRINGOID; 
newWorkerAggregate->args = aggArguments; @@ -3030,44 +2978,33 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, static AggregateType GetAggregateType(Oid aggFunctionId) { - char *aggregateProcName = NULL; - uint32 aggregateCount = 0; - uint32 aggregateIndex = 0; - bool found = false; - /* look up the function name */ - aggregateProcName = get_func_name(aggFunctionId); + char *aggregateProcName = get_func_name(aggFunctionId); if (aggregateProcName == NULL) { ereport(ERROR, (errmsg("citus cache lookup failed for function %u", aggFunctionId))); } - aggregateCount = lengthof(AggregateNames); + uint32 aggregateCount = lengthof(AggregateNames); Assert(AGGREGATE_INVALID_FIRST == 0); - for (aggregateIndex = 1; aggregateIndex < aggregateCount; aggregateIndex++) + for (uint32 aggregateIndex = 1; aggregateIndex < aggregateCount; aggregateIndex++) { const char *aggregateName = AggregateNames[aggregateIndex]; if (strncmp(aggregateName, aggregateProcName, NAMEDATALEN) == 0) { - found = true; - break; + return aggregateIndex; } } - if (!found) + if (AggregateEnabledCustom(aggFunctionId)) { - if (AggregateEnabledCustom(aggFunctionId)) - { - return AGGREGATE_CUSTOM; - } - - ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName))); + return AGGREGATE_CUSTOM; } - return aggregateIndex; + ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName))); } @@ -3093,18 +3030,12 @@ AggregateArgumentType(Aggref *aggregate) static bool AggregateEnabledCustom(Oid aggregateOid) { - HeapTuple aggTuple; - Form_pg_aggregate aggform; - HeapTuple typeTuple; - Form_pg_type typeform; - bool supportsSafeCombine; - - aggTuple = SearchSysCache1(AGGFNOID, aggregateOid); + HeapTuple aggTuple = SearchSysCache1(AGGFNOID, aggregateOid); if (!HeapTupleIsValid(aggTuple)) { elog(ERROR, "citus cache lookup failed."); } - aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple); + Form_pg_aggregate aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple); if 
(aggform->aggcombinefn == InvalidOid) { @@ -3112,14 +3043,14 @@ AggregateEnabledCustom(Oid aggregateOid) return false; } - typeTuple = SearchSysCache1(TYPEOID, aggform->aggtranstype); + HeapTuple typeTuple = SearchSysCache1(TYPEOID, aggform->aggtranstype); if (!HeapTupleIsValid(typeTuple)) { elog(ERROR, "citus cache lookup failed."); } - typeform = (Form_pg_type) GETSTRUCT(typeTuple); + Form_pg_type typeform = (Form_pg_type) GETSTRUCT(typeTuple); - supportsSafeCombine = typeform->typtype != TYPTYPE_PSEUDO; + bool supportsSafeCombine = typeform->typtype != TYPTYPE_PSEUDO; ReleaseSysCache(aggTuple); ReleaseSysCache(typeTuple); @@ -3137,23 +3068,20 @@ static Oid AggregateFunctionOid(const char *functionName, Oid inputType) { Oid functionOid = InvalidOid; - Relation procRelation = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; - HeapTuple heapTuple = NULL; - procRelation = heap_open(ProcedureRelationId, AccessShareLock); + Relation procRelation = heap_open(ProcedureRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_proc_proname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(functionName)); - scanDescriptor = systable_beginscan(procRelation, - ProcedureNameArgsNspIndexId, true, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(procRelation, + ProcedureNameArgsNspIndexId, true, + NULL, scanKeyCount, scanKey); /* loop until we find the right function */ - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_proc procForm = (Form_pg_proc) GETSTRUCT(heapTuple); @@ -3253,11 +3181,9 @@ CoordCombineAggOid() static Oid TypeOid(Oid schemaId, const char *typeName) { - Oid typeOid; - - typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid, - PointerGetDatum(typeName), - ObjectIdGetDatum(schemaId)); + Oid typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid, + 
PointerGetDatum(typeName), + ObjectIdGetDatum(schemaId)); return typeOid; } @@ -3410,8 +3336,6 @@ ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode) foreach(expressionCell, expressionList) { Node *expression = (Node *) lfirst(expressionCell); - Aggref *aggregateExpression = NULL; - AggregateType aggregateType = AGGREGATE_INVALID_FIRST; /* only consider aggregate expressions */ if (!IsA(expression, Aggref)) @@ -3420,8 +3344,8 @@ ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode) } /* GetAggregateType errors out on unsupported aggregate types */ - aggregateExpression = (Aggref *) expression; - aggregateType = GetAggregateType(aggregateExpression->aggfnoid); + Aggref *aggregateExpression = (Aggref *) expression; + AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid); Assert(aggregateType != AGGREGATE_INVALID_FIRST); /* @@ -3514,11 +3438,6 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, { char *errorDetail = NULL; bool distinctSupported = true; - List *repartitionNodeList = NIL; - Var *distinctColumn = NULL; - List *tableNodeList = NIL; - List *extendedOpNodeList = NIL; - MultiExtendedOp *extendedOpNode = NULL; AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid); @@ -3588,18 +3507,18 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, } } - repartitionNodeList = FindNodesOfType(logicalPlanNode, T_MultiPartition); + List *repartitionNodeList = FindNodesOfType(logicalPlanNode, T_MultiPartition); if (repartitionNodeList != NIL) { distinctSupported = false; errorDetail = "aggregate (distinct) with table repartitioning is unsupported"; } - tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); - extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); - extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); + List *tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); + List *extendedOpNodeList = 
FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); + MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); - distinctColumn = AggregateDistinctColumn(aggregateExpression); + Var *distinctColumn = AggregateDistinctColumn(aggregateExpression); if (distinctSupported) { if (distinctColumn == NULL) @@ -3664,29 +3583,26 @@ ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, static Var * AggregateDistinctColumn(Aggref *aggregateExpression) { - Var *aggregateColumn = NULL; - int aggregateArgumentCount = 0; - TargetEntry *aggregateTargetEntry = NULL; - /* only consider aggregates with distincts */ if (!aggregateExpression->aggdistinct) { return NULL; } - aggregateArgumentCount = list_length(aggregateExpression->args); + int aggregateArgumentCount = list_length(aggregateExpression->args); if (aggregateArgumentCount != 1) { return NULL; } - aggregateTargetEntry = (TargetEntry *) linitial(aggregateExpression->args); + TargetEntry *aggregateTargetEntry = (TargetEntry *) linitial( + aggregateExpression->args); if (!IsA(aggregateTargetEntry->expr, Var)) { return NULL; } - aggregateColumn = (Var *) aggregateTargetEntry->expr; + Var *aggregateColumn = (Var *) aggregateTargetEntry->expr; return aggregateColumn; } @@ -3710,8 +3626,6 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); Oid relationId = tableNode->relationId; bool tableDistinctSupported = false; - char partitionMethod = 0; - List *shardList = NIL; if (relationId == SUBQUERY_RELATION_ID || relationId == SUBQUERY_PUSHDOWN_RELATION_ID) @@ -3720,7 +3634,7 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, } /* if table has one shard, task results don't overlap */ - shardList = LoadShardList(relationId); + List *shardList = LoadShardList(relationId); if (list_length(shardList) == 1) { continue; @@ -3730,13 +3644,12 @@ TablePartitioningSupportsDistinct(List 
*tableNodeList, MultiExtendedOp *opNode, * We need to check that task results don't overlap. We can only do this * if table is range partitioned. */ - partitionMethod = PartitionMethod(relationId); + char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_RANGE || partitionMethod == DISTRIBUTE_BY_HASH) { Var *tablePartitionColumn = tableNode->partitionColumn; - bool groupedByPartitionColumn = false; if (aggregateType == AGGREGATE_COUNT) { @@ -3752,9 +3665,9 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, } /* if results are grouped by partition column, we can push down */ - groupedByPartitionColumn = GroupedByColumn(opNode->groupClauseList, - opNode->targetList, - tablePartitionColumn); + bool groupedByPartitionColumn = GroupedByColumn(opNode->groupClauseList, + opNode->targetList, + tablePartitionColumn); if (groupedByPartitionColumn) { tableDistinctSupported = true; @@ -3901,8 +3814,6 @@ FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query * { Var *candidateColumn = NULL; List *rangetableList = query->rtable; - Index rangeTableEntryIndex = 0; - RangeTblEntry *rangeTableEntry = NULL; Expr *strippedColumnExpression = (Expr *) strip_implicit_coercions( (Node *) columnExpression); @@ -3940,8 +3851,8 @@ FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query * return; } - rangeTableEntryIndex = candidateColumn->varno - 1; - rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); + Index rangeTableEntryIndex = candidateColumn->varno - 1; + RangeTblEntry *rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); if (rangeTableEntry->rtekind == RTE_RELATION) { @@ -4402,7 +4313,6 @@ HasOrderByComplexExpression(List *sortClauseList, List *targetList) { SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell); Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); - bool nestedAggregate = false; /* simple 
aggregate functions are ok */ if (IsA(sortExpression, Aggref)) @@ -4410,7 +4320,7 @@ HasOrderByComplexExpression(List *sortClauseList, List *targetList) continue; } - nestedAggregate = contain_agg_clause(sortExpression); + bool nestedAggregate = contain_agg_clause(sortExpression); if (nestedAggregate) { hasOrderByComplexExpression = true; @@ -4430,20 +4340,17 @@ static bool HasOrderByHllType(List *sortClauseList, List *targetList) { bool hasOrderByHllType = false; - Oid hllId = InvalidOid; - Oid hllSchemaOid = InvalidOid; - Oid hllTypeId = InvalidOid; ListCell *sortClauseCell = NULL; /* check whether HLL is loaded */ - hllId = get_extension_oid(HLL_EXTENSION_NAME, true); + Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, true); if (!OidIsValid(hllId)) { return hasOrderByHllType; } - hllSchemaOid = get_extension_schema(hllId); - hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME); + Oid hllSchemaOid = get_extension_schema(hllId); + Oid hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME); foreach(sortClauseCell, sortClauseList) { diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index e2a305b10..e735a28ac 100644 --- a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -134,7 +134,6 @@ MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree, PlannerRestrictionContext *plannerRestrictionContext) { MultiNode *multiQueryNode = NULL; - MultiTreeRoot *rootNode = NULL; if (ShouldUseSubqueryPushDown(originalQuery, queryTree, plannerRestrictionContext)) @@ -148,7 +147,7 @@ MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree, } /* add a root node to serve as the permanent handle to the tree */ - rootNode = CitusMakeNode(MultiTreeRoot); + MultiTreeRoot *rootNode = CitusMakeNode(MultiTreeRoot); SetChild((MultiUnaryNode *) rootNode, multiQueryNode); return rootNode; @@ -206,9 +205,7 @@ bool 
SingleRelationRepartitionSubquery(Query *queryTree) { List *rangeTableIndexList = NULL; - RangeTblEntry *rangeTableEntry = NULL; List *rangeTableList = queryTree->rtable; - int rangeTableIndex = 0; /* we don't support subqueries in WHERE */ if (queryTree->hasSubLinks) @@ -234,8 +231,8 @@ SingleRelationRepartitionSubquery(Query *queryTree) return false; } - rangeTableIndex = linitial_int(rangeTableIndexList); - rangeTableEntry = rt_fetch(rangeTableIndex, rangeTableList); + int rangeTableIndex = linitial_int(rangeTableIndexList); + RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableIndex, rangeTableList); if (rangeTableEntry->rtekind == RTE_RELATION) { return true; @@ -413,9 +410,6 @@ QueryContainsDistributedTableRTE(Query *query) bool IsDistributedTableRTE(Node *node) { - RangeTblEntry *rangeTableEntry = NULL; - Oid relationId = InvalidOid; - if (node == NULL) { return false; @@ -426,13 +420,13 @@ IsDistributedTableRTE(Node *node) return false; } - rangeTableEntry = (RangeTblEntry *) node; + RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node; if (rangeTableEntry->rtekind != RTE_RELATION) { return false; } - relationId = rangeTableEntry->relid; + Oid relationId = rangeTableEntry->relid; if (!IsDistributedTable(relationId) || PartitionMethod(relationId) == DISTRIBUTE_BY_NONE) { @@ -453,7 +447,6 @@ FullCompositeFieldList(List *compositeFieldList) bool fullCompositeFieldList = true; bool *compositeFieldArray = NULL; uint32 compositeFieldCount = 0; - uint32 fieldIndex = 0; ListCell *fieldSelectCell = NULL; foreach(fieldSelectCell, compositeFieldList) @@ -490,7 +483,7 @@ FullCompositeFieldList(List *compositeFieldList) compositeFieldArray[compositeFieldIndex] = true; } - for (fieldIndex = 0; fieldIndex < compositeFieldCount; fieldIndex++) + for (uint32 fieldIndex = 0; fieldIndex < compositeFieldCount; fieldIndex++) { if (!compositeFieldArray[fieldIndex]) { @@ -523,8 +516,6 @@ CompositeFieldRecursive(Expr *expression, Query *query) { FieldSelect *compositeField = 
NULL; List *rangetableList = query->rtable; - Index rangeTableEntryIndex = 0; - RangeTblEntry *rangeTableEntry = NULL; Var *candidateColumn = NULL; if (IsA(expression, FieldSelect)) @@ -542,8 +533,8 @@ CompositeFieldRecursive(Expr *expression, Query *query) return NULL; } - rangeTableEntryIndex = candidateColumn->varno - 1; - rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); + Index rangeTableEntryIndex = candidateColumn->varno - 1; + RangeTblEntry *rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); if (rangeTableEntry->rtekind == RTE_SUBQUERY) { @@ -633,29 +624,24 @@ MultiNodeTree(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *targetEntryList = queryTree->targetList; - List *whereClauseList = NIL; List *joinClauseList = NIL; List *joinOrderList = NIL; List *tableEntryList = NIL; List *tableNodeList = NIL; List *collectTableList = NIL; - List *subqueryEntryList = NIL; MultiNode *joinTreeNode = NULL; - MultiSelect *selectNode = NULL; - MultiProject *projectNode = NULL; - MultiExtendedOp *extendedOpNode = NULL; MultiNode *currentTopNode = NULL; - DeferredErrorMessage *unsupportedQueryError = NULL; /* verify we can perform distributed planning on this query */ - unsupportedQueryError = DeferErrorIfQueryNotSupported(queryTree); + DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported( + queryTree); if (unsupportedQueryError != NULL) { RaiseDeferredError(unsupportedQueryError, ERROR); } /* extract where clause qualifiers and verify we can plan for them */ - whereClauseList = WhereClauseList(queryTree->jointree); + List *whereClauseList = WhereClauseList(queryTree->jointree); unsupportedQueryError = DeferErrorIfUnsupportedClause(whereClauseList); if (unsupportedQueryError) { @@ -666,29 +652,23 @@ MultiNodeTree(Query *queryTree) * If we have a subquery, build a multi table node for the subquery and * add a collect node on top of the multi table node. 
*/ - subqueryEntryList = SubqueryEntryList(queryTree); + List *subqueryEntryList = SubqueryEntryList(queryTree); if (subqueryEntryList != NIL) { - RangeTblEntry *subqueryRangeTableEntry = NULL; MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect); - MultiTable *subqueryNode = NULL; - MultiNode *subqueryExtendedNode = NULL; - Query *subqueryTree = NULL; - List *whereClauseColumnList = NIL; - List *targetListColumnList = NIL; - List *columnList = NIL; ListCell *columnCell = NULL; /* we only support single subquery in the entry list */ Assert(list_length(subqueryEntryList) == 1); - subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList); - subqueryTree = subqueryRangeTableEntry->subquery; + RangeTblEntry *subqueryRangeTableEntry = (RangeTblEntry *) linitial( + subqueryEntryList); + Query *subqueryTree = subqueryRangeTableEntry->subquery; /* ensure if subquery satisfies preconditions */ Assert(DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree) == NULL); - subqueryNode = CitusMakeNode(MultiTable); + MultiTable *subqueryNode = CitusMakeNode(MultiTable); subqueryNode->relationId = SUBQUERY_RELATION_ID; subqueryNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID; subqueryNode->partitionColumn = NULL; @@ -704,10 +684,10 @@ MultiNodeTree(Query *queryTree) */ Assert(list_length(subqueryEntryList) == 1); - whereClauseColumnList = pull_var_clause_default((Node *) whereClauseList); - targetListColumnList = pull_var_clause_default((Node *) targetEntryList); + List *whereClauseColumnList = pull_var_clause_default((Node *) whereClauseList); + List *targetListColumnList = pull_var_clause_default((Node *) targetEntryList); - columnList = list_concat(whereClauseColumnList, targetListColumnList); + List *columnList = list_concat(whereClauseColumnList, targetListColumnList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); @@ -715,7 +695,7 @@ MultiNodeTree(Query *queryTree) } /* recursively create child nested multitree */ - 
subqueryExtendedNode = MultiNodeTree(subqueryTree); + MultiNode *subqueryExtendedNode = MultiNodeTree(subqueryTree); SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode); SetChild((MultiUnaryNode *) subqueryNode, subqueryExtendedNode); @@ -751,7 +731,7 @@ MultiNodeTree(Query *queryTree) Assert(currentTopNode != NULL); /* build select node if the query has selection criteria */ - selectNode = MultiSelectNode(whereClauseList); + MultiSelect *selectNode = MultiSelectNode(whereClauseList); if (selectNode != NULL) { SetChild((MultiUnaryNode *) selectNode, currentTopNode); @@ -759,7 +739,7 @@ MultiNodeTree(Query *queryTree) } /* build project node for the columns to project */ - projectNode = MultiProjectNode(targetEntryList); + MultiProject *projectNode = MultiProjectNode(targetEntryList); SetChild((MultiUnaryNode *) projectNode, currentTopNode); currentTopNode = (MultiNode *) projectNode; @@ -769,7 +749,7 @@ MultiNodeTree(Query *queryTree) * distinguish between aggregates and expressions; and we address this later * in the logical optimizer. 
*/ - extendedOpNode = MultiExtendedOpNode(queryTree); + MultiExtendedOp *extendedOpNode = MultiExtendedOpNode(queryTree); SetChild((MultiUnaryNode *) extendedOpNode, currentTopNode); currentTopNode = (MultiNode *) extendedOpNode; @@ -816,16 +796,13 @@ IsReadIntermediateResultFunction(Node *node) char * FindIntermediateResultIdIfExists(RangeTblEntry *rte) { - List *functionList = NULL; - RangeTblFunction *rangeTblfunction = NULL; - FuncExpr *funcExpr = NULL; char *resultId = NULL; Assert(rte->rtekind == RTE_FUNCTION); - functionList = rte->functions; - rangeTblfunction = (RangeTblFunction *) linitial(functionList); - funcExpr = (FuncExpr *) rangeTblfunction->funcexpr; + List *functionList = rte->functions; + RangeTblFunction *rangeTblfunction = (RangeTblFunction *) linitial(functionList); + FuncExpr *funcExpr = (FuncExpr *) rangeTblfunction->funcexpr; if (IsReadIntermediateResultFunction((Node *) funcExpr)) { @@ -850,9 +827,6 @@ DeferredErrorMessage * DeferErrorIfQueryNotSupported(Query *queryTree) { char *errorMessage = NULL; - bool hasTablesample = false; - bool hasUnsupportedJoin = false; - bool hasComplexRangeTableType = false; bool preconditionsSatisfied = true; StringInfo errorInfo = NULL; const char *errorHint = NULL; @@ -922,7 +896,7 @@ DeferErrorIfQueryNotSupported(Query *queryTree) errorHint = filterHint; } - hasTablesample = HasTablesample(queryTree); + bool hasTablesample = HasTablesample(queryTree); if (hasTablesample) { preconditionsSatisfied = false; @@ -930,7 +904,8 @@ DeferErrorIfQueryNotSupported(Query *queryTree) errorHint = filterHint; } - hasUnsupportedJoin = HasUnsupportedJoinWalker((Node *) queryTree->jointree, NULL); + bool hasUnsupportedJoin = HasUnsupportedJoinWalker((Node *) queryTree->jointree, + NULL); if (hasUnsupportedJoin) { preconditionsSatisfied = false; @@ -939,7 +914,7 @@ DeferErrorIfQueryNotSupported(Query *queryTree) errorHint = joinHint; } - hasComplexRangeTableType = HasComplexRangeTableType(queryTree); + bool 
hasComplexRangeTableType = HasComplexRangeTableType(queryTree); if (hasComplexRangeTableType) { preconditionsSatisfied = false; @@ -1079,9 +1054,6 @@ DeferErrorIfUnsupportedSubqueryRepartition(Query *subqueryTree) char *errorDetail = NULL; bool preconditionsSatisfied = true; List *joinTreeTableIndexList = NIL; - int rangeTableIndex = 0; - RangeTblEntry *rangeTableEntry = NULL; - Query *innerSubquery = NULL; if (!subqueryTree->hasAggs) { @@ -1136,15 +1108,15 @@ DeferErrorIfUnsupportedSubqueryRepartition(Query *subqueryTree) Assert(list_length(joinTreeTableIndexList) == 1); /* continue with the inner subquery */ - rangeTableIndex = linitial_int(joinTreeTableIndexList); - rangeTableEntry = rt_fetch(rangeTableIndex, subqueryTree->rtable); + int rangeTableIndex = linitial_int(joinTreeTableIndexList); + RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableIndex, subqueryTree->rtable); if (rangeTableEntry->rtekind == RTE_RELATION) { return NULL; } Assert(rangeTableEntry->rtekind == RTE_SUBQUERY); - innerSubquery = rangeTableEntry->subquery; + Query *innerSubquery = rangeTableEntry->subquery; /* recursively continue to the inner subqueries */ return DeferErrorIfUnsupportedSubqueryRepartition(innerSubquery); @@ -1225,10 +1197,9 @@ WhereClauseList(FromExpr *fromExpr) { FromExpr *fromExprCopy = copyObject(fromExpr); QualifierWalkerContext *walkerContext = palloc0(sizeof(QualifierWalkerContext)); - List *whereClauseList = NIL; ExtractFromExpressionWalker((Node *) fromExprCopy, walkerContext); - whereClauseList = walkerContext->baseQualifierList; + List *whereClauseList = walkerContext->baseQualifierList; return whereClauseList; } @@ -1335,7 +1306,6 @@ JoinClauseList(List *whereClauseList) static bool ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext) { - bool walkerResult = false; if (node == NULL) { return false; @@ -1406,8 +1376,8 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext) } } - walkerResult = 
expression_tree_walker(node, ExtractFromExpressionWalker, - (void *) walkerContext); + bool walkerResult = expression_tree_walker(node, ExtractFromExpressionWalker, + (void *) walkerContext); return walkerResult; } @@ -1421,10 +1391,6 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext) bool IsJoinClause(Node *clause) { - OpExpr *operatorExpression = NULL; - bool equalsOperator = false; - List *varList = NIL; - Var *initialVar = NULL; Var *var = NULL; if (!IsA(clause, OpExpr)) @@ -1432,8 +1398,8 @@ IsJoinClause(Node *clause) return false; } - operatorExpression = castNode(OpExpr, clause); - equalsOperator = OperatorImplementsEquality(operatorExpression->opno); + OpExpr *operatorExpression = castNode(OpExpr, clause); + bool equalsOperator = OperatorImplementsEquality(operatorExpression->opno); if (!equalsOperator) { @@ -1452,13 +1418,13 @@ IsJoinClause(Node *clause) * take all column references from the clause, if we find 2 column references from a * different relation we assume this is a join clause */ - varList = pull_var_clause_default(clause); + List *varList = pull_var_clause_default(clause); if (list_length(varList) <= 0) { /* no column references in query, not describing a join */ return false; } - initialVar = castNode(Var, linitial(varList)); + Var *initialVar = castNode(Var, linitial(varList)); foreach_ptr(var, varList) { @@ -1635,16 +1601,17 @@ MultiJoinTree(List *joinOrderList, List *collectTableList, List *joinWhereClause JoinRuleType joinRuleType = joinOrderNode->joinRuleType; JoinType joinType = joinOrderNode->joinType; Var *partitionColumn = joinOrderNode->partitionColumn; - MultiNode *newJoinNode = NULL; List *joinClauseList = joinOrderNode->joinClauseList; /* * Build a join node between the top of our join tree and the next * table in the join order. 
*/ - newJoinNode = ApplyJoinRule(currentTopNode, (MultiNode *) collectNode, - joinRuleType, partitionColumn, joinType, - joinClauseList); + MultiNode *newJoinNode = ApplyJoinRule(currentTopNode, + (MultiNode *) collectNode, + joinRuleType, partitionColumn, + joinType, + joinClauseList); /* the new join node becomes the top of our join tree */ currentTopNode = newJoinNode; @@ -1727,22 +1694,19 @@ MultiSelectNode(List *whereClauseList) static bool IsSelectClause(Node *clause) { - List *columnList = NIL; ListCell *columnCell = NULL; - Var *firstColumn = NULL; - Index firstColumnTableId = 0; bool isSelectClause = true; /* extract columns from the clause */ - columnList = pull_var_clause_default(clause); + List *columnList = pull_var_clause_default(clause); if (list_length(columnList) == 0) { return true; } /* get first column's tableId */ - firstColumn = (Var *) linitial(columnList); - firstColumnTableId = firstColumn->varno; + Var *firstColumn = (Var *) linitial(columnList); + Index firstColumnTableId = firstColumn->varno; /* check if all columns are from the same table */ foreach(columnCell, columnList) @@ -1766,13 +1730,11 @@ IsSelectClause(Node *clause) MultiProject * MultiProjectNode(List *targetEntryList) { - MultiProject *projectNode = NULL; List *uniqueColumnList = NIL; - List *columnList = NIL; ListCell *columnCell = NULL; /* extract the list of columns and remove any duplicates */ - columnList = pull_var_clause_default((Node *) targetEntryList); + List *columnList = pull_var_clause_default((Node *) targetEntryList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); @@ -1781,7 +1743,7 @@ MultiProjectNode(List *targetEntryList) } /* create project node with list of columns to project */ - projectNode = CitusMakeNode(MultiProject); + MultiProject *projectNode = CitusMakeNode(MultiProject); projectNode->columnList = uniqueColumnList; return projectNode; @@ -1932,7 +1894,6 @@ List * FindNodesOfType(MultiNode *node, int type) { List 
*nodeList = NIL; - int nodeType = T_Invalid; /* terminal condition for recursion */ if (node == NULL) @@ -1941,7 +1902,7 @@ FindNodesOfType(MultiNode *node, int type) } /* current node has expected node type */ - nodeType = CitusNodeTag(node); + int nodeType = CitusNodeTag(node); if (nodeType == type) { nodeList = lappend(nodeList, node); @@ -1997,27 +1958,22 @@ static MultiNode * ApplyJoinRule(MultiNode *leftNode, MultiNode *rightNode, JoinRuleType ruleType, Var *partitionColumn, JoinType joinType, List *joinClauseList) { - RuleApplyFunction ruleApplyFunction = NULL; - MultiNode *multiNode = NULL; - - List *applicableJoinClauses = NIL; List *leftTableIdList = OutputTableIdList(leftNode); List *rightTableIdList = OutputTableIdList(rightNode); int rightTableIdCount PG_USED_FOR_ASSERTS_ONLY = 0; - uint32 rightTableId = 0; rightTableIdCount = list_length(rightTableIdList); Assert(rightTableIdCount == 1); /* find applicable join clauses between the left and right data sources */ - rightTableId = (uint32) linitial_int(rightTableIdList); - applicableJoinClauses = ApplicableJoinClauses(leftTableIdList, rightTableId, - joinClauseList); + uint32 rightTableId = (uint32) linitial_int(rightTableIdList); + List *applicableJoinClauses = ApplicableJoinClauses(leftTableIdList, rightTableId, + joinClauseList); /* call the join rule application function to create the new join node */ - ruleApplyFunction = JoinRuleApplyFunction(ruleType); - multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn, - joinType, applicableJoinClauses); + RuleApplyFunction ruleApplyFunction = JoinRuleApplyFunction(ruleType); + MultiNode *multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn, + joinType, applicableJoinClauses); if (joinType != JOIN_INNER && CitusIsA(multiNode, MultiJoin)) { @@ -2041,7 +1997,6 @@ static RuleApplyFunction JoinRuleApplyFunction(JoinRuleType ruleType) { static bool ruleApplyFunctionInitialized = false; - RuleApplyFunction ruleApplyFunction = NULL; 
if (!ruleApplyFunctionInitialized) { @@ -2057,7 +2012,7 @@ JoinRuleApplyFunction(JoinRuleType ruleType) ruleApplyFunctionInitialized = true; } - ruleApplyFunction = RuleApplyFunctionArray[ruleType]; + RuleApplyFunction ruleApplyFunction = RuleApplyFunctionArray[ruleType]; Assert(ruleApplyFunction != NULL); return ruleApplyFunction; @@ -2154,11 +2109,6 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { - OpExpr *joinClause = NULL; - Var *leftColumn = NULL; - Var *rightColumn = NULL; - List *rightTableIdList = NIL; - uint32 rightTableId = 0; uint32 partitionTableId = partitionColumn->varno; /* create all operator structures up front */ @@ -2171,12 +2121,13 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode, * column against the join clause's columns. If one of the columns matches, * we introduce a (re-)partition operator for the other column. */ - joinClause = SinglePartitionJoinClause(partitionColumn, applicableJoinClauses); + OpExpr *joinClause = SinglePartitionJoinClause(partitionColumn, + applicableJoinClauses); Assert(joinClause != NULL); /* both are verified in SinglePartitionJoinClause to not be NULL, assert is to guard */ - leftColumn = LeftColumnOrNULL(joinClause); - rightColumn = RightColumnOrNULL(joinClause); + Var *leftColumn = LeftColumnOrNULL(joinClause); + Var *rightColumn = RightColumnOrNULL(joinClause); Assert(leftColumn != NULL); Assert(rightColumn != NULL); @@ -2193,8 +2144,8 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode, } /* determine the node the partition operator goes on top of */ - rightTableIdList = OutputTableIdList(rightNode); - rightTableId = (uint32) linitial_int(rightTableIdList); + List *rightTableIdList = OutputTableIdList(rightNode); + uint32 rightTableId = (uint32) linitial_int(rightTableIdList); Assert(list_length(rightTableIdList) == 1); /* @@ -2238,33 +2189,22 @@ ApplyDualPartitionJoin(MultiNode 
*leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { - MultiJoin *joinNode = NULL; - OpExpr *joinClause = NULL; - MultiPartition *leftPartitionNode = NULL; - MultiPartition *rightPartitionNode = NULL; - MultiCollect *leftCollectNode = NULL; - MultiCollect *rightCollectNode = NULL; - Var *leftColumn = NULL; - Var *rightColumn = NULL; - List *rightTableIdList = NIL; - uint32 rightTableId = 0; - /* find the appropriate join clause */ - joinClause = DualPartitionJoinClause(applicableJoinClauses); + OpExpr *joinClause = DualPartitionJoinClause(applicableJoinClauses); Assert(joinClause != NULL); /* both are verified in DualPartitionJoinClause to not be NULL, assert is to guard */ - leftColumn = LeftColumnOrNULL(joinClause); - rightColumn = RightColumnOrNULL(joinClause); + Var *leftColumn = LeftColumnOrNULL(joinClause); + Var *rightColumn = RightColumnOrNULL(joinClause); Assert(leftColumn != NULL); Assert(rightColumn != NULL); - rightTableIdList = OutputTableIdList(rightNode); - rightTableId = (uint32) linitial_int(rightTableIdList); + List *rightTableIdList = OutputTableIdList(rightNode); + uint32 rightTableId = (uint32) linitial_int(rightTableIdList); Assert(list_length(rightTableIdList) == 1); - leftPartitionNode = CitusMakeNode(MultiPartition); - rightPartitionNode = CitusMakeNode(MultiPartition); + MultiPartition *leftPartitionNode = CitusMakeNode(MultiPartition); + MultiPartition *rightPartitionNode = CitusMakeNode(MultiPartition); /* find the partition node each join clause column belongs to */ if (leftColumn->varno == rightTableId) @@ -2283,14 +2223,14 @@ ApplyDualPartitionJoin(MultiNode *leftNode, MultiNode *rightNode, SetChild((MultiUnaryNode *) rightPartitionNode, rightNode); /* add collect operators on top of the two partition operators */ - leftCollectNode = CitusMakeNode(MultiCollect); - rightCollectNode = CitusMakeNode(MultiCollect); + MultiCollect *leftCollectNode = CitusMakeNode(MultiCollect); + 
MultiCollect *rightCollectNode = CitusMakeNode(MultiCollect); SetChild((MultiUnaryNode *) leftCollectNode, (MultiNode *) leftPartitionNode); SetChild((MultiUnaryNode *) rightCollectNode, (MultiNode *) rightPartitionNode); /* add join operator on top of the two collect operators */ - joinNode = CitusMakeNode(MultiJoin); + MultiJoin *joinNode = CitusMakeNode(MultiJoin); joinNode->joinRuleType = DUAL_PARTITION_JOIN; joinNode->joinType = joinType; joinNode->joinClauseList = applicableJoinClauses; diff --git a/src/backend/distributed/planner/multi_master_planner.c b/src/backend/distributed/planner/multi_master_planner.c index 8992e3b50..852584c6d 100644 --- a/src/backend/distributed/planner/multi_master_planner.c +++ b/src/backend/distributed/planner/multi_master_planner.c @@ -71,13 +71,13 @@ PlannedStmt * MasterNodeSelectPlan(DistributedPlan *distributedPlan, CustomScan *remoteScan) { Query *masterQuery = distributedPlan->masterQuery; - PlannedStmt *masterSelectPlan = NULL; Job *workerJob = distributedPlan->workerJob; List *workerTargetList = workerJob->jobQuery->targetList; List *masterTargetList = MasterTargetList(workerTargetList); - masterSelectPlan = BuildSelectStatement(masterQuery, masterTargetList, remoteScan); + PlannedStmt *masterSelectPlan = BuildSelectStatement(masterQuery, masterTargetList, + remoteScan); return masterSelectPlan; } @@ -99,15 +99,13 @@ MasterTargetList(List *workerTargetList) foreach(workerTargetCell, workerTargetList) { TargetEntry *workerTargetEntry = (TargetEntry *) lfirst(workerTargetCell); - TargetEntry *masterTargetEntry = NULL; - Var *masterColumn = NULL; if (workerTargetEntry->resjunk) { continue; } - masterColumn = makeVarFromTargetEntry(tableId, workerTargetEntry); + Var *masterColumn = makeVarFromTargetEntry(tableId, workerTargetEntry); masterColumn->varattno = columnId; masterColumn->varoattno = columnId; columnId++; @@ -124,7 +122,7 @@ MasterTargetList(List *workerTargetList) * from the worker target entry. 
Note that any changes to worker target * entry's sort and group clauses will *break* us here. */ - masterTargetEntry = flatCopyTargetEntry(workerTargetEntry); + TargetEntry *masterTargetEntry = flatCopyTargetEntry(workerTargetEntry); masterTargetEntry->expr = (Expr *) masterColumn; masterTargetList = lappend(masterTargetList, masterTargetEntry); } @@ -469,16 +467,14 @@ BuildAggregatePlan(PlannerInfo *root, Query *masterQuery, Plan *subPlan) static bool HasDistinctAggregate(Query *masterQuery) { - List *targetVarList = NIL; - List *havingVarList = NIL; - List *allColumnList = NIL; ListCell *allColumnCell = NULL; - targetVarList = pull_var_clause((Node *) masterQuery->targetList, - PVC_INCLUDE_AGGREGATES); - havingVarList = pull_var_clause(masterQuery->havingQual, PVC_INCLUDE_AGGREGATES); + List *targetVarList = pull_var_clause((Node *) masterQuery->targetList, + PVC_INCLUDE_AGGREGATES); + List *havingVarList = pull_var_clause(masterQuery->havingQual, + PVC_INCLUDE_AGGREGATES); - allColumnList = list_concat(targetVarList, havingVarList); + List *allColumnList = list_concat(targetVarList, havingVarList); foreach(allColumnCell, allColumnList) { Node *columnNode = lfirst(allColumnCell); @@ -506,7 +502,6 @@ static bool UseGroupAggregateWithHLL(Query *masterQuery) { Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, true); - const char *gucStrValue = NULL; /* If HLL extension is not loaded, return false */ if (!OidIsValid(hllId)) @@ -515,7 +510,7 @@ UseGroupAggregateWithHLL(Query *masterQuery) } /* If HLL is loaded but related GUC is not set, return false */ - gucStrValue = GetConfigOption(HLL_FORCE_GROUPAGG_GUC_NAME, true, false); + const char *gucStrValue = GetConfigOption(HLL_FORCE_GROUPAGG_GUC_NAME, true, false); if (gucStrValue == NULL || strcmp(gucStrValue, "off") == 0) { return false; @@ -532,10 +527,9 @@ UseGroupAggregateWithHLL(Query *masterQuery) static bool QueryContainsAggregateWithHLL(Query *query) { - List *varList = NIL; ListCell *varCell = NULL; - varList = 
pull_var_clause((Node *) query->targetList, PVC_INCLUDE_AGGREGATES); + List *varList = pull_var_clause((Node *) query->targetList, PVC_INCLUDE_AGGREGATES); foreach(varCell, varList) { Var *var = (Var *) lfirst(varCell); @@ -579,10 +573,8 @@ static Plan * BuildDistinctPlan(Query *masterQuery, Plan *subPlan) { Plan *distinctPlan = NULL; - bool distinctClausesHashable = true; List *distinctClauseList = masterQuery->distinctClause; List *targetList = copyObject(masterQuery->targetList); - bool hasDistinctAggregate = false; /* * We don't need to add distinct plan if all of the columns used in group by @@ -602,8 +594,8 @@ BuildDistinctPlan(Query *masterQuery, Plan *subPlan) * members are hashable, and not containing distinct aggregate. * Otherwise create sort+unique plan. */ - distinctClausesHashable = grouping_is_hashable(distinctClauseList); - hasDistinctAggregate = HasDistinctAggregate(masterQuery); + bool distinctClausesHashable = grouping_is_hashable(distinctClauseList); + bool hasDistinctAggregate = HasDistinctAggregate(masterQuery); if (enable_hashagg && distinctClausesHashable && !hasDistinctAggregate) { diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 14ff78f4b..efbd35d1e 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -217,22 +217,17 @@ DistributedPlan * CreatePhysicalDistributedPlan(MultiTreeRoot *multiTree, PlannerRestrictionContext *plannerRestrictionContext) { - DistributedPlan *distributedPlan = NULL; - Job *workerJob = NULL; - Query *masterQuery = NULL; - List *masterDependedJobList = NIL; - /* build the worker job tree and check that we only one job in the tree */ - workerJob = BuildJobTree(multiTree); + Job *workerJob = BuildJobTree(multiTree); /* create the tree of executable tasks for the worker job */ workerJob = BuildJobTreeTaskList(workerJob, plannerRestrictionContext); /* 
build the final merge query to execute on the master */ - masterDependedJobList = list_make1(workerJob); - masterQuery = BuildJobQuery((MultiNode *) multiTree, masterDependedJobList); + List *masterDependedJobList = list_make1(workerJob); + Query *masterQuery = BuildJobQuery((MultiNode *) multiTree, masterDependedJobList); - distributedPlan = CitusMakeNode(DistributedPlan); + DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan); distributedPlan->workerJob = workerJob; distributedPlan->masterQuery = masterQuery; distributedPlan->routerExecutable = DistributedPlanRouterExecutable(distributedPlan); @@ -258,7 +253,6 @@ DistributedPlanRouterExecutable(DistributedPlan *distributedPlan) List *workerTaskList = job->taskList; int taskCount = list_length(workerTaskList); int dependedJobCount = list_length(job->dependedJobList); - bool masterQueryHasAggregates = false; if (!EnableRouterExecution) { @@ -292,7 +286,7 @@ DistributedPlanRouterExecutable(DistributedPlan *distributedPlan) * have either an aggregate or a function expression which has to be executed for * the correct results. 
*/ - masterQueryHasAggregates = job->jobQuery->hasAggs; + bool masterQueryHasAggregates = job->jobQuery->hasAggs; if (masterQueryHasAggregates) { return false; @@ -521,9 +515,6 @@ static Oid RangePartitionJoinBaseRelationId(MultiJoin *joinNode) { MultiPartition *partitionNode = NULL; - MultiTable *baseTable = NULL; - Index baseTableId = 0; - Oid baseRelationId = InvalidOid; MultiNode *leftChildNode = joinNode->binaryNode.leftChildNode; MultiNode *rightChildNode = joinNode->binaryNode.rightChildNode; @@ -537,9 +528,9 @@ RangePartitionJoinBaseRelationId(MultiJoin *joinNode) partitionNode = (MultiPartition *) rightChildNode; } - baseTableId = partitionNode->splitPointTableId; - baseTable = FindTableNode((MultiNode *) joinNode, baseTableId); - baseRelationId = baseTable->relationId; + Index baseTableId = partitionNode->splitPointTableId; + MultiTable *baseTable = FindTableNode((MultiNode *) joinNode, baseTableId); + Oid baseRelationId = baseTable->relationId; return baseRelationId; } @@ -580,19 +571,11 @@ FindTableNode(MultiNode *multiNode, int rangeTableId) static Query * BuildJobQuery(MultiNode *multiNode, List *dependedJobList) { - Query *jobQuery = NULL; - MultiNode *parentNode = NULL; bool updateColumnAttributes = false; - List *rangeTableList = NIL; List *targetList = NIL; - List *extendedOpNodeList = NIL; List *sortClauseList = NIL; - List *groupClauseList = NIL; - List *selectClauseList = NIL; Node *limitCount = NULL; Node *limitOffset = NULL; - FromExpr *joinTree = NULL; - Node *joinRoot = NULL; Node *havingQual = NULL; bool hasDistinctOn = false; List *distinctClause = NIL; @@ -611,7 +594,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) * Note that we don't do this for master queries, as column attributes for * master target entries are already set during the master/worker split. 
*/ - parentNode = ParentNode(multiNode); + MultiNode *parentNode = ParentNode(multiNode); if (parentNode != NULL) { updateColumnAttributes = true; @@ -640,7 +623,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) * Otherwise, we use the target list based on the MultiProject node at this * level in the query tree. */ - extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); + List *extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); @@ -654,8 +637,8 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) } /* build the join tree and the range table list */ - rangeTableList = BaseRangeTableList(multiNode); - joinRoot = QueryJoinTree(multiNode, dependedJobList, &rangeTableList); + List *rangeTableList = BaseRangeTableList(multiNode); + Node *joinRoot = QueryJoinTree(multiNode, dependedJobList, &rangeTableList); /* update the column attributes for target entries */ if (updateColumnAttributes) @@ -675,11 +658,11 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) } /* build group clauses */ - groupClauseList = QueryGroupClauseList(multiNode); + List *groupClauseList = QueryGroupClauseList(multiNode); /* build the where clause list using select predicates */ - selectClauseList = QuerySelectClauseList(multiNode); + List *selectClauseList = QuerySelectClauseList(multiNode); /* set correct column attributes for select and having clauses */ if (updateColumnAttributes) @@ -711,12 +694,12 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. 
*/ - joinTree = makeNode(FromExpr); + FromExpr *joinTree = makeNode(FromExpr); joinTree->quals = (Node *) list_copy(selectClauseList); joinTree->fromlist = list_make1(joinRoot); /* build the query structure for this job */ - jobQuery = makeNode(Query); + Query *jobQuery = makeNode(Query); jobQuery->commandType = CMD_SELECT; jobQuery->querySource = QSRC_ORIGINAL; jobQuery->canSetTag = true; @@ -745,44 +728,35 @@ BuildJobQuery(MultiNode *multiNode, List *dependedJobList) static Query * BuildReduceQuery(MultiExtendedOp *extendedOpNode, List *dependedJobList) { - Query *reduceQuery = NULL; MultiNode *multiNode = (MultiNode *) extendedOpNode; List *derivedRangeTableList = NIL; List *targetList = NIL; - List *whereClauseList = NIL; - List *selectClauseList = NIL; - List *joinClauseList = NIL; - List *columnList = NIL; ListCell *columnCell = NULL; - FromExpr *joinTree = NULL; List *columnNameList = NIL; - RangeTblEntry *rangeTableEntry = NULL; Job *dependedJob = linitial(dependedJobList); List *dependedTargetList = dependedJob->jobQuery->targetList; uint32 columnCount = (uint32) list_length(dependedTargetList); - uint32 columnIndex = 0; - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (uint32 columnIndex = 0; columnIndex < columnCount; columnIndex++) { - Value *columnValue = NULL; StringInfo columnNameString = makeStringInfo(); appendStringInfo(columnNameString, MERGE_COLUMN_FORMAT, columnIndex); - columnValue = makeString(columnNameString->data); + Value *columnValue = makeString(columnNameString->data); columnNameList = lappend(columnNameList, columnValue); } /* create a derived range table for the subtree below the collect */ - rangeTableEntry = DerivedRangeTableEntry(multiNode, columnNameList, - OutputTableIdList(multiNode)); + RangeTblEntry *rangeTableEntry = DerivedRangeTableEntry(multiNode, columnNameList, + OutputTableIdList(multiNode)); rangeTableEntry->eref->colnames = columnNameList; ModifyRangeTblExtraData(rangeTableEntry, 
CITUS_RTE_SHARD, NULL, NULL, NULL); derivedRangeTableList = lappend(derivedRangeTableList, rangeTableEntry); targetList = copyObject(extendedOpNode->targetList); - columnList = pull_var_clause_default((Node *) targetList); + List *columnList = pull_var_clause_default((Node *) targetList); foreach(columnCell, columnList) { @@ -795,21 +769,21 @@ BuildReduceQuery(MultiExtendedOp *extendedOpNode, List *dependedJobList) } /* build the where clause list using select and join predicates */ - selectClauseList = QuerySelectClauseList((MultiNode *) extendedOpNode); - joinClauseList = QueryJoinClauseList((MultiNode *) extendedOpNode); - whereClauseList = list_concat(selectClauseList, joinClauseList); + List *selectClauseList = QuerySelectClauseList((MultiNode *) extendedOpNode); + List *joinClauseList = QueryJoinClauseList((MultiNode *) extendedOpNode); + List *whereClauseList = list_concat(selectClauseList, joinClauseList); /* * Build the From/Where construct. We keep the where-clause list implicitly * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. 
*/ - joinTree = makeNode(FromExpr); + FromExpr *joinTree = makeNode(FromExpr); joinTree->quals = (Node *) whereClauseList; joinTree->fromlist = QueryFromList(derivedRangeTableList); /* build the query structure for this job */ - reduceQuery = makeNode(Query); + Query *reduceQuery = makeNode(Query); reduceQuery->commandType = CMD_SELECT; reduceQuery->querySource = QSRC_ORIGINAL; reduceQuery->canSetTag = true; @@ -908,18 +882,16 @@ static List * DerivedColumnNameList(uint32 columnCount, uint64 generatingJobId) { List *columnNameList = NIL; - uint32 columnIndex = 0; - for (columnIndex = 0; columnIndex < columnCount; columnIndex++) + for (uint32 columnIndex = 0; columnIndex < columnCount; columnIndex++) { StringInfo columnName = makeStringInfo(); - Value *columnValue = NULL; appendStringInfo(columnName, "intermediate_column_"); appendStringInfo(columnName, UINT64_FORMAT "_", generatingJobId); appendStringInfo(columnName, "%u", columnIndex); - columnValue = makeString(columnName->data); + Value *columnValue = makeString(columnName->data); columnNameList = lappend(columnNameList, columnValue); } @@ -938,16 +910,12 @@ DerivedColumnNameList(uint32 columnCount, uint64 generatingJobId) static List * QueryTargetList(MultiNode *multiNode) { - MultiProject *topProjectNode = NULL; - List *columnList = NIL; - List *queryTargetList = NIL; - List *projectNodeList = FindNodesOfType(multiNode, T_MultiProject); Assert(list_length(projectNodeList) > 0); - topProjectNode = (MultiProject *) linitial(projectNodeList); - columnList = topProjectNode->columnList; - queryTargetList = TargetEntryList(columnList); + MultiProject *topProjectNode = (MultiProject *) linitial(projectNodeList); + List *columnList = topProjectNode->columnList; + List *queryTargetList = TargetEntryList(columnList); Assert(queryTargetList != NIL); return queryTargetList; @@ -1163,9 +1131,7 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList { MultiJoin *joinNode = (MultiJoin *) 
multiNode; MultiBinaryNode *binaryNode = (MultiBinaryNode *) multiNode; - List *columnList = NIL; ListCell *columnCell = NULL; - RangeTblEntry *rangeTableEntry = NULL; JoinExpr *joinExpr = makeNode(JoinExpr); joinExpr->jointype = joinNode->joinType; joinExpr->isNatural = false; @@ -1194,12 +1160,13 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList joinExpr->jointype = JOIN_LEFT; } - rangeTableEntry = JoinRangeTableEntry(joinExpr, dependedJobList, - *rangeTableList); + RangeTblEntry *rangeTableEntry = JoinRangeTableEntry(joinExpr, + dependedJobList, + *rangeTableList); *rangeTableList = lappend(*rangeTableList, rangeTableEntry); /* fix the column attributes in ON (...) clauses */ - columnList = pull_var_clause_default((Node *) joinNode->joinClauseList); + List *columnList = pull_var_clause_default((Node *) joinNode->joinClauseList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); @@ -1263,7 +1230,6 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList case T_MultiCartesianProduct: { MultiBinaryNode *binaryNode = (MultiBinaryNode *) multiNode; - RangeTblEntry *rangeTableEntry = NULL; JoinExpr *joinExpr = makeNode(JoinExpr); joinExpr->jointype = JOIN_INNER; @@ -1277,8 +1243,9 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList joinExpr->quals = NULL; joinExpr->rtindex = list_length(*rangeTableList) + 1; - rangeTableEntry = JoinRangeTableEntry(joinExpr, dependedJobList, - *rangeTableList); + RangeTblEntry *rangeTableEntry = JoinRangeTableEntry(joinExpr, + dependedJobList, + *rangeTableList); *rangeTableList = lappend(*rangeTableList, rangeTableEntry); return (Node *) joinExpr; @@ -1291,12 +1258,11 @@ QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList case T_MultiPartition: { MultiUnaryNode *unaryNode = (MultiUnaryNode *) multiNode; - Node *childNode = NULL; Assert(UnaryOperator(multiNode)); - childNode = 
QueryJoinTree(unaryNode->childNode, dependedJobList, - rangeTableList); + Node *childNode = QueryJoinTree(unaryNode->childNode, dependedJobList, + rangeTableList); return childNode; } @@ -1444,10 +1410,9 @@ static List * QueryFromList(List *rangeTableList) { List *fromList = NIL; - Index rangeTableIndex = 1; int rangeTableCount = list_length(rangeTableList); - for (rangeTableIndex = 1; rangeTableIndex <= rangeTableCount; rangeTableIndex++) + for (Index rangeTableIndex = 1; rangeTableIndex <= rangeTableCount; rangeTableIndex++) { RangeTblRef *rangeTableReference = makeNode(RangeTblRef); rangeTableReference->rtindex = rangeTableIndex; @@ -1479,21 +1444,11 @@ QueryFromList(List *rangeTableList) static Query * BuildSubqueryJobQuery(MultiNode *multiNode) { - Query *jobQuery = NULL; - Query *subquery = NULL; - MultiTable *multiTable = NULL; - RangeTblEntry *rangeTableEntry = NULL; - List *subqueryMultiTableList = NIL; - List *rangeTableList = NIL; List *targetList = NIL; - List *extendedOpNodeList = NIL; List *sortClauseList = NIL; - List *groupClauseList = NIL; - List *whereClauseList = NIL; Node *havingQual = NULL; Node *limitCount = NULL; Node *limitOffset = NULL; - FromExpr *joinTree = NULL; bool hasAggregates = false; List *distinctClause = NIL; bool hasDistinctOn = false; @@ -1503,28 +1458,28 @@ BuildSubqueryJobQuery(MultiNode *multiNode) /* we start building jobs from below the collect node */ Assert(!CitusIsA(multiNode, MultiCollect)); - subqueryMultiTableList = SubqueryMultiTableList(multiNode); + List *subqueryMultiTableList = SubqueryMultiTableList(multiNode); Assert(list_length(subqueryMultiTableList) == 1); - multiTable = (MultiTable *) linitial(subqueryMultiTableList); - subquery = multiTable->subquery; + MultiTable *multiTable = (MultiTable *) linitial(subqueryMultiTableList); + Query *subquery = multiTable->subquery; /* build subquery range table list */ - rangeTableEntry = makeNode(RangeTblEntry); + RangeTblEntry *rangeTableEntry = 
makeNode(RangeTblEntry); rangeTableEntry->rtekind = RTE_SUBQUERY; rangeTableEntry->inFromCl = true; rangeTableEntry->eref = multiTable->referenceNames; rangeTableEntry->alias = multiTable->alias; rangeTableEntry->subquery = subquery; - rangeTableList = list_make1(rangeTableEntry); + List *rangeTableList = list_make1(rangeTableEntry); /* * If we have an extended operator, then we copy the operator's target list. * Otherwise, we use the target list based on the MultiProject node at this * level in the query tree. */ - extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); + List *extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); @@ -1551,10 +1506,10 @@ BuildSubqueryJobQuery(MultiNode *multiNode) } /* build group clauses */ - groupClauseList = QueryGroupClauseList(multiNode); + List *groupClauseList = QueryGroupClauseList(multiNode); /* build the where clause list using select predicates */ - whereClauseList = QuerySelectClauseList(multiNode); + List *whereClauseList = QuerySelectClauseList(multiNode); if (contain_agg_clause((Node *) targetList) || contain_agg_clause((Node *) havingQual)) @@ -1575,12 +1530,12 @@ BuildSubqueryJobQuery(MultiNode *multiNode) * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. 
*/ - joinTree = makeNode(FromExpr); + FromExpr *joinTree = makeNode(FromExpr); joinTree->quals = (Node *) whereClauseList; joinTree->fromlist = QueryFromList(rangeTableList); /* build the query structure for this job */ - jobQuery = makeNode(Query); + Query *jobQuery = makeNode(Query); jobQuery->commandType = CMD_SELECT; jobQuery->querySource = QSRC_ORIGINAL; jobQuery->canSetTag = true; @@ -1665,11 +1620,10 @@ NewTableId(Index originalTableId, List *rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); List *originalTableIdList = NIL; - bool listMember = false; ExtractRangeTblExtraData(rangeTableEntry, NULL, NULL, NULL, &originalTableIdList); - listMember = list_member_int(originalTableIdList, originalTableId); + bool listMember = list_member_int(originalTableIdList, originalTableId); if (listMember) { return rangeTableIndex; @@ -1740,7 +1694,6 @@ NewColumnId(Index originalTableId, AttrNumber originalColumnId, static Job * JobForRangeTable(List *jobList, RangeTblEntry *rangeTableEntry) { - Job *searchedJob = NULL; List *searchedTableIdList = NIL; CitusRTEKind rangeTableKind; @@ -1749,7 +1702,7 @@ JobForRangeTable(List *jobList, RangeTblEntry *rangeTableEntry) Assert(rangeTableKind == CITUS_RTE_REMOTE_QUERY); - searchedJob = JobForTableIdList(jobList, searchedTableIdList); + Job *searchedJob = JobForTableIdList(jobList, searchedTableIdList); return searchedJob; } @@ -1773,8 +1726,6 @@ JobForTableIdList(List *jobList, List *searchedTableIdList) List *jobRangeTableList = job->jobQuery->rtable; List *jobTableIdList = NIL; ListCell *jobRangeTableCell = NULL; - List *lhsDiff = NIL; - List *rhsDiff = NIL; foreach(jobRangeTableCell, jobRangeTableList) { @@ -1792,8 +1743,8 @@ JobForTableIdList(List *jobList, List *searchedTableIdList) * Check if the searched range table's tableIds and the current job's * tableIds are the same. 
*/ - lhsDiff = list_difference_int(jobTableIdList, searchedTableIdList); - rhsDiff = list_difference_int(searchedTableIdList, jobTableIdList); + List *lhsDiff = list_difference_int(jobTableIdList, searchedTableIdList); + List *rhsDiff = list_difference_int(searchedTableIdList, jobTableIdList); if (lhsDiff == NIL && rhsDiff == NIL) { searchedJob = job; @@ -1855,7 +1806,6 @@ UniqueJobId(void) static uint32 jobIdCounter = 0; uint64 jobId = 0; - uint64 jobIdNumber = 0; uint64 processId = 0; uint64 localGroupId = 0; @@ -1893,7 +1843,7 @@ UniqueJobId(void) * Use the remaining 23 bits to distinguish jobs by the * same backend. */ - jobIdNumber = jobIdCounter & 0x1FFFFFF; + uint64 jobIdNumber = jobIdCounter & 0x1FFFFFF; jobId = jobId | jobIdNumber; return jobId; @@ -1925,7 +1875,6 @@ BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey, PartitionType partitionType, Oid baseRelationId, BoundaryNodeJobType boundaryNodeJobType) { - MapMergeJob *mapMergeJob = NULL; List *rangeTableList = jobQuery->rtable; Var *partitionColumn = copyObject(partitionKey); @@ -1935,7 +1884,7 @@ BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey, UpdateColumnAttributes(partitionColumn, rangeTableList, dependedJobList); } - mapMergeJob = CitusMakeNode(MapMergeJob); + MapMergeJob *mapMergeJob = CitusMakeNode(MapMergeJob); mapMergeJob->job.jobId = UniqueJobId(); mapMergeJob->job.jobQuery = jobQuery; mapMergeJob->job.dependedJobList = dependedJobList; @@ -1960,11 +1909,10 @@ BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey, RANGE_PARTITION_TYPE) { DistTableCacheEntry *cache = DistributedTableCacheEntry(baseRelationId); - bool hasUninitializedShardInterval = false; uint32 shardCount = cache->shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray = cache->sortedShardIntervalArray; - hasUninitializedShardInterval = cache->hasUninitializedShardInterval; + bool hasUninitializedShardInterval = 
cache->hasUninitializedShardInterval; if (hasUninitializedShardInterval) { ereport(ERROR, (errmsg("cannot range repartition shard with " @@ -2007,8 +1955,6 @@ HashPartitionCount(void) static ArrayType * SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount) { - ArrayType *splitPointObject = NULL; - uint32 intervalIndex = 0; Oid typeId = InvalidOid; bool typeByValue = false; char typeAlignment = 0; @@ -2018,7 +1964,7 @@ SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount) uint32 minDatumCount = shardIntervalCount; Datum *minDatumArray = palloc0(minDatumCount * sizeof(Datum)); - for (intervalIndex = 0; intervalIndex < shardIntervalCount; intervalIndex++) + for (uint32 intervalIndex = 0; intervalIndex < shardIntervalCount; intervalIndex++) { ShardInterval *shardInterval = shardIntervalArray[intervalIndex]; minDatumArray[intervalIndex] = shardInterval->minValue; @@ -2033,8 +1979,8 @@ SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount) /* construct the split point object from the sorted array */ get_typlenbyvalalign(typeId, &typeLength, &typeByValue, &typeAlignment); - splitPointObject = construct_array(minDatumArray, minDatumCount, typeId, - typeLength, typeByValue, typeAlignment); + ArrayType *splitPointObject = construct_array(minDatumArray, minDatumCount, typeId, + typeLength, typeByValue, typeAlignment); return splitPointObject; } @@ -2055,8 +2001,6 @@ static Job * BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestrictionContext) { List *flattenedJobList = NIL; - uint32 flattenedJobCount = 0; - int32 jobIndex = 0; /* * We traverse the job tree in preorder, and append each visited job to our @@ -2079,12 +2023,11 @@ BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestriction * we can create dependencies between tasks bottom up, and assign them to * worker nodes accordingly. 
*/ - flattenedJobCount = (int32) list_length(flattenedJobList); - for (jobIndex = (flattenedJobCount - 1); jobIndex >= 0; jobIndex--) + uint32 flattenedJobCount = (int32) list_length(flattenedJobList); + for (int32 jobIndex = (flattenedJobCount - 1); jobIndex >= 0; jobIndex--) { Job *job = (Job *) list_nth(flattenedJobList, jobIndex); List *sqlTaskList = NIL; - List *assignedSqlTaskList = NIL; ListCell *assignedSqlTaskCell = NULL; /* create sql tasks for the job, and prune redundant data fetch tasks */ @@ -2113,7 +2056,7 @@ BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestriction * We first assign sql and merge tasks to worker nodes. Next, we assign * sql tasks' data fetch dependencies. */ - assignedSqlTaskList = AssignTaskList(sqlTaskList); + List *assignedSqlTaskList = AssignTaskList(sqlTaskList); AssignDataFetchDependencies(assignedSqlTaskList); /* now assign merge task's data fetch dependencies */ @@ -2167,9 +2110,6 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, ListCell *restrictionCell = NULL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ int shardCount = 0; - int shardOffset = 0; - int minShardOffset = 0; - int maxShardOffset = 0; bool *taskRequiredForShardIndex = NULL; ListCell *prunedRelationShardCell = NULL; @@ -2183,8 +2123,8 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, } /* defaults to be used if this is a reference table-only query */ - minShardOffset = 0; - maxShardOffset = 0; + int minShardOffset = 0; + int maxShardOffset = 0; forboth(prunedRelationShardCell, prunedRelationShardList, restrictionCell, relationRestrictionContext->relationRestrictionList) @@ -2194,9 +2134,8 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, Oid relationId = relationRestriction->relationId; List *prunedShardList = (List *) lfirst(prunedRelationShardCell); ListCell *shardIntervalCell = NULL; - DistTableCacheEntry *cacheEntry = NULL; - cacheEntry = DistributedTableCacheEntry(relationId); + 
DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { continue; @@ -2249,19 +2188,19 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, * given that hash-distributed tables typically only have a few shards the * iteration is still very fast. */ - for (shardOffset = minShardOffset; shardOffset <= maxShardOffset; shardOffset++) + for (int shardOffset = minShardOffset; shardOffset <= maxShardOffset; shardOffset++) { - Task *subqueryTask = NULL; - if (taskRequiredForShardIndex != NULL && !taskRequiredForShardIndex[shardOffset]) { /* this shard index is pruned away for all relations */ continue; } - subqueryTask = QueryPushdownTaskCreate(query, shardOffset, - relationRestrictionContext, taskIdIndex, - taskType, modifyRequiresMasterEvaluation); + Task *subqueryTask = QueryPushdownTaskCreate(query, shardOffset, + relationRestrictionContext, + taskIdIndex, + taskType, + modifyRequiresMasterEvaluation); subqueryTask->jobId = jobId; sqlTaskList = lappend(sqlTaskList, subqueryTask); @@ -2367,7 +2306,6 @@ ErrorIfUnsupportedShardDistribution(Query *query) foreach(relationIdCell, nonReferenceRelations) { Oid relationId = lfirst_oid(relationIdCell); - bool coPartitionedTables = false; Oid currentRelationId = relationId; /* get shard list of first relation and continue for the next relation */ @@ -2380,8 +2318,8 @@ ErrorIfUnsupportedShardDistribution(Query *query) } /* check if this table has 1-1 shard partitioning with first table */ - coPartitionedTables = CoPartitionedTables(firstTableRelationId, - currentRelationId); + bool coPartitionedTables = CoPartitionedTables(firstTableRelationId, + currentRelationId); if (!coPartitionedTables) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -2406,10 +2344,8 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex, StringInfo queryString = makeStringInfo(); ListCell *restrictionCell = NULL; - Task *subqueryTask = NULL; List 
*taskShardList = NIL; List *relationShardList = NIL; - List *selectPlacementList = NIL; uint64 jobId = INVALID_JOB_ID; uint64 anchorShardId = INVALID_SHARD_ID; bool modifyWithSubselect = false; @@ -2435,11 +2371,9 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex, RelationRestriction *relationRestriction = (RelationRestriction *) lfirst(restrictionCell); Oid relationId = relationRestriction->relationId; - DistTableCacheEntry *cacheEntry = NULL; ShardInterval *shardInterval = NULL; - RelationShard *relationShard = NULL; - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { /* reference table only has one shard */ @@ -2469,7 +2403,7 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex, taskShardList = lappend(taskShardList, list_make1(shardInterval)); - relationShard = CitusMakeNode(RelationShard); + RelationShard *relationShard = CitusMakeNode(RelationShard); relationShard->relationId = shardInterval->relationId; relationShard->shardId = shardInterval->shardId; @@ -2478,7 +2412,7 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex, Assert(anchorShardId != INVALID_SHARD_ID); - selectPlacementList = WorkersContainingAllShards(taskShardList); + List *selectPlacementList = WorkersContainingAllShards(taskShardList); if (list_length(selectPlacementList) == 0) { ereport(ERROR, (errmsg("cannot find a worker that has active placements for all " @@ -2502,7 +2436,7 @@ QueryPushdownTaskCreate(Query *originalQuery, int shardIndex, (List *) taskQuery->jointree->quals); } - subqueryTask = CreateBasicTask(jobId, taskId, taskType, NULL); + Task *subqueryTask = CreateBasicTask(jobId, taskId, taskType, NULL); if ((taskType == MODIFY_TASK && !modifyRequiresMasterEvaluation) || taskType == SQL_TASK) @@ -2534,7 +2468,6 @@ bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId) { bool coPartitionedTables = 
true; - uint32 intervalIndex = 0; DistTableCacheEntry *firstTableCache = DistributedTableCacheEntry(firstRelationId); DistTableCacheEntry *secondTableCache = DistributedTableCacheEntry(secondRelationId); ShardInterval **sortedFirstIntervalArray = firstTableCache->sortedShardIntervalArray; @@ -2588,7 +2521,7 @@ CoPartitionedTables(Oid firstRelationId, Oid secondRelationId) * and if any pair of shard intervals are not equal or they are not located * in the same node it returns false. */ - for (intervalIndex = 0; intervalIndex < firstListShardCount; intervalIndex++) + for (uint32 intervalIndex = 0; intervalIndex < firstListShardCount; intervalIndex++) { ShardInterval *firstInterval = sortedFirstIntervalArray[intervalIndex]; ShardInterval *secondInterval = sortedSecondIntervalArray[intervalIndex]; @@ -2654,15 +2587,11 @@ ShardIntervalsEqual(FmgrInfo *comparisonFunction, ShardInterval *firstInterval, ShardInterval *secondInterval) { bool shardIntervalsEqual = false; - Datum firstMin = 0; - Datum firstMax = 0; - Datum secondMin = 0; - Datum secondMax = 0; - firstMin = firstInterval->minValue; - firstMax = firstInterval->maxValue; - secondMin = secondInterval->minValue; - secondMax = secondInterval->maxValue; + Datum firstMin = firstInterval->minValue; + Datum firstMax = firstInterval->maxValue; + Datum secondMin = secondInterval->minValue; + Datum secondMax = secondInterval->maxValue; if (firstInterval->minValueExists && firstInterval->maxValueExists && secondInterval->minValueExists && secondInterval->maxValueExists) @@ -2698,10 +2627,6 @@ SqlTaskList(Job *job) uint64 jobId = job->jobId; bool anchorRangeTableBasedAssignment = false; uint32 anchorRangeTableId = 0; - Node *whereClauseTree = NULL; - List *rangeTableFragmentsList = NIL; - List *fragmentCombinationList = NIL; - ListCell *fragmentCombinationCell = NULL; Query *jobQuery = job->jobQuery; List *rangeTableList = jobQuery->rtable; @@ -2732,7 +2657,8 @@ SqlTaskList(Job *job) * that the query string is generated as 
(...) AND (...) as opposed to * (...), (...). */ - whereClauseTree = (Node *) make_ands_explicit((List *) jobQuery->jointree->quals); + Node *whereClauseTree = (Node *) make_ands_explicit( + (List *) jobQuery->jointree->quals); jobQuery->jointree->quals = whereClauseTree; /* @@ -2740,8 +2666,9 @@ SqlTaskList(Job *job) * We also apply partition pruning based on the selection criteria. If all * range table fragments are pruned away, we return an empty task list. */ - rangeTableFragmentsList = RangeTableFragmentsList(rangeTableList, whereClauseList, - dependedJobList); + List *rangeTableFragmentsList = RangeTableFragmentsList(rangeTableList, + whereClauseList, + dependedJobList); if (rangeTableFragmentsList == NIL) { return NIL; @@ -2752,35 +2679,31 @@ SqlTaskList(Job *job) * with each other (and apply join pruning). Each fragment combination then * represents one SQL task's dependencies. */ - fragmentCombinationList = FragmentCombinationList(rangeTableFragmentsList, - jobQuery, dependedJobList); + List *fragmentCombinationList = FragmentCombinationList(rangeTableFragmentsList, + jobQuery, dependedJobList); - fragmentCombinationCell = NULL; + ListCell *fragmentCombinationCell = NULL; foreach(fragmentCombinationCell, fragmentCombinationList) { List *fragmentCombination = (List *) lfirst(fragmentCombinationCell); - List *dataFetchTaskList = NIL; - int32 dataFetchTaskCount = 0; - StringInfo sqlQueryString = NULL; - Task *sqlTask = NULL; - Query *taskQuery = NULL; - List *fragmentRangeTableList = NIL; /* create tasks to fetch fragments required for the sql task */ - dataFetchTaskList = DataFetchTaskList(jobId, taskIdIndex, fragmentCombination); - dataFetchTaskCount = list_length(dataFetchTaskList); + List *dataFetchTaskList = DataFetchTaskList(jobId, taskIdIndex, + fragmentCombination); + int32 dataFetchTaskCount = list_length(dataFetchTaskList); taskIdIndex += dataFetchTaskCount; /* update range table entries with fragment aliases (in place) */ - taskQuery = 
copyObject(jobQuery); - fragmentRangeTableList = taskQuery->rtable; + Query *taskQuery = copyObject(jobQuery); + List *fragmentRangeTableList = taskQuery->rtable; UpdateRangeTableAlias(fragmentRangeTableList, fragmentCombination); /* transform the updated task query to a SQL query string */ - sqlQueryString = makeStringInfo(); + StringInfo sqlQueryString = makeStringInfo(); pg_get_query_def(taskQuery, sqlQueryString); - sqlTask = CreateBasicTask(jobId, taskIdIndex, SQL_TASK, sqlQueryString->data); + Task *sqlTask = CreateBasicTask(jobId, taskIdIndex, SQL_TASK, + sqlQueryString->data); sqlTask->dependedTaskList = dataFetchTaskList; sqlTask->relationShardList = BuildRelationShardList(fragmentRangeTableList, fragmentCombination); @@ -3049,16 +2972,14 @@ RangeTableFragmentsList(List *rangeTableList, List *whereClauseList, } else if (rangeTableKind == CITUS_RTE_REMOTE_QUERY) { - MapMergeJob *dependedMapMergeJob = NULL; List *mergeTaskFragmentList = NIL; - List *mergeTaskList = NIL; ListCell *mergeTaskCell = NULL; Job *dependedJob = JobForRangeTable(dependedJobList, rangeTableEntry); Assert(CitusIsA(dependedJob, MapMergeJob)); - dependedMapMergeJob = (MapMergeJob *) dependedJob; - mergeTaskList = dependedMapMergeJob->mergeTaskList; + MapMergeJob *dependedMapMergeJob = (MapMergeJob *) dependedJob; + List *mergeTaskList = dependedMapMergeJob->mergeTaskList; /* if there are no tasks for the depended job, just return NIL */ if (mergeTaskList == NIL) @@ -3099,16 +3020,12 @@ RangeTableFragmentsList(List *rangeTableList, List *whereClauseList, Node * BuildBaseConstraint(Var *column) { - Node *baseConstraint = NULL; - OpExpr *lessThanExpr = NULL; - OpExpr *greaterThanExpr = NULL; - /* Build these expressions with only one argument for now */ - lessThanExpr = MakeOpExpression(column, BTLessEqualStrategyNumber); - greaterThanExpr = MakeOpExpression(column, BTGreaterEqualStrategyNumber); + OpExpr *lessThanExpr = MakeOpExpression(column, BTLessEqualStrategyNumber); + OpExpr 
*greaterThanExpr = MakeOpExpression(column, BTGreaterEqualStrategyNumber); /* Build base constaint as an and of two qual conditions */ - baseConstraint = make_and_qual((Node *) lessThanExpr, (Node *) greaterThanExpr); + Node *baseConstraint = make_and_qual((Node *) lessThanExpr, (Node *) greaterThanExpr); return baseConstraint; } @@ -3126,19 +3043,14 @@ MakeOpExpression(Var *variable, int16 strategyNumber) Oid typeModId = variable->vartypmod; Oid collationId = variable->varcollid; - OperatorCacheEntry *operatorCacheEntry = NULL; Oid accessMethodId = BTREE_AM_OID; - Oid operatorId = InvalidOid; - Oid operatorClassInputType = InvalidOid; - Const *constantValue = NULL; - OpExpr *expression = NULL; - char typeType = 0; - operatorCacheEntry = LookupOperatorByType(typeId, accessMethodId, strategyNumber); + OperatorCacheEntry *operatorCacheEntry = LookupOperatorByType(typeId, accessMethodId, + strategyNumber); - operatorId = operatorCacheEntry->operatorId; - operatorClassInputType = operatorCacheEntry->operatorClassInputType; - typeType = operatorCacheEntry->typeType; + Oid operatorId = operatorCacheEntry->operatorId; + Oid operatorClassInputType = operatorCacheEntry->operatorClassInputType; + char typeType = operatorCacheEntry->typeType; /* * Relabel variable if input type of default operator class is not equal to @@ -3151,15 +3063,15 @@ MakeOpExpression(Var *variable, int16 strategyNumber) -1, collationId, COERCE_IMPLICIT_CAST); } - constantValue = makeNullConst(operatorClassInputType, typeModId, collationId); + Const *constantValue = makeNullConst(operatorClassInputType, typeModId, collationId); /* Now make the expression with the given variable and a null constant */ - expression = (OpExpr *) make_opclause(operatorId, - InvalidOid, /* no result type yet */ - false, /* no return set */ - (Expr *) variable, - (Expr *) constantValue, - InvalidOid, collationId); + OpExpr *expression = (OpExpr *) make_opclause(operatorId, + InvalidOid, /* no result type yet */ + false, /* 
no return set */ + (Expr *) variable, + (Expr *) constantValue, + InvalidOid, collationId); /* Set implementing function id and result type */ expression->opfuncid = get_opcode(operatorId); @@ -3200,11 +3112,7 @@ LookupOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber) /* if not found in the cache, call GetOperatorByType and put the result in cache */ if (matchingCacheEntry == NULL) { - MemoryContext oldContext = NULL; Oid operatorClassId = GetDefaultOpClass(typeId, accessMethodId); - Oid operatorId = InvalidOid; - Oid operatorClassInputType = InvalidOid; - char typeType = InvalidOid; if (operatorClassId == InvalidOid) { @@ -3214,9 +3122,9 @@ LookupOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber) } /* fill the other fields to the cache */ - operatorId = GetOperatorByType(typeId, accessMethodId, strategyNumber); - operatorClassInputType = get_opclass_input_type(operatorClassId); - typeType = get_typtype(operatorClassInputType); + Oid operatorId = GetOperatorByType(typeId, accessMethodId, strategyNumber); + Oid operatorClassInputType = get_opclass_input_type(operatorClassId); + char typeType = get_typtype(operatorClassInputType); /* make sure we've initialized CacheMemoryContext */ if (CacheMemoryContext == NULL) @@ -3224,7 +3132,7 @@ LookupOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber) CreateCacheMemoryContext(); } - oldContext = MemoryContextSwitchTo(CacheMemoryContext); + MemoryContext oldContext = MemoryContextSwitchTo(CacheMemoryContext); matchingCacheEntry = palloc0(sizeof(OperatorCacheEntry)); matchingCacheEntry->typeId = typeId; @@ -3392,8 +3300,6 @@ UpdateConstraint(Node *baseConstraint, ShardInterval *shardInterval) Node *minNode = get_rightop((Expr *) greaterThanExpr); /* right op */ Node *maxNode = get_rightop((Expr *) lessThanExpr); /* right op */ - Const *minConstant = NULL; - Const *maxConstant = NULL; Assert(shardInterval != NULL); Assert(shardInterval->minValueExists); @@ -3401,8 +3307,8 @@ 
UpdateConstraint(Node *baseConstraint, ShardInterval *shardInterval) Assert(IsA(minNode, Const)); Assert(IsA(maxNode, Const)); - minConstant = (Const *) minNode; - maxConstant = (Const *) maxNode; + Const *minConstant = (Const *) minNode; + Const *maxConstant = (Const *) maxNode; minConstant->constvalue = shardInterval->minValue; maxConstant->constvalue = shardInterval->maxValue; @@ -3425,13 +3331,13 @@ FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery, List *dependedJobList) { List *fragmentCombinationList = NIL; - JoinSequenceNode *joinSequenceArray = NULL; List *fragmentCombinationQueue = NIL; List *emptyList = NIL; /* find a sequence that joins the range tables in the list */ - joinSequenceArray = JoinSequenceArray(rangeTableFragmentsList, jobQuery, - dependedJobList); + JoinSequenceNode *joinSequenceArray = JoinSequenceArray(rangeTableFragmentsList, + jobQuery, + dependedJobList); /* * We use breadth-first search with pruning to create fragment combinations. @@ -3441,25 +3347,19 @@ FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery, fragmentCombinationQueue = lappend(fragmentCombinationQueue, emptyList); while (fragmentCombinationQueue != NIL) { - List *fragmentCombination = NIL; - int32 joinSequenceIndex = 0; - uint32 tableId = 0; - List *tableFragments = NIL; ListCell *tableFragmentCell = NULL; - int32 joiningTableId = NON_PRUNABLE_JOIN; int32 joiningTableSequenceIndex = -1; - int32 rangeTableCount = 0; /* pop first element from the fragment queue */ - fragmentCombination = linitial(fragmentCombinationQueue); + List *fragmentCombination = linitial(fragmentCombinationQueue); fragmentCombinationQueue = list_delete_first(fragmentCombinationQueue); /* * If this combination covered all range tables in a join sequence, add * this combination to our result set. 
*/ - joinSequenceIndex = list_length(fragmentCombination); - rangeTableCount = list_length(rangeTableFragmentsList); + int32 joinSequenceIndex = list_length(fragmentCombination); + int32 rangeTableCount = list_length(rangeTableFragmentsList); if (joinSequenceIndex == rangeTableCount) { fragmentCombinationList = lappend(fragmentCombinationList, @@ -3468,15 +3368,16 @@ FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery, } /* find the next range table to add to our search space */ - tableId = joinSequenceArray[joinSequenceIndex].rangeTableId; - tableFragments = FindRangeTableFragmentsList(rangeTableFragmentsList, tableId); + uint32 tableId = joinSequenceArray[joinSequenceIndex].rangeTableId; + List *tableFragments = FindRangeTableFragmentsList(rangeTableFragmentsList, + tableId); /* resolve sequence index for the previous range table we join against */ - joiningTableId = joinSequenceArray[joinSequenceIndex].joiningRangeTableId; + int32 joiningTableId = joinSequenceArray[joinSequenceIndex].joiningRangeTableId; if (joiningTableId != NON_PRUNABLE_JOIN) { - int32 sequenceIndex = 0; - for (sequenceIndex = 0; sequenceIndex < rangeTableCount; sequenceIndex++) + for (int32 sequenceIndex = 0; sequenceIndex < rangeTableCount; + sequenceIndex++) { JoinSequenceNode *joinSequenceNode = &joinSequenceArray[sequenceIndex]; if (joinSequenceNode->rangeTableId == joiningTableId) @@ -3537,12 +3438,11 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended uint32 rangeTableCount = (uint32) list_length(rangeTableList); uint32 sequenceNodeSize = sizeof(JoinSequenceNode); uint32 joinedTableCount = 0; - List *joinExprList = NIL; ListCell *joinExprCell = NULL; uint32 firstRangeTableId = 1; JoinSequenceNode *joinSequenceArray = palloc0(rangeTableCount * sequenceNodeSize); - joinExprList = JoinExprList(jobQuery->jointree); + List *joinExprList = JoinExprList(jobQuery->jointree); /* pick first range table as starting table for the join sequence */ 
if (list_length(joinExprList) > 0) @@ -3565,7 +3465,6 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended { JoinExpr *joinExpr = (JoinExpr *) lfirst(joinExprCell); RangeTblRef *rightTableRef = (RangeTblRef *) joinExpr->rarg; - JoinSequenceNode *nextJoinSequenceNode = NULL; uint32 nextRangeTableId = rightTableRef->rtindex; ListCell *nextJoinClauseCell = NULL; Index existingRangeTableId = 0; @@ -3591,27 +3490,21 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended foreach(nextJoinClauseCell, nextJoinClauseList) { OpExpr *nextJoinClause = (OpExpr *) lfirst(nextJoinClauseCell); - Var *leftColumn = NULL; - Var *rightColumn = NULL; - Index leftRangeTableId = 0; - Index rightRangeTableId = 0; - bool leftPartitioned = false; - bool rightPartitioned = false; if (!IsJoinClause((Node *) nextJoinClause)) { continue; } - leftColumn = LeftColumnOrNULL(nextJoinClause); - rightColumn = RightColumnOrNULL(nextJoinClause); + Var *leftColumn = LeftColumnOrNULL(nextJoinClause); + Var *rightColumn = RightColumnOrNULL(nextJoinClause); if (leftColumn == NULL || rightColumn == NULL) { continue; } - leftRangeTableId = leftColumn->varno; - rightRangeTableId = rightColumn->varno; + Index leftRangeTableId = leftColumn->varno; + Index rightRangeTableId = rightColumn->varno; /* * We have a table from the existing join list joining with the next @@ -3636,10 +3529,10 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended continue; } - leftPartitioned = PartitionedOnColumn(leftColumn, rangeTableList, - dependedJobList); - rightPartitioned = PartitionedOnColumn(rightColumn, rangeTableList, - dependedJobList); + bool leftPartitioned = PartitionedOnColumn(leftColumn, rangeTableList, + dependedJobList); + bool rightPartitioned = PartitionedOnColumn(rightColumn, rangeTableList, + dependedJobList); if (leftPartitioned && rightPartitioned) { /* make sure this join clause references only simple columns */ @@ -3651,7 
+3544,7 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *depended } /* set next joining range table's info in the join sequence */ - nextJoinSequenceNode = &joinSequenceArray[joinedTableCount]; + JoinSequenceNode *nextJoinSequenceNode = &joinSequenceArray[joinedTableCount]; if (applyJoinPruning) { nextJoinSequenceNode->rangeTableId = nextRangeTableId; @@ -3707,9 +3600,6 @@ PartitionedOnColumn(Var *column, List *rangeTableList, List *dependedJobList) { Job *job = JobForRangeTable(dependedJobList, rangeTableEntry); MapMergeJob *mapMergeJob = (MapMergeJob *) job; - Var *partitionColumn = NULL; - Var *remoteRelationColumn = NULL; - TargetEntry *targetEntry = NULL; /* * The column's current attribute number is it's location in the target @@ -3722,12 +3612,12 @@ PartitionedOnColumn(Var *column, List *rangeTableList, List *dependedJobList) Assert(columnIndex >= 0); Assert(columnIndex < list_length(targetEntryList)); - targetEntry = (TargetEntry *) list_nth(targetEntryList, columnIndex); - remoteRelationColumn = (Var *) targetEntry->expr; + TargetEntry *targetEntry = (TargetEntry *) list_nth(targetEntryList, columnIndex); + Var *remoteRelationColumn = (Var *) targetEntry->expr; Assert(IsA(remoteRelationColumn, Var)); /* retrieve the partition column for the job */ - partitionColumn = mapMergeJob->partitionColumn; + Var *partitionColumn = mapMergeJob->partitionColumn; if (partitionColumn->varattno == remoteRelationColumn->varattno) { partitionedOnColumn = true; @@ -3800,9 +3690,6 @@ static bool JoinPrunable(RangeTableFragment *leftFragment, RangeTableFragment *rightFragment) { bool joinPrunable = false; - bool overlap = false; - ShardInterval *leftFragmentInterval = NULL; - ShardInterval *rightFragmentInterval = NULL; /* * If both range tables are remote queries, we then have a hash repartition @@ -3834,10 +3721,10 @@ JoinPrunable(RangeTableFragment *leftFragment, RangeTableFragment *rightFragment * We have a single (re)partition join. 
We now get shard intervals for both * fragments, and then check if these intervals overlap. */ - leftFragmentInterval = FragmentInterval(leftFragment); - rightFragmentInterval = FragmentInterval(rightFragment); + ShardInterval *leftFragmentInterval = FragmentInterval(leftFragment); + ShardInterval *rightFragmentInterval = FragmentInterval(rightFragment); - overlap = ShardIntervalsOverlap(leftFragmentInterval, rightFragmentInterval); + bool overlap = ShardIntervalsOverlap(leftFragmentInterval, rightFragmentInterval); if (!overlap) { if (IsLoggableLevel(DEBUG2)) @@ -3871,11 +3758,9 @@ FragmentInterval(RangeTableFragment *fragment) } else if (fragment->fragmentType == CITUS_RTE_REMOTE_QUERY) { - Task *mergeTask = NULL; - Assert(CitusIsA(fragment->fragmentReference, Task)); - mergeTask = (Task *) fragment->fragmentReference; + Task *mergeTask = (Task *) fragment->fragmentReference; fragmentInterval = mergeTask->shardInterval; } @@ -3892,16 +3777,11 @@ ShardIntervalsOverlap(ShardInterval *firstInterval, ShardInterval *secondInterva DistributedTableCacheEntry(firstInterval->relationId); FmgrInfo *comparisonFunction = intervalRelation->shardIntervalCompareFunction; - Datum firstMin = 0; - Datum firstMax = 0; - Datum secondMin = 0; - Datum secondMax = 0; - - firstMin = firstInterval->minValue; - firstMax = firstInterval->maxValue; - secondMin = secondInterval->minValue; - secondMax = secondInterval->maxValue; + Datum firstMin = firstInterval->minValue; + Datum firstMax = firstInterval->maxValue; + Datum secondMin = secondInterval->minValue; + Datum secondMax = secondInterval->maxValue; /* * We need to have min/max values for both intervals first. 
Then, we assume @@ -3934,25 +3814,21 @@ ShardIntervalsOverlap(ShardInterval *firstInterval, ShardInterval *secondInterva static StringInfo FragmentIntervalString(ShardInterval *fragmentInterval) { - StringInfo fragmentIntervalString = NULL; Oid typeId = fragmentInterval->valueTypeId; Oid outputFunctionId = InvalidOid; bool typeVariableLength = false; - FmgrInfo *outputFunction = NULL; - char *minValueString = NULL; - char *maxValueString = NULL; Assert(fragmentInterval->minValueExists); Assert(fragmentInterval->maxValueExists); - outputFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); + FmgrInfo *outputFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); getTypeOutputInfo(typeId, &outputFunctionId, &typeVariableLength); fmgr_info(outputFunctionId, outputFunction); - minValueString = OutputFunctionCall(outputFunction, fragmentInterval->minValue); - maxValueString = OutputFunctionCall(outputFunction, fragmentInterval->maxValue); + char *minValueString = OutputFunctionCall(outputFunction, fragmentInterval->minValue); + char *maxValueString = OutputFunctionCall(outputFunction, fragmentInterval->maxValue); - fragmentIntervalString = makeStringInfo(); + StringInfo fragmentIntervalString = makeStringInfo(); appendStringInfo(fragmentIntervalString, "[%s,%s]", minValueString, maxValueString); return fragmentIntervalString; @@ -3996,30 +3872,24 @@ DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList) static StringInfo DatumArrayString(Datum *datumArray, uint32 datumCount, Oid datumTypeId) { - StringInfo arrayStringInfo = NULL; - FmgrInfo *arrayOutFunction = NULL; - ArrayType *arrayObject = NULL; - Datum arrayObjectDatum = 0; - Datum arrayStringDatum = 0; - char *arrayString = NULL; int16 typeLength = 0; bool typeByValue = false; char typeAlignment = 0; /* construct the array object from the given array */ get_typlenbyvalalign(datumTypeId, &typeLength, &typeByValue, &typeAlignment); - arrayObject = construct_array(datumArray, datumCount, datumTypeId, - 
typeLength, typeByValue, typeAlignment); - arrayObjectDatum = PointerGetDatum(arrayObject); + ArrayType *arrayObject = construct_array(datumArray, datumCount, datumTypeId, + typeLength, typeByValue, typeAlignment); + Datum arrayObjectDatum = PointerGetDatum(arrayObject); /* convert the array object to its string representation */ - arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); + FmgrInfo *arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); fmgr_info(ARRAY_OUT_FUNC_ID, arrayOutFunction); - arrayStringDatum = FunctionCall1(arrayOutFunction, arrayObjectDatum); - arrayString = DatumGetCString(arrayStringDatum); + Datum arrayStringDatum = FunctionCall1(arrayOutFunction, arrayObjectDatum); + char *arrayString = DatumGetCString(arrayStringDatum); - arrayStringInfo = makeStringInfo(); + StringInfo arrayStringInfo = makeStringInfo(); appendStringInfo(arrayStringInfo, "%s", arrayString); return arrayStringInfo; @@ -4108,7 +3978,6 @@ UpdateRangeTableAlias(List *rangeTableList, List *fragmentList) static Alias * FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment) { - Alias *alias = NULL; char *aliasName = NULL; char *schemaName = NULL; char *fragmentName = NULL; @@ -4163,7 +4032,7 @@ FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment) * We need to set the aliasname to relation name, as pg_get_query_def() uses * the relation name to disambiguate column names from different tables. 
*/ - alias = rangeTableEntry->alias; + Alias *alias = rangeTableEntry->alias; if (alias == NULL) { alias = makeNode(Alias); @@ -4194,12 +4063,10 @@ AnchorShardId(List *fragmentList, uint32 anchorRangeTableId) RangeTableFragment *fragment = (RangeTableFragment *) lfirst(fragmentCell); if (fragment->rangeTableId == anchorRangeTableId) { - ShardInterval *shardInterval = NULL; - Assert(fragment->fragmentType == CITUS_RTE_RELATION); Assert(CitusIsA(fragment->fragmentReference, ShardInterval)); - shardInterval = (ShardInterval *) fragment->fragmentReference; + ShardInterval *shardInterval = (ShardInterval *) fragment->fragmentReference; anchorShardId = shardInterval->shardId; break; } @@ -4237,11 +4104,10 @@ PruneSqlTaskDependencies(List *sqlTaskList) */ if (dataFetchTask->taskType == MERGE_FETCH_TASK) { - Task *mergeTaskReference = NULL; List *mergeFetchDependencyList = dataFetchTask->dependedTaskList; Assert(list_length(mergeFetchDependencyList) == 1); - mergeTaskReference = (Task *) linitial(mergeFetchDependencyList); + Task *mergeTaskReference = (Task *) linitial(mergeFetchDependencyList); prunedDependedTaskList = lappend(prunedDependedTaskList, mergeTaskReference); @@ -4298,7 +4164,6 @@ MapTaskList(MapMergeJob *mapMergeJob, List *filterTaskList) Task *filterTask = (Task *) lfirst(filterTaskCell); uint64 jobId = filterTask->jobId; uint32 taskId = filterTask->taskId; - Task *mapTask = NULL; /* wrap repartition query string around filter query string */ StringInfo mapQueryString = makeStringInfo(); @@ -4349,7 +4214,7 @@ MapTaskList(MapMergeJob *mapMergeJob, List *filterTaskList) } /* convert filter query task into map task */ - mapTask = filterTask; + Task *mapTask = filterTask; mapTask->queryString = mapQueryString->data; mapTask->taskType = MAP_TASK; @@ -4373,9 +4238,8 @@ GenerateSyntheticShardIntervalArray(int partitionCount) ShardInterval **shardIntervalArray = palloc0(partitionCount * sizeof(ShardInterval *)); uint64 hashTokenIncrement = HASH_TOKEN_COUNT / 
partitionCount; - int shardIndex = 0; - for (shardIndex = 0; shardIndex < partitionCount; ++shardIndex) + for (int shardIndex = 0; shardIndex < partitionCount; ++shardIndex) { ShardInterval *shardInterval = CitusMakeNode(ShardInterval); @@ -4477,14 +4341,9 @@ ColumnName(Var *column, List *rangeTableList) static StringInfo SplitPointArrayString(ArrayType *splitPointObject, Oid columnType, int32 columnTypeMod) { - StringInfo splitPointArrayString = NULL; Datum splitPointDatum = PointerGetDatum(splitPointObject); Oid outputFunctionId = InvalidOid; bool typeVariableLength = false; - FmgrInfo *arrayOutFunction = NULL; - char *arrayOutputText = NULL; - char *arrayOutputEscapedText = NULL; - char *arrayOutTypeName = NULL; Oid arrayOutType = get_array_type(columnType); if (arrayOutType == InvalidOid) @@ -4494,17 +4353,17 @@ SplitPointArrayString(ArrayType *splitPointObject, Oid columnType, int32 columnT columnTypeName))); } - arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); + FmgrInfo *arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); getTypeOutputInfo(arrayOutType, &outputFunctionId, &typeVariableLength); fmgr_info(outputFunctionId, arrayOutFunction); - arrayOutputText = OutputFunctionCall(arrayOutFunction, splitPointDatum); - arrayOutputEscapedText = quote_literal_cstr(arrayOutputText); + char *arrayOutputText = OutputFunctionCall(arrayOutFunction, splitPointDatum); + char *arrayOutputEscapedText = quote_literal_cstr(arrayOutputText); /* add an explicit cast to array's string representation */ - arrayOutTypeName = format_type_with_typemod(arrayOutType, columnTypeMod); + char *arrayOutTypeName = format_type_with_typemod(arrayOutType, columnTypeMod); - splitPointArrayString = makeStringInfo(); + StringInfo splitPointArrayString = makeStringInfo(); appendStringInfo(splitPointArrayString, "%s::%s", arrayOutputEscapedText, arrayOutTypeName); @@ -4523,8 +4382,6 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex) List 
*mergeTaskList = NIL; uint64 jobId = mapMergeJob->job.jobId; uint32 partitionCount = mapMergeJob->partitionCount; - uint32 partitionId = 0; - uint32 initialPartitionId = 0; /* build column name and column type arrays (table schema) */ Query *filterQuery = mapMergeJob->job.jobQuery; @@ -4543,7 +4400,7 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex) * range re-partitioned OUTER joins, we will need these rows for the * relation whose rows are retained in the OUTER join. */ - initialPartitionId = 0; + uint32 initialPartitionId = 0; if (mapMergeJob->partitionType == RANGE_PARTITION_TYPE) { initialPartitionId = 1; @@ -4555,7 +4412,8 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex) } /* build merge tasks and their associated "map output fetch" tasks */ - for (partitionId = initialPartitionId; partitionId < partitionCount; partitionId++) + for (uint32 partitionId = initialPartitionId; partitionId < partitionCount; + partitionId++) { Task *mergeTask = NULL; List *mapOutputFetchTaskList = NIL; @@ -4652,7 +4510,6 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex) static StringInfo ColumnNameArrayString(uint32 columnCount, uint64 generatingJobId) { - StringInfo columnNameArrayString = NULL; Datum *columnNameArray = palloc0(columnCount * sizeof(Datum)); uint32 columnNameIndex = 0; @@ -4670,7 +4527,8 @@ ColumnNameArrayString(uint32 columnCount, uint64 generatingJobId) columnNameIndex++; } - columnNameArrayString = DatumArrayString(columnNameArray, columnCount, CSTRINGOID); + StringInfo columnNameArrayString = DatumArrayString(columnNameArray, columnCount, + CSTRINGOID); return columnNameArrayString; } @@ -4683,7 +4541,6 @@ ColumnNameArrayString(uint32 columnCount, uint64 generatingJobId) static StringInfo ColumnTypeArrayString(List *targetEntryList) { - StringInfo columnTypeArrayString = NULL; ListCell *targetEntryCell = NULL; uint32 columnCount = (uint32) 
list_length(targetEntryList); @@ -4704,7 +4561,8 @@ ColumnTypeArrayString(List *targetEntryList) columnTypeIndex++; } - columnTypeArrayString = DatumArrayString(columnTypeArray, columnCount, CSTRINGOID); + StringInfo columnTypeArrayString = DatumArrayString(columnTypeArray, columnCount, + CSTRINGOID); return columnTypeArrayString; } @@ -4721,13 +4579,10 @@ static List * AssignTaskList(List *sqlTaskList) { List *assignedSqlTaskList = NIL; - Task *firstSqlTask = NULL; bool hasAnchorShardId = false; - bool hasMergeTaskDependencies = false; ListCell *sqlTaskCell = NULL; List *primarySqlTaskList = NIL; ListCell *primarySqlTaskCell = NULL; - List *constrainedSqlTaskList = NIL; ListCell *constrainedSqlTaskCell = NULL; /* no tasks to assign */ @@ -4736,7 +4591,7 @@ AssignTaskList(List *sqlTaskList) return NIL; } - firstSqlTask = (Task *) linitial(sqlTaskList); + Task *firstSqlTask = (Task *) linitial(sqlTaskList); if (firstSqlTask->anchorShardId != INVALID_SHARD_ID) { hasAnchorShardId = true; @@ -4747,7 +4602,7 @@ AssignTaskList(List *sqlTaskList) * one independently of the other. We therefore go ahead and assign these * SQL tasks using the "anchor shard based" assignment algorithms. */ - hasMergeTaskDependencies = HasMergeTaskDependencies(sqlTaskList); + bool hasMergeTaskDependencies = HasMergeTaskDependencies(sqlTaskList); if (!hasMergeTaskDependencies) { Assert(hasAnchorShardId); @@ -4812,7 +4667,7 @@ AssignTaskList(List *sqlTaskList) * primary's task assignment. We propagate the primary's task assignment in * each set to the remaining (constrained) tasks. 
*/ - constrainedSqlTaskList = TaskListDifference(sqlTaskList, primarySqlTaskList); + List *constrainedSqlTaskList = TaskListDifference(sqlTaskList, primarySqlTaskList); foreach(constrainedSqlTaskCell, constrainedSqlTaskList) { @@ -5024,7 +4879,6 @@ static List * GreedyAssignTaskList(List *taskList) { List *assignedTaskList = NIL; - List *activeShardPlacementLists = NIL; uint32 assignedTaskCount = 0; uint32 taskCount = list_length(taskList); @@ -5039,7 +4893,7 @@ GreedyAssignTaskList(List *taskList) * their insertion time, and append them to a new list. */ taskList = SortList(taskList, CompareTasksByShardId); - activeShardPlacementLists = ActiveShardPlacementLists(taskList); + List *activeShardPlacementLists = ActiveShardPlacementLists(taskList); while (assignedTaskCount < taskCount) { @@ -5105,8 +4959,6 @@ GreedyAssignTask(WorkerNode *workerNode, List *taskList, List *activeShardPlacem { Task *task = (Task *) lfirst(taskCell); List *placementList = (List *) lfirst(placementListCell); - ShardPlacement *placement = NULL; - uint32 placementCount = 0; /* check if we already assigned this task */ if (task == NULL) @@ -5115,13 +4967,14 @@ GreedyAssignTask(WorkerNode *workerNode, List *taskList, List *activeShardPlacem } /* check if we have enough replicas */ - placementCount = list_length(placementList); + uint32 placementCount = list_length(placementList); if (placementCount <= replicaIndex) { continue; } - placement = (ShardPlacement *) list_nth(placementList, replicaIndex); + ShardPlacement *placement = (ShardPlacement *) list_nth(placementList, + replicaIndex); if ((strncmp(placement->nodeName, workerName, WORKER_LENGTH) == 0) && (placement->nodePort == workerPort)) { @@ -5233,7 +5086,6 @@ static List * ReorderAndAssignTaskList(List *taskList, List * (*reorderFunction)(Task *, List *)) { List *assignedTaskList = NIL; - List *activeShardPlacementLists = NIL; ListCell *taskCell = NULL; ListCell *placementListCell = NULL; uint32 unAssignedTaskCount = 0; @@ -5244,7 
+5096,7 @@ ReorderAndAssignTaskList(List *taskList, List * (*reorderFunction)(Task *, List * these lists just to make our policy more deterministic. */ taskList = SortList(taskList, CompareTasksByShardId); - activeShardPlacementLists = ActiveShardPlacementLists(taskList); + List *activeShardPlacementLists = ActiveShardPlacementLists(taskList); forboth(taskCell, taskList, placementListCell, activeShardPlacementLists) { @@ -5255,15 +5107,14 @@ ReorderAndAssignTaskList(List *taskList, List * (*reorderFunction)(Task *, List uint32 activePlacementCount = list_length(placementList); if (activePlacementCount > 0) { - ShardPlacement *primaryPlacement = NULL; - if (reorderFunction != NULL) { placementList = reorderFunction(task, placementList); } task->taskPlacementList = placementList; - primaryPlacement = (ShardPlacement *) linitial(task->taskPlacementList); + ShardPlacement *primaryPlacement = (ShardPlacement *) linitial( + task->taskPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); @@ -5395,10 +5246,9 @@ ActivePlacementList(List *placementList) foreach(placementCell, placementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); - WorkerNode *workerNode = NULL; /* check if the worker node for this shard placement is active */ - workerNode = FindWorkerNode(placement->nodeName, placement->nodePort); + WorkerNode *workerNode = FindWorkerNode(placement->nodeName, placement->nodePort); if (workerNode != NULL && workerNode->isActive) { activePlacementList = lappend(activePlacementList, placement); @@ -5420,8 +5270,7 @@ LeftRotateList(List *list, uint32 rotateCount) { List *rotatedList = list_copy(list); - uint32 rotateIndex = 0; - for (rotateIndex = 0; rotateIndex < rotateCount; rotateIndex++) + for (uint32 rotateIndex = 0; rotateIndex < rotateCount; rotateIndex++) { void *firstElement = linitial(rotatedList); @@ -5489,10 +5338,9 @@ 
AssignDualHashTaskList(List *taskList) { Task *task = (Task *) lfirst(taskCell); List *taskPlacementList = NIL; - ShardPlacement *primaryPlacement = NULL; - uint32 replicaIndex = 0; - for (replicaIndex = 0; replicaIndex < ShardReplicationFactor; replicaIndex++) + for (uint32 replicaIndex = 0; replicaIndex < ShardReplicationFactor; + replicaIndex++) { uint32 assignmentOffset = beginningNodeIndex + assignedTaskIndex + replicaIndex; @@ -5509,7 +5357,8 @@ AssignDualHashTaskList(List *taskList) task->taskPlacementList = taskPlacementList; - primaryPlacement = (ShardPlacement *) linitial(task->taskPlacementList); + ShardPlacement *primaryPlacement = (ShardPlacement *) linitial( + task->taskPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); @@ -5606,12 +5455,11 @@ MergeTableQueryString(uint32 taskIdIndex, List *targetEntryList) StringInfo mergeTableName = makeStringInfo(); StringInfo columnsString = makeStringInfo(); ListCell *targetEntryCell = NULL; - uint32 columnCount = 0; uint32 columnIndex = 0; appendStringInfo(mergeTableName, "%s%s", taskTableName->data, MERGE_TABLE_SUFFIX); - columnCount = (uint32) list_length(targetEntryList); + uint32 columnCount = (uint32) list_length(targetEntryList); foreach(targetEntryCell, targetEntryList) { @@ -5619,14 +5467,12 @@ MergeTableQueryString(uint32 taskIdIndex, List *targetEntryList) Node *columnExpression = (Node *) targetEntry->expr; Oid columnTypeId = exprType(columnExpression); int32 columnTypeMod = exprTypmod(columnExpression); - char *columnName = NULL; - char *columnType = NULL; StringInfo columnNameString = makeStringInfo(); appendStringInfo(columnNameString, MERGE_COLUMN_FORMAT, columnIndex); - columnName = columnNameString->data; - columnType = format_type_with_typemod(columnTypeId, columnTypeMod); + char *columnName = columnNameString->data; + char *columnType = format_type_with_typemod(columnTypeId, columnTypeMod); 
appendStringInfo(columnsString, "%s %s", columnName, columnType); @@ -5657,16 +5503,11 @@ IntermediateTableQueryString(uint64 jobId, uint32 taskIdIndex, Query *reduceQuer StringInfo columnsString = makeStringInfo(); StringInfo taskReduceQueryString = makeStringInfo(); Query *taskReduceQuery = copyObject(reduceQuery); - RangeTblEntry *rangeTableEntry = NULL; - Alias *referenceNames = NULL; - List *columnNames = NIL; - List *rangeTableList = NIL; ListCell *columnNameCell = NULL; - uint32 columnCount = 0; uint32 columnIndex = 0; - columnCount = FinalTargetEntryCount(reduceQuery->targetList); - columnNames = DerivedColumnNameList(columnCount, jobId); + uint32 columnCount = FinalTargetEntryCount(reduceQuery->targetList); + List *columnNames = DerivedColumnNameList(columnCount, jobId); foreach(columnNameCell, columnNames) { @@ -5684,9 +5525,9 @@ IntermediateTableQueryString(uint64 jobId, uint32 taskIdIndex, Query *reduceQuer appendStringInfo(mergeTableName, "%s%s", taskTableName->data, MERGE_TABLE_SUFFIX); - rangeTableList = taskReduceQuery->rtable; - rangeTableEntry = (RangeTblEntry *) linitial(rangeTableList); - referenceNames = rangeTableEntry->eref; + List *rangeTableList = taskReduceQuery->rtable; + RangeTblEntry *rangeTableEntry = (RangeTblEntry *) linitial(rangeTableList); + Alias *referenceNames = rangeTableEntry->eref; referenceNames->aliasname = mergeTableName->data; rangeTableEntry->alias = rangeTableEntry->eref; diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index abdb3dd58..bc7ce1311 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -260,13 +260,11 @@ CreateSingleTaskRouterPlan(DistributedPlan *distributedPlan, Query *originalQuer Query *query, PlannerRestrictionContext *plannerRestrictionContext) { - Job *job = NULL; - distributedPlan->modLevel = RowModifyLevelForQuery(query); /* we cannot have multi 
shard update/delete query via this code path */ - job = RouterJob(originalQuery, plannerRestrictionContext, - &distributedPlan->planningError); + Job *job = RouterJob(originalQuery, plannerRestrictionContext, + &distributedPlan->planningError); if (distributedPlan->planningError != NULL) { @@ -302,7 +300,6 @@ ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex) Oid relationId = shardInterval->relationId; char partitionMethod = PartitionMethod(shardInterval->relationId); Var *partitionColumn = NULL; - Node *baseConstraint = NULL; if (partitionMethod == DISTRIBUTE_BY_HASH) { @@ -321,7 +318,7 @@ ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex) } /* build the base expression for constraint */ - baseConstraint = BuildBaseConstraint(partitionColumn); + Node *baseConstraint = BuildBaseConstraint(partitionColumn); /* walk over shard list and check if shards can be pruned */ if (shardInterval->minValueExists && shardInterval->maxValueExists) @@ -349,14 +346,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval List *targetList = subqery->targetList; ListCell *targetEntryCell = NULL; Var *targetPartitionColumnVar = NULL; - Oid integer4GEoperatorId = InvalidOid; - Oid integer4LEoperatorId = InvalidOid; - TypeCacheEntry *typeEntry = NULL; - FuncExpr *hashFunctionExpr = NULL; - OpExpr *greaterThanAndEqualsBoundExpr = NULL; - OpExpr *lessThanAndEqualsBoundExpr = NULL; List *boundExpressionList = NIL; - Expr *andedBoundExpressions = NULL; /* iterate through the target entries */ foreach(targetEntryCell, targetList) @@ -374,20 +364,20 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval /* we should have found target partition column */ Assert(targetPartitionColumnVar != NULL); - integer4GEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, - INT4OID, - BTGreaterEqualStrategyNumber); - integer4LEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, - 
INT4OID, - BTLessEqualStrategyNumber); + Oid integer4GEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, + INT4OID, + BTGreaterEqualStrategyNumber); + Oid integer4LEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, + INT4OID, + BTLessEqualStrategyNumber); /* ensure that we find the correct operators */ Assert(integer4GEoperatorId != InvalidOid); Assert(integer4LEoperatorId != InvalidOid); /* look up the type cache */ - typeEntry = lookup_type_cache(targetPartitionColumnVar->vartype, - TYPECACHE_HASH_PROC_FINFO); + TypeCacheEntry *typeEntry = lookup_type_cache(targetPartitionColumnVar->vartype, + TYPECACHE_HASH_PROC_FINFO); /* probable never possible given that the tables are already hash partitioned */ if (!OidIsValid(typeEntry->hash_proc_finfo.fn_oid)) @@ -398,7 +388,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval } /* generate hashfunc(partCol) expression */ - hashFunctionExpr = makeNode(FuncExpr); + FuncExpr *hashFunctionExpr = makeNode(FuncExpr); hashFunctionExpr->funcid = CitusWorkerHashFunctionId(); hashFunctionExpr->args = list_make1(targetPartitionColumnVar); @@ -406,7 +396,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval hashFunctionExpr->funcresulttype = INT4OID; /* generate hashfunc(partCol) >= shardMinValue OpExpr */ - greaterThanAndEqualsBoundExpr = + OpExpr *greaterThanAndEqualsBoundExpr = (OpExpr *) make_opclause(integer4GEoperatorId, InvalidOid, false, (Expr *) hashFunctionExpr, @@ -421,7 +411,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval get_func_rettype(greaterThanAndEqualsBoundExpr->opfuncid); /* generate hashfunc(partCol) <= shardMinValue OpExpr */ - lessThanAndEqualsBoundExpr = + OpExpr *lessThanAndEqualsBoundExpr = (OpExpr *) make_opclause(integer4LEoperatorId, InvalidOid, false, (Expr *) hashFunctionExpr, @@ -438,7 +428,7 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval 
*shardInterval boundExpressionList = lappend(boundExpressionList, greaterThanAndEqualsBoundExpr); boundExpressionList = lappend(boundExpressionList, lessThanAndEqualsBoundExpr); - andedBoundExpressions = make_ands_explicit(boundExpressionList); + Expr *andedBoundExpressions = make_ands_explicit(boundExpressionList); /* finally add the quals */ if (subqery->jointree->quals == NULL) @@ -461,19 +451,15 @@ AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval RangeTblEntry * ExtractSelectRangeTableEntry(Query *query) { - List *fromList = NULL; - RangeTblRef *reference = NULL; - RangeTblEntry *subqueryRte = NULL; - Assert(InsertSelectIntoDistributedTable(query)); /* * Since we already asserted InsertSelectIntoDistributedTable() it is safe to access * both lists */ - fromList = query->jointree->fromlist; - reference = linitial(fromList); - subqueryRte = rt_fetch(reference->rtindex, query->rtable); + List *fromList = query->jointree->fromlist; + RangeTblRef *reference = linitial(fromList); + RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable); return subqueryRte; } @@ -490,8 +476,6 @@ ExtractSelectRangeTableEntry(Query *query) Oid ModifyQueryResultRelationId(Query *query) { - RangeTblEntry *resultRte = NULL; - /* only modify queries have result relations */ if (!IsModifyCommand(query)) { @@ -499,7 +483,7 @@ ModifyQueryResultRelationId(Query *query) errmsg("input query is not a modification query"))); } - resultRte = ExtractResultRelationRTE(query); + RangeTblEntry *resultRte = ExtractResultRelationRTE(query); Assert(OidIsValid(resultRte->relid)); return resultRte->relid; @@ -562,7 +546,6 @@ DeferredErrorMessage * ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuery, PlannerRestrictionContext *plannerRestrictionContext) { - DeferredErrorMessage *deferredError = NULL; Oid distributedTableId = ExtractFirstDistributedTableId(queryTree); uint32 rangeTableId = 1; Var *partitionColumn = 
PartitionColumn(distributedTableId, rangeTableId); @@ -571,7 +554,7 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer uint32 queryTableCount = 0; CmdType commandType = queryTree->commandType; - deferredError = DeferErrorIfModifyView(queryTree); + DeferredErrorMessage *deferredError = DeferErrorIfModifyView(queryTree); if (deferredError != NULL) { return deferredError; @@ -624,7 +607,6 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer { CommonTableExpr *cte = (CommonTableExpr *) lfirst(cteCell); Query *cteQuery = (Query *) cte->ctequery; - DeferredErrorMessage *cteError = NULL; if (cteQuery->commandType != CMD_SELECT) { @@ -649,7 +631,7 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer NULL, NULL); } - cteError = MultiRouterPlannableQuery(cteQuery); + DeferredErrorMessage *cteError = MultiRouterPlannableQuery(cteQuery); if (cteError) { return cteError; @@ -957,12 +939,7 @@ DeferErrorIfModifyView(Query *queryTree) DeferredErrorMessage * ErrorIfOnConflictNotSupported(Query *queryTree) { - Oid distributedTableId = InvalidOid; uint32 rangeTableId = 1; - Var *partitionColumn = NULL; - List *onConflictSet = NIL; - Node *arbiterWhere = NULL; - Node *onConflictWhere = NULL; ListCell *setTargetCell = NULL; bool specifiesPartitionValue = false; @@ -972,12 +949,12 @@ ErrorIfOnConflictNotSupported(Query *queryTree) return NULL; } - distributedTableId = ExtractFirstDistributedTableId(queryTree); - partitionColumn = PartitionColumn(distributedTableId, rangeTableId); + Oid distributedTableId = ExtractFirstDistributedTableId(queryTree); + Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); - onConflictSet = queryTree->onConflict->onConflictSet; - arbiterWhere = queryTree->onConflict->arbiterWhere; - onConflictWhere = queryTree->onConflict->onConflictWhere; + List *onConflictSet = queryTree->onConflict->onConflictSet; + Node *arbiterWhere = 
queryTree->onConflict->arbiterWhere; + Node *onConflictWhere = queryTree->onConflict->onConflictWhere; /* * onConflictSet is expanded via expand_targetlist() on the standard planner. @@ -1207,11 +1184,10 @@ UpdateOrDeleteQuery(Query *query) static bool MasterIrreducibleExpression(Node *expression, bool *varArgument, bool *badCoalesce) { - bool result; WalkerState data; data.containsVar = data.varArgument = data.badCoalesce = false; - result = MasterIrreducibleExpressionWalker(expression, &data); + bool result = MasterIrreducibleExpressionWalker(expression, &data); *varArgument |= data.varArgument; *badCoalesce |= data.badCoalesce; @@ -1379,14 +1355,13 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre List *restrictClauseList = WhereClauseList(joinTree); OpExpr *equalityExpr = MakeOpExpression(column, BTEqualStrategyNumber); Const *rightConst = (Const *) get_rightop((Expr *) equalityExpr); - bool predicateIsImplied = false; rightConst->constvalue = newValue->constvalue; rightConst->constisnull = newValue->constisnull; rightConst->constbyval = newValue->constbyval; - predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), - restrictClauseList, false); + bool predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), + restrictClauseList, false); if (predicateIsImplied) { /* target entry of the form SET col = WHERE col = AND ... 
*/ @@ -1408,7 +1383,6 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann { Oid distributedTableId = ExtractFirstDistributedTableId(query); List *taskList = NIL; - Job *job = NULL; bool requiresMasterEvaluation = false; bool deferredPruning = false; Const *partitionKeyValue = NULL; @@ -1459,7 +1433,7 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann partitionKeyValue = ExtractInsertPartitionKeyValue(originalQuery); } - job = CreateJob(originalQuery); + Job *job = CreateJob(originalQuery); job->taskList = taskList; job->requiresMasterEvaluation = requiresMasterEvaluation; job->deferredPruning = deferredPruning; @@ -1475,9 +1449,7 @@ RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **plann static Job * CreateJob(Query *query) { - Job *job = NULL; - - job = CitusMakeNode(Job); + Job *job = CitusMakeNode(Job); job->jobId = UniqueJobId(); job->jobQuery = query; job->taskList = NIL; @@ -1498,8 +1470,6 @@ static bool CanShardPrune(Oid distributedTableId, Query *query) { uint32 rangeTableId = 1; - Var *partitionColumn = NULL; - List *insertValuesList = NIL; ListCell *insertValuesCell = NULL; if (query->commandType != CMD_INSERT) @@ -1508,7 +1478,7 @@ CanShardPrune(Oid distributedTableId, Query *query) return true; } - partitionColumn = PartitionColumn(distributedTableId, rangeTableId); + Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); if (partitionColumn == NULL) { /* can always do shard pruning for reference tables */ @@ -1516,7 +1486,7 @@ CanShardPrune(Oid distributedTableId, Query *query) } /* get full list of partition values and ensure they are all Consts */ - insertValuesList = ExtractInsertValuesList(query, partitionColumn); + List *insertValuesList = ExtractInsertValuesList(query, partitionColumn); foreach(insertValuesCell, insertValuesList) { InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell); @@ -1561,7 +1531,6 @@ List * 
RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError) { List *insertTaskList = NIL; - List *modifyRouteList = NIL; ListCell *modifyRouteCell = NULL; Oid distributedTableId = ExtractFirstDistributedTableId(query); @@ -1571,7 +1540,7 @@ RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError) Assert(query->commandType == CMD_INSERT); - modifyRouteList = BuildRoutesForInsert(query, planningError); + List *modifyRouteList = BuildRoutesForInsert(query, planningError); if (*planningError != NULL) { return NIL; @@ -1599,9 +1568,7 @@ RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError) static Task * CreateTask(TaskType taskType) { - Task *task = NULL; - - task = CitusMakeNode(Task); + Task *task = CitusMakeNode(Task); task->taskType = taskType; task->jobId = INVALID_JOB_ID; task->taskId = INVALID_TASK_ID; @@ -1666,22 +1633,18 @@ static Job * RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext, DeferredErrorMessage **planningError) { - Job *job = NULL; uint64 shardId = INVALID_SHARD_ID; List *placementList = NIL; List *relationShardList = NIL; List *prunedShardIntervalListList = NIL; - bool replacePrunedQueryWithDummy = false; - bool requiresMasterEvaluation = false; - RangeTblEntry *updateOrDeleteRTE = NULL; bool isMultiShardModifyQuery = false; Const *partitionKeyValue = NULL; /* router planner should create task even if it doesn't hit a shard at all */ - replacePrunedQueryWithDummy = true; + bool replacePrunedQueryWithDummy = true; /* check if this query requires master evaluation */ - requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery); + bool requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery); (*planningError) = PlanRouterQuery(originalQuery, plannerRestrictionContext, &placementList, &shardId, &relationShardList, @@ -1694,10 +1657,10 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon return NULL; } - job = 
CreateJob(originalQuery); + Job *job = CreateJob(originalQuery); job->partitionKeyValue = partitionKeyValue; - updateOrDeleteRTE = GetUpdateOrDeleteRTE(originalQuery); + RangeTblEntry *updateOrDeleteRTE = GetUpdateOrDeleteRTE(originalQuery); /* * If all of the shards are pruned, we replace the relation RTE into @@ -1770,16 +1733,12 @@ ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job, { if (taskAssignmentPolicy == TASK_ASSIGNMENT_ROUND_ROBIN) { - Task *task = NULL; - List *reorderedPlacementList = NIL; - ShardPlacement *primaryPlacement = NULL; - /* * We hit a single shard on router plans, and there should be only * one task in the task list */ Assert(list_length(job->taskList) == 1); - task = (Task *) linitial(job->taskList); + Task *task = (Task *) linitial(job->taskList); /* * For round-robin SELECT queries, we don't want to include the coordinator @@ -1796,10 +1755,11 @@ ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job, placementList = RemoveCoordinatorPlacement(placementList); /* reorder the placement list */ - reorderedPlacementList = RoundRobinReorder(task, placementList); + List *reorderedPlacementList = RoundRobinReorder(task, placementList); task->taskPlacementList = reorderedPlacementList; - primaryPlacement = (ShardPlacement *) linitial(reorderedPlacementList); + ShardPlacement *primaryPlacement = (ShardPlacement *) linitial( + reorderedPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); @@ -1916,16 +1876,14 @@ SingleShardModifyTaskList(Query *query, uint64 jobId, List *relationShardList, { Task *task = CreateTask(MODIFY_TASK); StringInfo queryString = makeStringInfo(); - DistTableCacheEntry *modificationTableCacheEntry = NULL; - char modificationPartitionMethod = 0; List *rangeTableList = NIL; - RangeTblEntry *updateOrDeleteRTE = NULL; ExtractRangeTableEntryWalker((Node *) query, &rangeTableList); - updateOrDeleteRTE = GetUpdateOrDeleteRTE(query); + 
RangeTblEntry *updateOrDeleteRTE = GetUpdateOrDeleteRTE(query); - modificationTableCacheEntry = DistributedTableCacheEntry(updateOrDeleteRTE->relid); - modificationPartitionMethod = modificationTableCacheEntry->partitionMethod; + DistTableCacheEntry *modificationTableCacheEntry = DistributedTableCacheEntry( + updateOrDeleteRTE->relid); + char modificationPartitionMethod = modificationTableCacheEntry->partitionMethod; if (modificationPartitionMethod == DISTRIBUTE_BY_NONE && SelectsFromDistributedTable(rangeTableList, query)) @@ -1983,14 +1941,14 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query) foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); - DistTableCacheEntry *cacheEntry = NULL; if (rangeTableEntry->relid == InvalidOid) { continue; } - cacheEntry = DistributedTableCacheEntry(rangeTableEntry->relid); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry( + rangeTableEntry->relid); if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE && (resultRangeTableEntry == NULL || resultRangeTableEntry->relid != rangeTableEntry->relid)) @@ -2242,7 +2200,6 @@ GetAnchorShardId(List *prunedShardIntervalListList) foreach(prunedShardIntervalListCell, prunedShardIntervalListList) { List *prunedShardIntervalList = (List *) lfirst(prunedShardIntervalListCell); - ShardInterval *shardInterval = NULL; /* no shard is present or all shards are pruned out case will be handled later */ if (prunedShardIntervalList == NIL) @@ -2250,7 +2207,7 @@ GetAnchorShardId(List *prunedShardIntervalListList) continue; } - shardInterval = linitial(prunedShardIntervalList); + ShardInterval *shardInterval = linitial(prunedShardIntervalList); if (ReferenceTableShardId(shardInterval->shardId)) { @@ -2341,7 +2298,6 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte List *prunedShardIntervalList = NIL; List *joinInfoList = relationRestriction->relOptInfo->joininfo; List 
*pseudoRestrictionList = extract_actual_clauses(joinInfoList, true); - bool whereFalseQuery = false; relationRestriction->prunedShardIntervalList = NIL; @@ -2351,7 +2307,7 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte * inside relOptInfo->joininfo list. We treat such cases as if all * shards of the table are pruned out. */ - whereFalseQuery = ContainsFalseClause(pseudoRestrictionList); + bool whereFalseQuery = ContainsFalseClause(pseudoRestrictionList); if (!whereFalseQuery && shardCount > 0) { Const *restrictionPartitionValueConst = NULL; @@ -2445,9 +2401,6 @@ WorkersContainingAllShards(List *prunedShardIntervalsList) foreach(prunedShardIntervalCell, prunedShardIntervalsList) { List *shardIntervalList = (List *) lfirst(prunedShardIntervalCell); - ShardInterval *shardInterval = NULL; - uint64 shardId = INVALID_SHARD_ID; - List *newPlacementList = NIL; if (shardIntervalList == NIL) { @@ -2456,11 +2409,11 @@ WorkersContainingAllShards(List *prunedShardIntervalsList) Assert(list_length(shardIntervalList) == 1); - shardInterval = (ShardInterval *) linitial(shardIntervalList); - shardId = shardInterval->shardId; + ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList); + uint64 shardId = shardInterval->shardId; /* retrieve all active shard placements for this shard */ - newPlacementList = FinalizedShardPlacementList(shardId); + List *newPlacementList = FinalizedShardPlacementList(shardId); if (firstShard) { @@ -2506,8 +2459,6 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); char partitionMethod = cacheEntry->partitionMethod; uint32 rangeTableId = 1; - Var *partitionColumn = NULL; - List *insertValuesList = NIL; List *modifyRouteList = NIL; ListCell *insertValuesCell = NULL; @@ -2516,24 +2467,20 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) /* reference tables can only have 
one shard */ if (partitionMethod == DISTRIBUTE_BY_NONE) { - int shardCount = 0; List *shardIntervalList = LoadShardIntervalList(distributedTableId); - RangeTblEntry *valuesRTE = NULL; - ShardInterval *shardInterval = NULL; - ModifyRoute *modifyRoute = NULL; - shardCount = list_length(shardIntervalList); + int shardCount = list_length(shardIntervalList); if (shardCount != 1) { ereport(ERROR, (errmsg("reference table cannot have %d shards", shardCount))); } - shardInterval = linitial(shardIntervalList); - modifyRoute = palloc(sizeof(ModifyRoute)); + ShardInterval *shardInterval = linitial(shardIntervalList); + ModifyRoute *modifyRoute = palloc(sizeof(ModifyRoute)); modifyRoute->shardId = shardInterval->shardId; - valuesRTE = ExtractDistributedInsertValuesRTE(query); + RangeTblEntry *valuesRTE = ExtractDistributedInsertValuesRTE(query); if (valuesRTE != NULL) { /* add the values list for a multi-row INSERT */ @@ -2549,18 +2496,15 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) return modifyRouteList; } - partitionColumn = PartitionColumn(distributedTableId, rangeTableId); + Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); /* get full list of insert values and iterate over them to prune */ - insertValuesList = ExtractInsertValuesList(query, partitionColumn); + List *insertValuesList = ExtractInsertValuesList(query, partitionColumn); foreach(insertValuesCell, insertValuesList) { InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell); - Const *partitionValueConst = NULL; List *prunedShardIntervalList = NIL; - int prunedShardIntervalCount = 0; - ShardInterval *targetShard = NULL; if (!IsA(insertValues->partitionValueExpr, Const)) { @@ -2568,7 +2512,7 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) return NIL; } - partitionValueConst = (Const *) insertValues->partitionValueExpr; + Const *partitionValueConst = (Const *) insertValues->partitionValueExpr; if 
(partitionValueConst->constisnull) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), @@ -2580,10 +2524,9 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) DISTRIBUTE_BY_RANGE) { Datum partitionValue = partitionValueConst->constvalue; - ShardInterval *shardInterval = NULL; cacheEntry = DistributedTableCacheEntry(distributedTableId); - shardInterval = FindShardInterval(partitionValue, cacheEntry); + ShardInterval *shardInterval = FindShardInterval(partitionValue, cacheEntry); if (shardInterval != NULL) { prunedShardIntervalList = list_make1(shardInterval); @@ -2591,7 +2534,6 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) } else { - List *restrictClauseList = NIL; Index tableId = 1; OpExpr *equalityExpr = MakeOpExpression(partitionColumn, BTEqualStrategyNumber); @@ -2604,13 +2546,13 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) rightConst->constisnull = partitionValueConst->constisnull; rightConst->constbyval = partitionValueConst->constbyval; - restrictClauseList = list_make1(equalityExpr); + List *restrictClauseList = list_make1(equalityExpr); prunedShardIntervalList = PruneShards(distributedTableId, tableId, restrictClauseList, NULL); } - prunedShardIntervalCount = list_length(prunedShardIntervalList); + int prunedShardIntervalCount = list_length(prunedShardIntervalList); if (prunedShardIntervalCount != 1) { char *partitionKeyString = cacheEntry->partitionKeyString; @@ -2651,7 +2593,7 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) return NIL; } - targetShard = (ShardInterval *) linitial(prunedShardIntervalList); + ShardInterval *targetShard = (ShardInterval *) linitial(prunedShardIntervalList); insertValues->shardId = targetShard->shardId; } @@ -2768,19 +2710,15 @@ NormalizeMultiRowInsertTargetList(Query *query) { TargetEntry *targetEntry = lfirst(targetEntryCell); Node *targetExprNode = (Node *) targetEntry->expr; - Oid targetType = 
InvalidOid; - int32 targetTypmod = -1; - Oid targetColl = InvalidOid; - Var *syntheticVar = NULL; /* RTE_VALUES comes 2nd, after destination table */ Index valuesVarno = 2; targetEntryNo++; - targetType = exprType(targetExprNode); - targetTypmod = exprTypmod(targetExprNode); - targetColl = exprCollation(targetExprNode); + Oid targetType = exprType(targetExprNode); + int32 targetTypmod = exprTypmod(targetExprNode); + Oid targetColl = exprCollation(targetExprNode); valuesRTE->coltypes = lappend_oid(valuesRTE->coltypes, targetType); valuesRTE->coltypmods = lappend_int(valuesRTE->coltypmods, targetTypmod); @@ -2794,8 +2732,8 @@ NormalizeMultiRowInsertTargetList(Query *query) } /* replace the original expression with a Var referencing values_lists */ - syntheticVar = makeVar(valuesVarno, targetEntryNo, targetType, targetTypmod, - targetColl, 0); + Var *syntheticVar = makeVar(valuesVarno, targetEntryNo, targetType, targetTypmod, + targetColl, 0); targetEntry->expr = (Expr *) syntheticVar; } } @@ -2935,11 +2873,10 @@ ExtractInsertValuesList(Query *query, Var *partitionColumn) if (IsA(targetEntry->expr, Var)) { Var *partitionVar = (Var *) targetEntry->expr; - RangeTblEntry *referencedRTE = NULL; ListCell *valuesListCell = NULL; Index ivIndex = 0; - referencedRTE = rt_fetch(partitionVar->varno, query->rtable); + RangeTblEntry *referencedRTE = rt_fetch(partitionVar->varno, query->rtable); foreach(valuesListCell, referencedRTE->values_lists) { InsertValues *insertValues = (InsertValues *) palloc(sizeof(InsertValues)); @@ -2980,10 +2917,7 @@ ExtractInsertPartitionKeyValue(Query *query) { Oid distributedTableId = ExtractFirstDistributedTableId(query); uint32 rangeTableId = 1; - Var *partitionColumn = NULL; - TargetEntry *targetEntry = NULL; Const *singlePartitionValueConst = NULL; - Node *targetExpression = NULL; char partitionMethod = PartitionMethod(distributedTableId); if (partitionMethod == DISTRIBUTE_BY_NONE) @@ -2991,15 +2925,16 @@ ExtractInsertPartitionKeyValue(Query 
*query) return NULL; } - partitionColumn = PartitionColumn(distributedTableId, rangeTableId); - targetEntry = get_tle_by_resno(query->targetList, partitionColumn->varattno); + Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); + TargetEntry *targetEntry = get_tle_by_resno(query->targetList, + partitionColumn->varattno); if (targetEntry == NULL) { /* partition column value not specified */ return NULL; } - targetExpression = strip_implicit_coercions((Node *) targetEntry->expr); + Node *targetExpression = strip_implicit_coercions((Node *) targetEntry->expr); /* * Multi-row INSERTs have a Var in the target list that points to @@ -3008,10 +2943,9 @@ ExtractInsertPartitionKeyValue(Query *query) if (IsA(targetExpression, Var)) { Var *partitionVar = (Var *) targetExpression; - RangeTblEntry *referencedRTE = NULL; ListCell *valuesListCell = NULL; - referencedRTE = rt_fetch(partitionVar->varno, query->rtable); + RangeTblEntry *referencedRTE = rt_fetch(partitionVar->varno, query->rtable); foreach(valuesListCell, referencedRTE->values_lists) { @@ -3019,7 +2953,6 @@ ExtractInsertPartitionKeyValue(Query *query) Node *partitionValueNode = list_nth(rowValues, partitionVar->varattno - 1); Expr *partitionValueExpr = (Expr *) strip_implicit_coercions( partitionValueNode); - Const *partitionValueConst = NULL; if (!IsA(partitionValueExpr, Const)) { @@ -3028,7 +2961,7 @@ ExtractInsertPartitionKeyValue(Query *query) break; } - partitionValueConst = (Const *) partitionValueExpr; + Const *partitionValueConst = (Const *) partitionValueExpr; if (singlePartitionValueConst == NULL) { @@ -3098,7 +3031,6 @@ MultiRouterPlannableQuery(Query *query) { /* only hash partitioned tables are supported */ Oid distributedTableId = rte->relid; - char partitionMethod = 0; if (!IsDistributedTable(distributedTableId)) { @@ -3109,7 +3041,7 @@ MultiRouterPlannableQuery(Query *query) NULL, NULL); } - partitionMethod = PartitionMethod(distributedTableId); + char partitionMethod = 
PartitionMethod(distributedTableId); if (!(partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_NONE || partitionMethod == DISTRIBUTE_BY_RANGE)) { diff --git a/src/backend/distributed/planner/postgres_planning_functions.c b/src/backend/distributed/planner/postgres_planning_functions.c index c867566c8..b45de35f3 100644 --- a/src/backend/distributed/planner/postgres_planning_functions.c +++ b/src/backend/distributed/planner/postgres_planning_functions.c @@ -43,9 +43,6 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList) Plan *plan = &node->plan; int numCols = list_length(distinctList); int keyno = 0; - AttrNumber *uniqColIdx; - Oid *uniqOperators; - Oid *uniqCollations; ListCell *slitem; plan->targetlist = lefttree->targetlist; @@ -58,9 +55,9 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList) * operators, as wanted by executor */ Assert(numCols > 0); - uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); - uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols); - uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols); + AttrNumber *uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); + Oid *uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols); + Oid *uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols); foreach(slitem, distinctList) { @@ -97,8 +94,6 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList) Plan *plan = &node->plan; int numCols = list_length(distinctList); int keyno = 0; - AttrNumber *uniqColIdx; - Oid *uniqOperators; ListCell *slitem; plan->targetlist = lefttree->targetlist; @@ -111,8 +106,8 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList) * operators, as wanted by executor */ Assert(numCols > 0); - uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); - uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols); + AttrNumber *uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); + Oid *uniqOperators = (Oid *) 
palloc(sizeof(Oid) * numCols); foreach(slitem, distinctList) { diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index 358f6a3b2..f5d0c75ec 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -49,14 +49,10 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti { ColocatedJoinChecker colocatedJoinChecker; - RangeTblEntry *anchorRangeTblEntry = NULL; Query *anchorSubquery = NULL; - PlannerRestrictionContext *anchorPlannerRestrictionContext = NULL; - RelationRestrictionContext *anchorRelationRestrictionContext = NULL; - List *anchorRestrictionEquivalences = NIL; /* we couldn't pick an anchor subquery, no need to continue */ - anchorRangeTblEntry = AnchorRte(subquery); + RangeTblEntry *anchorRangeTblEntry = AnchorRte(subquery); if (anchorRangeTblEntry == NULL) { colocatedJoinChecker.anchorRelationRestrictionList = NIL; @@ -84,11 +80,11 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti pg_unreachable(); } - anchorPlannerRestrictionContext = + PlannerRestrictionContext *anchorPlannerRestrictionContext = FilterPlannerRestrictionForQuery(restrictionContext, anchorSubquery); - anchorRelationRestrictionContext = + RelationRestrictionContext *anchorRelationRestrictionContext = anchorPlannerRestrictionContext->relationRestrictionContext; - anchorRestrictionEquivalences = + List *anchorRestrictionEquivalences = GenerateAllAttributeEquivalences(anchorPlannerRestrictionContext); /* fill the non colocated planning context */ @@ -191,9 +187,6 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker) List *filteredRestrictionList = filteredPlannerContext->relationRestrictionContext->relationRestrictionList; - List *unionedRelationRestrictionList = NULL; - RelationRestrictionContext *unionedRelationRestrictionContext = NULL; - 
PlannerRestrictionContext *unionedPlannerRestrictionContext = NULL; /* * There are no relations in the input subquery, such as a subquery @@ -213,7 +206,7 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker) * forming this temporary context is to check whether the context contains * distribution key equality or not. */ - unionedRelationRestrictionList = + List *unionedRelationRestrictionList = UnionRelationRestrictionLists(anchorRelationRestrictionList, filteredRestrictionList); @@ -224,11 +217,13 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker) * join restrictions, we're already relying on the attributeEquivalances * provided by the context. */ - unionedRelationRestrictionContext = palloc0(sizeof(RelationRestrictionContext)); + RelationRestrictionContext *unionedRelationRestrictionContext = palloc0( + sizeof(RelationRestrictionContext)); unionedRelationRestrictionContext->relationRestrictionList = unionedRelationRestrictionList; - unionedPlannerRestrictionContext = palloc0(sizeof(PlannerRestrictionContext)); + PlannerRestrictionContext *unionedPlannerRestrictionContext = palloc0( + sizeof(PlannerRestrictionContext)); unionedPlannerRestrictionContext->relationRestrictionContext = unionedRelationRestrictionContext; @@ -256,14 +251,11 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation) { Query *subquery = makeNode(Query); RangeTblRef *newRangeTableRef = makeNode(RangeTblRef); - RangeTblEntry *newRangeTableEntry = NULL; - Var *targetColumn = NULL; - TargetEntry *targetEntry = NULL; subquery->commandType = CMD_SELECT; /* we copy the input rteRelation to preserve the rteIdentity */ - newRangeTableEntry = copyObject(rteRelation); + RangeTblEntry *newRangeTableEntry = copyObject(rteRelation); subquery->rtable = list_make1(newRangeTableEntry); /* set the FROM expression to the subquery */ @@ -272,11 +264,12 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation) subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), 
NULL); /* Need the whole row as a junk var */ - targetColumn = makeWholeRowVar(newRangeTableEntry, newRangeTableRef->rtindex, 0, - false); + Var *targetColumn = makeWholeRowVar(newRangeTableEntry, newRangeTableRef->rtindex, 0, + false); /* create a dummy target entry */ - targetEntry = makeTargetEntry((Expr *) targetColumn, 1, "wholerow", true); + TargetEntry *targetEntry = makeTargetEntry((Expr *) targetColumn, 1, "wholerow", + true); subquery->targetList = lappend(subquery->targetList, targetEntry); @@ -292,15 +285,13 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation) static List * UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList) { - RelationRestrictionContext *unionedRestrictionContext = NULL; List *unionedRelationRestrictionList = NULL; ListCell *relationRestrictionCell = NULL; Relids rteIdentities = NULL; - List *allRestrictionList = NIL; /* list_concat destructively modifies the first list, thus copy it */ firstRelationList = list_copy(firstRelationList); - allRestrictionList = list_concat(firstRelationList, secondRelationList); + List *allRestrictionList = list_concat(firstRelationList, secondRelationList); foreach(relationRestrictionCell, allRestrictionList) { @@ -320,7 +311,8 @@ UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList) rteIdentities = bms_add_member(rteIdentities, rteIdentity); } - unionedRestrictionContext = palloc0(sizeof(RelationRestrictionContext)); + RelationRestrictionContext *unionedRestrictionContext = palloc0( + sizeof(RelationRestrictionContext)); unionedRestrictionContext->relationRestrictionList = unionedRelationRestrictionList; return unionedRelationRestrictionList; diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index e24368ad1..13d511b17 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -107,7 
+107,6 @@ bool ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery, PlannerRestrictionContext *plannerRestrictionContext) { - List *qualifierList = NIL; StringInfo errorMessage = NULL; /* @@ -183,7 +182,7 @@ ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery, * Some unsupported join clauses in logical planner * may be supported by subquery pushdown planner. */ - qualifierList = QualifierList(rewrittenQuery->jointree); + List *qualifierList = QualifierList(rewrittenQuery->jointree); if (DeferErrorIfUnsupportedClause(qualifierList) != NULL) { return true; @@ -283,7 +282,6 @@ bool WhereOrHavingClauseContainsSubquery(Query *query) { FromExpr *joinTree = query->jointree; - Node *queryQuals = NULL; if (FindNodeCheck(query->havingQual, IsNodeSubquery)) { @@ -295,7 +293,7 @@ WhereOrHavingClauseContainsSubquery(Query *query) return false; } - queryQuals = joinTree->quals; + Node *queryQuals = joinTree->quals; return FindNodeCheck(queryQuals, IsNodeSubquery); } @@ -450,15 +448,13 @@ WindowPartitionOnDistributionColumn(Query *query) foreach(windowClauseCell, windowClauseList) { WindowClause *windowClause = lfirst(windowClauseCell); - List *groupTargetEntryList = NIL; - bool partitionOnDistributionColumn = false; List *partitionClauseList = windowClause->partitionClause; List *targetEntryList = query->targetList; - groupTargetEntryList = + List *groupTargetEntryList = GroupTargetEntryList(partitionClauseList, targetEntryList); - partitionOnDistributionColumn = + bool partitionOnDistributionColumn = TargetListOnPartitionColumn(query, groupTargetEntryList); if (!partitionOnDistributionColumn) @@ -495,14 +491,13 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree, PlannerRestrictionContext *plannerRestrictionContext) { MultiNode *multiQueryNode = NULL; - DeferredErrorMessage *subqueryPushdownError = NULL; - DeferredErrorMessage *unsupportedQueryError = NULL; /* * This is a generic error check that applies to both subquery pushdown 
* and single table repartition subquery. */ - unsupportedQueryError = DeferErrorIfQueryNotSupported(originalQuery); + DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported( + originalQuery); if (unsupportedQueryError != NULL) { RaiseDeferredError(unsupportedQueryError, ERROR); @@ -513,38 +508,35 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree, * to create a logical plan, continue with trying the single table * repartition subquery planning. */ - subqueryPushdownError = DeferErrorIfUnsupportedSubqueryPushdown(originalQuery, - plannerRestrictionContext); + DeferredErrorMessage *subqueryPushdownError = DeferErrorIfUnsupportedSubqueryPushdown( + originalQuery, + plannerRestrictionContext); if (!subqueryPushdownError) { multiQueryNode = SubqueryPushdownMultiNodeTree(originalQuery); } else if (subqueryPushdownError) { - bool singleRelationRepartitionSubquery = false; - RangeTblEntry *subqueryRangeTableEntry = NULL; - Query *subqueryTree = NULL; - DeferredErrorMessage *repartitionQueryError = NULL; - List *subqueryEntryList = NULL; - /* * If not eligible for single relation repartition query, we should raise * subquery pushdown error. 
*/ - singleRelationRepartitionSubquery = + bool singleRelationRepartitionSubquery = SingleRelationRepartitionSubquery(originalQuery); if (!singleRelationRepartitionSubquery) { RaiseDeferredErrorInternal(subqueryPushdownError, ERROR); } - subqueryEntryList = SubqueryEntryList(queryTree); - subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList); + List *subqueryEntryList = SubqueryEntryList(queryTree); + RangeTblEntry *subqueryRangeTableEntry = (RangeTblEntry *) linitial( + subqueryEntryList); Assert(subqueryRangeTableEntry->rtekind == RTE_SUBQUERY); - subqueryTree = subqueryRangeTableEntry->subquery; + Query *subqueryTree = subqueryRangeTableEntry->subquery; - repartitionQueryError = DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree); + DeferredErrorMessage *repartitionQueryError = + DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree); if (repartitionQueryError) { RaiseDeferredErrorInternal(repartitionQueryError, ERROR); @@ -574,7 +566,6 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery, bool outerMostQueryHasLimit = false; ListCell *subqueryCell = NULL; List *subqueryList = NIL; - DeferredErrorMessage *error = NULL; if (originalQuery->limitCount != NULL) { @@ -610,7 +601,7 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery, } /* we shouldn't allow reference tables in the FROM clause when the query has sublinks */ - error = DeferErrorIfFromClauseRecurs(originalQuery); + DeferredErrorMessage *error = DeferErrorIfFromClauseRecurs(originalQuery); if (error) { return error; @@ -666,14 +657,12 @@ DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery, static DeferredErrorMessage * DeferErrorIfFromClauseRecurs(Query *queryTree) { - RecurringTuplesType recurType = RECURRING_TUPLES_INVALID; - if (!queryTree->hasSubLinks) { return NULL; } - recurType = FromClauseRecurringTupleType(queryTree); + RecurringTuplesType recurType = FromClauseRecurringTupleType(queryTree); if (recurType == 
RECURRING_TUPLES_REFERENCE_TABLE) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, @@ -892,9 +881,9 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi bool preconditionsSatisfied = true; char *errorDetail = NULL; StringInfo errorInfo = NULL; - DeferredErrorMessage *deferredError = NULL; - deferredError = DeferErrorIfUnsupportedTableCombination(subqueryTree); + DeferredErrorMessage *deferredError = DeferErrorIfUnsupportedTableCombination( + subqueryTree); if (deferredError) { return deferredError; @@ -1187,9 +1176,8 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree) if (IsA(leftArg, RangeTblRef)) { - Query *leftArgSubquery = NULL; leftArgRTI = ((RangeTblRef *) leftArg)->rtindex; - leftArgSubquery = rt_fetch(leftArgRTI, subqueryTree->rtable)->subquery; + Query *leftArgSubquery = rt_fetch(leftArgRTI, subqueryTree->rtable)->subquery; recurType = FromClauseRecurringTupleType(leftArgSubquery); if (recurType != RECURRING_TUPLES_INVALID) { @@ -1199,9 +1187,9 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree) if (IsA(rightArg, RangeTblRef)) { - Query *rightArgSubquery = NULL; rightArgRTI = ((RangeTblRef *) rightArg)->rtindex; - rightArgSubquery = rt_fetch(rightArgRTI, subqueryTree->rtable)->subquery; + Query *rightArgSubquery = rt_fetch(rightArgRTI, + subqueryTree->rtable)->subquery; recurType = FromClauseRecurringTupleType(rightArgSubquery); if (recurType != RECURRING_TUPLES_INVALID) { @@ -1251,7 +1239,6 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree) static bool ExtractSetOperationStatmentWalker(Node *node, List **setOperationList) { - bool walkerResult = false; if (node == NULL) { return false; @@ -1264,8 +1251,8 @@ ExtractSetOperationStatmentWalker(Node *node, List **setOperationList) (*setOperationList) = lappend(*setOperationList, setOperation); } - walkerResult = expression_tree_walker(node, ExtractSetOperationStatmentWalker, - setOperationList); + bool walkerResult = expression_tree_walker(node, 
ExtractSetOperationStatmentWalker, + setOperationList); return walkerResult; } @@ -1522,21 +1509,11 @@ static MultiNode * SubqueryPushdownMultiNodeTree(Query *queryTree) { List *targetEntryList = queryTree->targetList; - List *columnList = NIL; - List *flattenedExprList = NIL; - List *targetColumnList = NIL; MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect); - MultiTable *subqueryNode = NULL; - MultiProject *projectNode = NULL; - MultiExtendedOp *extendedOpNode = NULL; - MultiNode *currentTopNode = NULL; - Query *pushedDownQuery = NULL; - List *subqueryTargetEntryList = NIL; - List *havingClauseColumnList = NIL; - DeferredErrorMessage *unsupportedQueryError = NULL; /* verify we can perform distributed planning on this query */ - unsupportedQueryError = DeferErrorIfQueryNotSupported(queryTree); + DeferredErrorMessage *unsupportedQueryError = DeferErrorIfQueryNotSupported( + queryTree); if (unsupportedQueryError != NULL) { RaiseDeferredError(unsupportedQueryError, ERROR); @@ -1588,14 +1565,14 @@ SubqueryPushdownMultiNodeTree(Query *queryTree) * columnList. Columns mentioned in multiProject node and multiExtendedOp * node are indexed with their respective position in columnList. 
*/ - targetColumnList = pull_var_clause_default((Node *) targetEntryList); - havingClauseColumnList = pull_var_clause_default(queryTree->havingQual); - columnList = list_concat(targetColumnList, havingClauseColumnList); + List *targetColumnList = pull_var_clause_default((Node *) targetEntryList); + List *havingClauseColumnList = pull_var_clause_default(queryTree->havingQual); + List *columnList = list_concat(targetColumnList, havingClauseColumnList); - flattenedExprList = FlattenJoinVars(columnList, queryTree); + List *flattenedExprList = FlattenJoinVars(columnList, queryTree); /* create a target entry for each unique column */ - subqueryTargetEntryList = CreateSubqueryTargetEntryList(flattenedExprList); + List *subqueryTargetEntryList = CreateSubqueryTargetEntryList(flattenedExprList); /* * Update varno/varattno fields of columns in columnList to @@ -1605,7 +1582,7 @@ SubqueryPushdownMultiNodeTree(Query *queryTree) subqueryTargetEntryList); /* new query only has target entries, join tree, and rtable*/ - pushedDownQuery = makeNode(Query); + Query *pushedDownQuery = makeNode(Query); pushedDownQuery->commandType = queryTree->commandType; pushedDownQuery->targetList = subqueryTargetEntryList; pushedDownQuery->jointree = copyObject(queryTree->jointree); @@ -1614,13 +1591,13 @@ SubqueryPushdownMultiNodeTree(Query *queryTree) pushedDownQuery->querySource = queryTree->querySource; pushedDownQuery->hasSubLinks = queryTree->hasSubLinks; - subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery); + MultiTable *subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery); SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode); - currentTopNode = (MultiNode *) subqueryCollectNode; + MultiNode *currentTopNode = (MultiNode *) subqueryCollectNode; /* build project node for the columns to project */ - projectNode = MultiProjectNode(targetEntryList); + MultiProject *projectNode = MultiProjectNode(targetEntryList); SetChild((MultiUnaryNode *) projectNode, 
currentTopNode); currentTopNode = (MultiNode *) projectNode; @@ -1630,7 +1607,7 @@ SubqueryPushdownMultiNodeTree(Query *queryTree) * distinguish between aggregates and expressions; and we address this later * in the logical optimizer. */ - extendedOpNode = MultiExtendedOpNode(queryTree); + MultiExtendedOp *extendedOpNode = MultiExtendedOpNode(queryTree); /* * Postgres standard planner converts having qual node to a list of and @@ -1724,8 +1701,6 @@ FlattenJoinVarsMutator(Node *node, Query *queryTree) RangeTblEntry *rte = rt_fetch(column->varno, queryTree->rtable); if (rte->rtekind == RTE_JOIN) { - Node *newColumn = NULL; - /* * if join has an alias, it is copied over join RTE. We should * reference this RTE. @@ -1737,7 +1712,7 @@ FlattenJoinVarsMutator(Node *node, Query *queryTree) /* join RTE does not have and alias defined at this level, deeper look is needed */ Assert(column->varattno > 0); - newColumn = (Node *) list_nth(rte->joinaliasvars, column->varattno - 1); + Node *newColumn = (Node *) list_nth(rte->joinaliasvars, column->varattno - 1); Assert(newColumn != NULL); /* @@ -1894,7 +1869,6 @@ UpdateColumnToMatchingTargetEntry(Var *column, Node *flattenedExpr, List *target static MultiTable * MultiSubqueryPushdownTable(Query *subquery) { - MultiTable *subqueryTableNode = NULL; StringInfo rteName = makeStringInfo(); List *columnNamesList = NIL; ListCell *targetEntryCell = NULL; @@ -1907,7 +1881,7 @@ MultiSubqueryPushdownTable(Query *subquery) columnNamesList = lappend(columnNamesList, makeString(targetEntry->resname)); } - subqueryTableNode = CitusMakeNode(MultiTable); + MultiTable *subqueryTableNode = CitusMakeNode(MultiTable); subqueryTableNode->subquery = subquery; subqueryTableNode->relationId = SUBQUERY_PUSHDOWN_RELATION_ID; subqueryTableNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID; diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index bed395bb9..a71953a42 100644 --- 
a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -189,7 +189,6 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { RecursivePlanningContext context; - DeferredErrorMessage *error = NULL; recursivePlanningDepth++; @@ -217,7 +216,8 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery, context.allDistributionKeysInQueryAreEqual = AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext); - error = RecursivelyPlanSubqueriesAndCTEs(originalQuery, &context); + DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(originalQuery, + &context); if (error != NULL) { recursivePlanningDepth--; @@ -257,9 +257,7 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery, static DeferredErrorMessage * RecursivelyPlanSubqueriesAndCTEs(Query *query, RecursivePlanningContext *context) { - DeferredErrorMessage *error = NULL; - - error = RecursivelyPlanCTEs(query, context); + DeferredErrorMessage *error = RecursivelyPlanCTEs(query, context); if (error != NULL) { return error; @@ -410,14 +408,12 @@ ContainsSubquery(Query *query) static void RecursivelyPlanNonColocatedSubqueries(Query *subquery, RecursivePlanningContext *context) { - ColocatedJoinChecker colocatedJoinChecker; - FromExpr *joinTree = subquery->jointree; - PlannerRestrictionContext *restrictionContext = NULL; /* create the context for the non colocated subquery planning */ - restrictionContext = context->plannerRestrictionContext; - colocatedJoinChecker = CreateColocatedJoinChecker(subquery, restrictionContext); + PlannerRestrictionContext *restrictionContext = context->plannerRestrictionContext; + ColocatedJoinChecker colocatedJoinChecker = CreateColocatedJoinChecker(subquery, + restrictionContext); /* * Although this is a rare case, we weren't able to pick an anchor @@ -490,7 +486,6 @@ 
RecursivelyPlanNonColocatedJoinWalker(Node *joinNode, int rangeTableIndex = ((RangeTblRef *) joinNode)->rtindex; List *rangeTableList = colocatedJoinChecker->subquery->rtable; RangeTblEntry *rte = rt_fetch(rangeTableIndex, rangeTableList); - Query *subquery = NULL; /* we're only interested in subqueries for now */ if (rte->rtekind != RTE_SUBQUERY) @@ -502,7 +497,7 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode, * If the subquery is not colocated with the anchor subquery, * recursively plan it. */ - subquery = rte->subquery; + Query *subquery = rte->subquery; if (!SubqueryColocated(subquery, colocatedJoinChecker)) { RecursivelyPlanSubquery(subquery, recursivePlanningContext); @@ -560,7 +555,6 @@ static List * SublinkList(Query *originalQuery) { FromExpr *joinTree = originalQuery->jointree; - Node *queryQuals = NULL; List *sublinkList = NIL; if (!joinTree) @@ -568,7 +562,7 @@ SublinkList(Query *originalQuery) return NIL; } - queryQuals = joinTree->quals; + Node *queryQuals = joinTree->quals; ExtractSublinkWalker(queryQuals, &sublinkList); return sublinkList; @@ -610,17 +604,14 @@ ExtractSublinkWalker(Node *node, List **sublinkList) static bool ShouldRecursivelyPlanAllSubqueriesInWhere(Query *query) { - FromExpr *joinTree = NULL; - Node *whereClause = NULL; - - joinTree = query->jointree; + FromExpr *joinTree = query->jointree; if (joinTree == NULL) { /* there is no FROM clause */ return false; } - whereClause = joinTree->quals; + Node *whereClause = joinTree->quals; if (whereClause == NULL) { /* there is no WHERE clause */ @@ -703,11 +694,7 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) char *cteName = cte->ctename; Query *subquery = (Query *) cte->ctequery; uint64 planId = planningContext->planId; - uint32 subPlanId = 0; - char *resultId = NULL; List *cteTargetList = NIL; - Query *resultQuery = NULL; - DistributedSubPlan *subPlan = NULL; ListCell *rteCell = NULL; int replacedCtesCount = 0; @@ -729,7 +716,7 @@ 
RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) continue; } - subPlanId = list_length(planningContext->subPlanList) + 1; + uint32 subPlanId = list_length(planningContext->subPlanList) + 1; if (IsLoggableLevel(DEBUG1)) { @@ -742,11 +729,11 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) } /* build a sub plan for the CTE */ - subPlan = CreateDistributedSubPlan(subPlanId, subquery); + DistributedSubPlan *subPlan = CreateDistributedSubPlan(subPlanId, subquery); planningContext->subPlanList = lappend(planningContext->subPlanList, subPlan); /* build the result_id parameter for the call to read_intermediate_result */ - resultId = GenerateResultId(planId, subPlanId); + char *resultId = GenerateResultId(planId, subPlanId); if (subquery->returningList) { @@ -760,8 +747,8 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) } /* replace references to the CTE with a subquery that reads results */ - resultQuery = BuildSubPlanResultQuery(cteTargetList, cte->aliascolnames, - resultId); + Query *resultQuery = BuildSubPlanResultQuery(cteTargetList, cte->aliascolnames, + resultId); foreach(rteCell, context.cteReferenceList) { @@ -832,7 +819,6 @@ RecursivelyPlanSubqueryWalker(Node *node, RecursivePlanningContext *context) if (IsA(node, Query)) { Query *query = (Query *) node; - DeferredErrorMessage *error = NULL; context->level += 1; @@ -840,7 +826,7 @@ RecursivelyPlanSubqueryWalker(Node *node, RecursivePlanningContext *context) * First, make sure any subqueries and CTEs within this subquery * are recursively planned if necessary. 
*/ - error = RecursivelyPlanSubqueriesAndCTEs(query, context); + DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(query, context); if (error != NULL) { RaiseDeferredError(error, ERROR); @@ -934,19 +920,16 @@ static bool AllDistributionKeysInSubqueryAreEqual(Query *subquery, PlannerRestrictionContext *restrictionContext) { - bool allDistributionKeysInSubqueryAreEqual = false; - PlannerRestrictionContext *filteredRestrictionContext = NULL; - /* we don't support distribution eq. checks for CTEs yet */ if (subquery->cteList != NIL) { return false; } - filteredRestrictionContext = + PlannerRestrictionContext *filteredRestrictionContext = FilterPlannerRestrictionForQuery(restrictionContext, subquery); - allDistributionKeysInSubqueryAreEqual = + bool allDistributionKeysInSubqueryAreEqual = AllDistributionKeysInQueryAreEqual(subquery, filteredRestrictionContext); if (!allDistributionKeysInSubqueryAreEqual) { @@ -965,8 +948,6 @@ AllDistributionKeysInSubqueryAreEqual(Query *subquery, static bool ShouldRecursivelyPlanSetOperation(Query *query, RecursivePlanningContext *context) { - PlannerRestrictionContext *filteredRestrictionContext = NULL; - SetOperationStmt *setOperations = (SetOperationStmt *) query->setOperations; if (setOperations == NULL) { @@ -1000,7 +981,7 @@ ShouldRecursivelyPlanSetOperation(Query *query, RecursivePlanningContext *contex return true; } - filteredRestrictionContext = + PlannerRestrictionContext *filteredRestrictionContext = FilterPlannerRestrictionForQuery(context->plannerRestrictionContext, query); if (!SafeToPushdownUnionSubquery(filteredRestrictionContext)) { @@ -1062,9 +1043,6 @@ RecursivelyPlanSetOperations(Query *query, Node *node, static bool IsLocalTableRTE(Node *node) { - RangeTblEntry *rangeTableEntry = NULL; - Oid relationId = InvalidOid; - if (node == NULL) { return false; @@ -1075,7 +1053,7 @@ IsLocalTableRTE(Node *node) return false; } - rangeTableEntry = (RangeTblEntry *) node; + RangeTblEntry *rangeTableEntry = 
(RangeTblEntry *) node; if (rangeTableEntry->rtekind != RTE_RELATION) { return false; @@ -1086,7 +1064,7 @@ IsLocalTableRTE(Node *node) return false; } - relationId = rangeTableEntry->relid; + Oid relationId = rangeTableEntry->relid; if (IsDistributedTable(relationId)) { return false; @@ -1111,11 +1089,7 @@ IsLocalTableRTE(Node *node) static void RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningContext) { - DistributedSubPlan *subPlan = NULL; uint64 planId = planningContext->planId; - int subPlanId = 0; - char *resultId = NULL; - Query *resultQuery = NULL; Query *debugQuery = NULL; if (ContainsReferencesToOuterQuery(subquery)) @@ -1138,19 +1112,19 @@ RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningConte /* * Create the subplan and append it to the list in the planning context. */ - subPlanId = list_length(planningContext->subPlanList) + 1; + int subPlanId = list_length(planningContext->subPlanList) + 1; - subPlan = CreateDistributedSubPlan(subPlanId, subquery); + DistributedSubPlan *subPlan = CreateDistributedSubPlan(subPlanId, subquery); planningContext->subPlanList = lappend(planningContext->subPlanList, subPlan); /* build the result_id parameter for the call to read_intermediate_result */ - resultId = GenerateResultId(planId, subPlanId); + char *resultId = GenerateResultId(planId, subPlanId); /* * BuildSubPlanResultQuery() can optionally use provided column aliases. * We do not need to send additional alias list for subqueries. 
*/ - resultQuery = BuildSubPlanResultQuery(subquery->targetList, NIL, resultId); + Query *resultQuery = BuildSubPlanResultQuery(subquery->targetList, NIL, resultId); if (IsLoggableLevel(DEBUG1)) { @@ -1176,7 +1150,6 @@ RecursivelyPlanSubquery(Query *subquery, RecursivePlanningContext *planningConte static DistributedSubPlan * CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery) { - DistributedSubPlan *subPlan = NULL; int cursorOptions = 0; if (ContainsReadIntermediateResultFunction((Node *) subPlanQuery)) @@ -1192,7 +1165,7 @@ CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery) cursorOptions |= CURSOR_OPT_FORCE_DISTRIBUTED; } - subPlan = CitusMakeNode(DistributedSubPlan); + DistributedSubPlan *subPlan = CitusMakeNode(DistributedSubPlan); subPlan->plan = planner(subPlanQuery, cursorOptions, NULL); subPlan->subPlanId = subPlanId; @@ -1310,12 +1283,11 @@ ContainsReferencesToOuterQueryWalker(Node *node, VarLevelsUpWalkerContext *conte else if (IsA(node, Query)) { Query *query = (Query *) node; - bool found = false; int flags = 0; context->level += 1; - found = query_tree_walker(query, ContainsReferencesToOuterQueryWalker, - context, flags); + bool found = query_tree_walker(query, ContainsReferencesToOuterQueryWalker, + context, flags); context->level -= 1; return found; @@ -1383,19 +1355,16 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry) { Query *subquery = makeNode(Query); RangeTblRef *newRangeTableRef = makeNode(RangeTblRef); - RangeTblEntry *newRangeTableEntry = NULL; Var *targetColumn = NULL; TargetEntry *targetEntry = NULL; - RangeTblFunction *rangeTblFunction = NULL; AttrNumber targetColumnIndex = 0; - TupleDesc tupleDesc = NULL; - rangeTblFunction = linitial(rangeTblEntry->functions); + RangeTblFunction *rangeTblFunction = linitial(rangeTblEntry->functions); subquery->commandType = CMD_SELECT; /* copy the input rangeTblEntry to prevent cycles */ - newRangeTableEntry = copyObject(rangeTblEntry); + RangeTblEntry *newRangeTableEntry = 
copyObject(rangeTblEntry); /* set the FROM expression to the subquery */ subquery->rtable = list_make1(newRangeTableEntry); @@ -1407,8 +1376,8 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry) * If function return type is not composite or rowtype can't be determined, * tupleDesc is set to null here */ - tupleDesc = (TupleDesc) get_expr_result_tupdesc(rangeTblFunction->funcexpr, - true); + TupleDesc tupleDesc = (TupleDesc) get_expr_result_tupdesc(rangeTblFunction->funcexpr, + true); /* * If tupleDesc is not null, we iterate over all the attributes and @@ -1460,10 +1429,9 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry) else { /* create target entries for all columns returned by the function */ - List *functionColumnNames = NULL; ListCell *functionColumnName = NULL; - functionColumnNames = rangeTblEntry->eref->colnames; + List *functionColumnNames = rangeTblEntry->eref->colnames; foreach(functionColumnName, functionColumnNames) { char *columnName = strVal(lfirst(functionColumnName)); @@ -1574,19 +1542,10 @@ ShouldTransformRTE(RangeTblEntry *rangeTableEntry) Query * BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resultId) { - Query *resultQuery = NULL; - Const *resultIdConst = NULL; - Const *resultFormatConst = NULL; - FuncExpr *funcExpr = NULL; - Alias *funcAlias = NULL; List *funcColNames = NIL; List *funcColTypes = NIL; List *funcColTypMods = NIL; List *funcColCollations = NIL; - RangeTblFunction *rangeTableFunction = NULL; - RangeTblEntry *rangeTableEntry = NULL; - RangeTblRef *rangeTableRef = NULL; - FromExpr *joinTree = NULL; ListCell *targetEntryCell = NULL; List *targetList = NIL; int columnNumber = 1; @@ -1603,8 +1562,6 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu Oid columnType = exprType(targetExpr); Oid columnTypMod = exprTypmod(targetExpr); Oid columnCollation = exprCollation(targetExpr); - Var *functionColumnVar = NULL; - TargetEntry *newTargetEntry = NULL; if 
(targetEntry->resjunk) { @@ -1616,7 +1573,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu funcColTypMods = lappend_int(funcColTypMods, columnTypMod); funcColCollations = lappend_int(funcColCollations, columnCollation); - functionColumnVar = makeNode(Var); + Var *functionColumnVar = makeNode(Var); functionColumnVar->varno = 1; functionColumnVar->varattno = columnNumber; functionColumnVar->vartype = columnType; @@ -1627,7 +1584,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu functionColumnVar->varoattno = columnNumber; functionColumnVar->location = -1; - newTargetEntry = makeNode(TargetEntry); + TargetEntry *newTargetEntry = makeNode(TargetEntry); newTargetEntry->expr = (Expr *) functionColumnVar; newTargetEntry->resno = columnNumber; @@ -1659,7 +1616,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu columnNumber++; } - resultIdConst = makeNode(Const); + Const *resultIdConst = makeNode(Const); resultIdConst->consttype = TEXTOID; resultIdConst->consttypmod = -1; resultIdConst->constlen = -1; @@ -1674,7 +1631,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu copyFormatId = TextCopyFormatId(); } - resultFormatConst = makeNode(Const); + Const *resultFormatConst = makeNode(Const); resultFormatConst->consttype = CitusCopyFormatTypeId(); resultFormatConst->consttypmod = -1; resultFormatConst->constlen = 4; @@ -1684,7 +1641,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu resultFormatConst->location = -1; /* build the call to read_intermediate_result */ - funcExpr = makeNode(FuncExpr); + FuncExpr *funcExpr = makeNode(FuncExpr); funcExpr->funcid = CitusReadIntermediateResultFuncId(); funcExpr->funcretset = true; funcExpr->funcvariadic = false; @@ -1695,7 +1652,7 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu funcExpr->args = list_make2(resultIdConst, 
resultFormatConst); /* build the RTE for the call to read_intermediate_result */ - rangeTableFunction = makeNode(RangeTblFunction); + RangeTblFunction *rangeTableFunction = makeNode(RangeTblFunction); rangeTableFunction->funccolcount = list_length(funcColNames); rangeTableFunction->funccolnames = funcColNames; rangeTableFunction->funccoltypes = funcColTypes; @@ -1704,25 +1661,25 @@ BuildSubPlanResultQuery(List *targetEntryList, List *columnAliasList, char *resu rangeTableFunction->funcparams = NULL; rangeTableFunction->funcexpr = (Node *) funcExpr; - funcAlias = makeNode(Alias); + Alias *funcAlias = makeNode(Alias); funcAlias->aliasname = "intermediate_result"; funcAlias->colnames = funcColNames; - rangeTableEntry = makeNode(RangeTblEntry); + RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry); rangeTableEntry->rtekind = RTE_FUNCTION; rangeTableEntry->functions = list_make1(rangeTableFunction); rangeTableEntry->inFromCl = true; rangeTableEntry->eref = funcAlias; /* build the join tree using the read_intermediate_result RTE */ - rangeTableRef = makeNode(RangeTblRef); + RangeTblRef *rangeTableRef = makeNode(RangeTblRef); rangeTableRef->rtindex = 1; - joinTree = makeNode(FromExpr); + FromExpr *joinTree = makeNode(FromExpr); joinTree->fromlist = list_make1(rangeTableRef); /* build the SELECT query */ - resultQuery = makeNode(Query); + Query *resultQuery = makeNode(Query); resultQuery->commandType = CMD_SELECT; resultQuery->rtable = list_make1(rangeTableEntry); resultQuery->jointree = joinTree; diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index a4adb248d..6ab91ec2e 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -160,9 +160,6 @@ bool AllDistributionKeysInQueryAreEqual(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { - bool 
restrictionEquivalenceForPartitionKeys = false; - RelationRestrictionContext *restrictionContext = NULL; - /* we don't support distribution key equality checks for CTEs yet */ if (originalQuery->cteList != NIL) { @@ -170,13 +167,14 @@ AllDistributionKeysInQueryAreEqual(Query *originalQuery, } /* we don't support distribution key equality checks for local tables */ - restrictionContext = plannerRestrictionContext->relationRestrictionContext; + RelationRestrictionContext *restrictionContext = + plannerRestrictionContext->relationRestrictionContext; if (ContextContainsLocalRelation(restrictionContext)) { return false; } - restrictionEquivalenceForPartitionKeys = + bool restrictionEquivalenceForPartitionKeys = RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext); if (restrictionEquivalenceForPartitionKeys) { @@ -245,9 +243,6 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext AttributeEquivalenceClass *attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass)); ListCell *relationRestrictionCell = NULL; - List *relationRestrictionAttributeEquivalenceList = NIL; - List *joinRestrictionAttributeEquivalenceList = NIL; - List *allAttributeEquivalenceList = NIL; attributeEquivalance->equivalenceId = attributeEquivalenceId++; @@ -338,12 +333,12 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext * we determine whether all relations are joined on the partition column * by adding the equivalence classes that can be inferred from joins. 
*/ - relationRestrictionAttributeEquivalenceList = + List *relationRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForRelationRestrictions(restrictionContext); - joinRestrictionAttributeEquivalenceList = + List *joinRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForJoinRestrictions(joinRestrictionContext); - allAttributeEquivalenceList = + List *allAttributeEquivalenceList = list_concat(relationRestrictionAttributeEquivalenceList, joinRestrictionAttributeEquivalenceList); @@ -373,8 +368,6 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, AppendRelInfo *targetAppendRelInfo = NULL; ListCell *translatedVarCell = NULL; AttrNumber childAttrNumber = 0; - Var *relationPartitionKey = NULL; - List *translaterVars = NULL; *partitionKeyIndex = 0; @@ -400,13 +393,12 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, return NULL; } - relationPartitionKey = DistPartitionKey(relationOid); + Var *relationPartitionKey = DistPartitionKey(relationOid); - translaterVars = targetAppendRelInfo->translated_vars; + List *translaterVars = targetAppendRelInfo->translated_vars; foreach(translatedVarCell, translaterVars) { Node *targetNode = (Node *) lfirst(translatedVarCell); - Var *targetVar = NULL; childAttrNumber++; @@ -415,7 +407,7 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, continue; } - targetVar = (Var *) lfirst(translatedVarCell); + Var *targetVar = (Var *) lfirst(translatedVarCell); if (targetVar->varno == relationRteIndex && targetVar->varattno == relationPartitionKey->varattno) { @@ -464,15 +456,13 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, bool RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext *restrictionContext) { - List *attributeEquivalenceList = NIL; - /* there is a single distributed relation, no need to continue */ if (!ContainsMultipleDistributedRelations(restrictionContext)) { 
return true; } - attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext); + List *attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext); return RestrictionEquivalenceForPartitionKeysViaEquivalances(restrictionContext, attributeEquivalenceList); @@ -554,20 +544,18 @@ GenerateAllAttributeEquivalences(PlannerRestrictionContext *plannerRestrictionCo JoinRestrictionContext *joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext; - List *relationRestrictionAttributeEquivalenceList = NIL; - List *joinRestrictionAttributeEquivalenceList = NIL; - List *allAttributeEquivalenceList = NIL; /* reset the equivalence id counter per call to prevent overflows */ attributeEquivalenceId = 1; - relationRestrictionAttributeEquivalenceList = + List *relationRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForRelationRestrictions(relationRestrictionContext); - joinRestrictionAttributeEquivalenceList = + List *joinRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForJoinRestrictions(joinRestrictionContext); - allAttributeEquivalenceList = list_concat(relationRestrictionAttributeEquivalenceList, - joinRestrictionAttributeEquivalenceList); + List *allAttributeEquivalenceList = list_concat( + relationRestrictionAttributeEquivalenceList, + joinRestrictionAttributeEquivalenceList); return allAttributeEquivalenceList; } @@ -609,7 +597,6 @@ bool EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList, RelationRestrictionContext *restrictionContext) { - AttributeEquivalenceClass *commonEquivalenceClass = NULL; ListCell *commonEqClassCell = NULL; ListCell *relationRestrictionCell = NULL; Relids commonRteIdentities = NULL; @@ -619,8 +606,9 @@ EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList, * common equivalence class. The main goal is to test whether this main class * contains all partition keys of the existing relations. 
*/ - commonEquivalenceClass = GenerateCommonEquivalence(attributeEquivalenceList, - restrictionContext); + AttributeEquivalenceClass *commonEquivalenceClass = GenerateCommonEquivalence( + attributeEquivalenceList, + restrictionContext); /* add the rte indexes of relations to a bitmap */ foreach(commonEqClassCell, commonEquivalenceClass->equivalentAttributes) @@ -885,13 +873,12 @@ static AttributeEquivalenceClass * GenerateCommonEquivalence(List *attributeEquivalenceList, RelationRestrictionContext *relationRestrictionContext) { - AttributeEquivalenceClass *commonEquivalenceClass = NULL; - AttributeEquivalenceClass *firstEquivalenceClass = NULL; Bitmapset *addedEquivalenceIds = NULL; uint32 equivalenceListSize = list_length(attributeEquivalenceList); uint32 equivalenceClassIndex = 0; - commonEquivalenceClass = palloc0(sizeof(AttributeEquivalenceClass)); + AttributeEquivalenceClass *commonEquivalenceClass = palloc0( + sizeof(AttributeEquivalenceClass)); commonEquivalenceClass->equivalenceId = 0; /* @@ -899,7 +886,7 @@ GenerateCommonEquivalence(List *attributeEquivalenceList, * table since we always want the input distributed relations to be * on the common class. */ - firstEquivalenceClass = + AttributeEquivalenceClass *firstEquivalenceClass = GenerateEquivalanceClassForRelationRestriction(relationRestrictionContext); /* we skip the calculation if there are not enough information */ @@ -915,12 +902,12 @@ GenerateCommonEquivalence(List *attributeEquivalenceList, while (equivalenceClassIndex < equivalenceListSize) { - AttributeEquivalenceClass *currentEquivalenceClass = NULL; ListCell *equivalenceMemberCell = NULL; bool restartLoop = false; - currentEquivalenceClass = list_nth(attributeEquivalenceList, - equivalenceClassIndex); + AttributeEquivalenceClass *currentEquivalenceClass = list_nth( + attributeEquivalenceList, + equivalenceClassIndex); /* * This is an optimization. 
If we already added the same equivalence class, @@ -1077,22 +1064,14 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext * foreach(restrictionInfoList, joinRestriction->joinRestrictInfoList) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(restrictionInfoList); - OpExpr *restrictionOpExpr = NULL; - Node *leftNode = NULL; - Node *rightNode = NULL; - Expr *strippedLeftExpr = NULL; - Expr *strippedRightExpr = NULL; - Var *leftVar = NULL; - Var *rightVar = NULL; Expr *restrictionClause = rinfo->clause; - AttributeEquivalenceClass *attributeEquivalance = NULL; if (!IsA(restrictionClause, OpExpr)) { continue; } - restrictionOpExpr = (OpExpr *) restrictionClause; + OpExpr *restrictionOpExpr = (OpExpr *) restrictionClause; if (list_length(restrictionOpExpr->args) != 2) { continue; @@ -1102,22 +1081,24 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext * continue; } - leftNode = linitial(restrictionOpExpr->args); - rightNode = lsecond(restrictionOpExpr->args); + Node *leftNode = linitial(restrictionOpExpr->args); + Node *rightNode = lsecond(restrictionOpExpr->args); /* we also don't want implicit coercions */ - strippedLeftExpr = (Expr *) strip_implicit_coercions((Node *) leftNode); - strippedRightExpr = (Expr *) strip_implicit_coercions((Node *) rightNode); + Expr *strippedLeftExpr = (Expr *) strip_implicit_coercions((Node *) leftNode); + Expr *strippedRightExpr = (Expr *) strip_implicit_coercions( + (Node *) rightNode); if (!(IsA(strippedLeftExpr, Var) && IsA(strippedRightExpr, Var))) { continue; } - leftVar = (Var *) strippedLeftExpr; - rightVar = (Var *) strippedRightExpr; + Var *leftVar = (Var *) strippedLeftExpr; + Var *rightVar = (Var *) strippedRightExpr; - attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass)); + AttributeEquivalenceClass *attributeEquivalance = palloc0( + sizeof(AttributeEquivalenceClass)); attributeEquivalance->equivalenceId = attributeEquivalenceId++; 
AddToAttributeEquivalenceClass(&attributeEquivalance, @@ -1167,8 +1148,6 @@ static void AddToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceClass, PlannerInfo *root, Var *varToBeAdded) { - RangeTblEntry *rangeTableEntry = NULL; - /* punt if it's a whole-row var rather than a plain column reference */ if (varToBeAdded->varattno == InvalidAttrNumber) { @@ -1181,7 +1160,7 @@ AddToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceC return; } - rangeTableEntry = root->simple_rte_array[varToBeAdded->varno]; + RangeTblEntry *rangeTableEntry = root->simple_rte_array[varToBeAdded->varno]; if (rangeTableEntry->rtekind == RTE_RELATION) { AddRteRelationToAttributeEquivalenceClass(attributeEquivalanceClass, @@ -1210,7 +1189,6 @@ AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass PlannerInfo *root, Var *varToBeAdded) { RelOptInfo *baseRelOptInfo = find_base_rel(root, varToBeAdded->varno); - TargetEntry *subqueryTargetEntry = NULL; Query *targetSubquery = GetTargetSubquery(root, rangeTableEntry, varToBeAdded); /* @@ -1229,8 +1207,8 @@ AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass return; } - subqueryTargetEntry = get_tle_by_resno(targetSubquery->targetList, - varToBeAdded->varattno); + TargetEntry *subqueryTargetEntry = get_tle_by_resno(targetSubquery->targetList, + varToBeAdded->varattno); /* if we fail to find corresponding target entry, do not proceed */ if (subqueryTargetEntry == NULL || subqueryTargetEntry->resjunk) @@ -1402,9 +1380,7 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass ** RangeTblEntry *rangeTableEntry, Var *varToBeAdded) { - AttributeEquivalenceClassMember *attributeEqMember = NULL; Oid relationId = rangeTableEntry->relid; - Var *relationPartitionKey = NULL; /* we don't consider local tables in the equality on columns */ if (!IsDistributedTable(relationId)) @@ -1412,7 +1388,7 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass ** 
return; } - relationPartitionKey = DistPartitionKey(relationId); + Var *relationPartitionKey = DistPartitionKey(relationId); Assert(rangeTableEntry->rtekind == RTE_RELATION); @@ -1428,7 +1404,8 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass ** return; } - attributeEqMember = palloc0(sizeof(AttributeEquivalenceClassMember)); + AttributeEquivalenceClassMember *attributeEqMember = palloc0( + sizeof(AttributeEquivalenceClassMember)); attributeEqMember->varattno = varToBeAdded->varattno; attributeEqMember->varno = varToBeAdded->varno; @@ -1481,7 +1458,6 @@ static List * AddAttributeClassToAttributeClassList(List *attributeEquivalenceList, AttributeEquivalenceClass *attributeEquivalance) { - List *equivalentAttributes = NULL; ListCell *attributeEquivalanceCell = NULL; if (attributeEquivalance == NULL) @@ -1493,7 +1469,7 @@ AddAttributeClassToAttributeClassList(List *attributeEquivalenceList, * Note that in some cases we allow having equivalentAttributes with zero or * one elements. For the details, see AddToAttributeEquivalenceClass(). 
*/ - equivalentAttributes = attributeEquivalance->equivalentAttributes; + List *equivalentAttributes = attributeEquivalance->equivalentAttributes; if (list_length(equivalentAttributes) < 2) { return attributeEquivalenceList; @@ -1589,15 +1565,10 @@ bool ContainsUnionSubquery(Query *queryTree) { List *rangeTableList = queryTree->rtable; - Node *setOperations = queryTree->setOperations; List *joinTreeTableIndexList = NIL; - Index subqueryRteIndex = 0; - uint32 joiningRangeTableCount = 0; - RangeTblEntry *rangeTableEntry = NULL; - Query *subqueryTree = NULL; ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); - joiningRangeTableCount = list_length(joinTreeTableIndexList); + uint32 joiningRangeTableCount = list_length(joinTreeTableIndexList); /* don't allow joins on top of unions */ if (joiningRangeTableCount > 1) @@ -1611,15 +1582,15 @@ ContainsUnionSubquery(Query *queryTree) return false; } - subqueryRteIndex = linitial_int(joinTreeTableIndexList); - rangeTableEntry = rt_fetch(subqueryRteIndex, rangeTableList); + Index subqueryRteIndex = linitial_int(joinTreeTableIndexList); + RangeTblEntry *rangeTableEntry = rt_fetch(subqueryRteIndex, rangeTableList); if (rangeTableEntry->rtekind != RTE_SUBQUERY) { return false; } - subqueryTree = rangeTableEntry->subquery; - setOperations = subqueryTree->setOperations; + Query *subqueryTree = rangeTableEntry->subquery; + Node *setOperations = subqueryTree->setOperations; if (setOperations != NULL) { SetOperationStmt *setOperationStatement = (SetOperationStmt *) setOperations; @@ -1648,15 +1619,12 @@ ContainsUnionSubquery(Query *queryTree) static Index RelationRestrictionPartitionKeyIndex(RelationRestriction *relationRestriction) { - PlannerInfo *relationPlannerRoot = NULL; - Query *relationPlannerParseQuery = NULL; - List *relationTargetList = NIL; ListCell *targetEntryCell = NULL; Index partitionKeyTargetAttrIndex = 0; - relationPlannerRoot = relationRestriction->plannerInfo; - 
relationPlannerParseQuery = relationPlannerRoot->parse; - relationTargetList = relationPlannerParseQuery->targetList; + PlannerInfo *relationPlannerRoot = relationRestriction->plannerInfo; + Query *relationPlannerParseQuery = relationPlannerRoot->parse; + List *relationTargetList = relationPlannerParseQuery->targetList; foreach(targetEntryCell, relationTargetList) { @@ -1689,12 +1657,11 @@ List * DistributedRelationIdList(Query *query) { List *rangeTableList = NIL; - List *tableEntryList = NIL; List *relationIdList = NIL; ListCell *tableEntryCell = NULL; ExtractRangeTableRelationWalker((Node *) query, &rangeTableList); - tableEntryList = TableEntryList(rangeTableList); + List *tableEntryList = TableEntryList(rangeTableList); foreach(tableEntryCell, tableEntryList) { @@ -1724,10 +1691,6 @@ PlannerRestrictionContext * FilterPlannerRestrictionForQuery(PlannerRestrictionContext *plannerRestrictionContext, Query *query) { - PlannerRestrictionContext *filteredPlannerRestrictionContext = NULL; - int referenceRelationCount = 0; - int totalRelationCount = 0; - Relids queryRteIdentities = QueryRteIdentities(query); RelationRestrictionContext *relationRestrictionContext = @@ -1742,14 +1705,16 @@ FilterPlannerRestrictionForQuery(PlannerRestrictionContext *plannerRestrictionCo FilterJoinRestrictionContext(joinRestrictionContext, queryRteIdentities); /* allocate the filtered planner restriction context and set all the fields */ - filteredPlannerRestrictionContext = palloc0(sizeof(PlannerRestrictionContext)); + PlannerRestrictionContext *filteredPlannerRestrictionContext = palloc0( + sizeof(PlannerRestrictionContext)); filteredPlannerRestrictionContext->memoryContext = plannerRestrictionContext->memoryContext; - totalRelationCount = list_length( + int totalRelationCount = list_length( filteredRelationRestrictionContext->relationRestrictionList); - referenceRelationCount = ReferenceRelationCount(filteredRelationRestrictionContext); + int referenceRelationCount = 
ReferenceRelationCount( + filteredRelationRestrictionContext); filteredRelationRestrictionContext->allReferenceTables = (totalRelationCount == referenceRelationCount); @@ -1850,10 +1815,8 @@ static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int rangeTableArrayLength, Relids queryRteIdentities) { - int rteIndex = 0; - /* simple_rte_array starts from 1, see plannerInfo struct */ - for (rteIndex = 1; rteIndex < rangeTableArrayLength; ++rteIndex) + for (int rteIndex = 1; rteIndex < rangeTableArrayLength; ++rteIndex) { RangeTblEntry *rangeTableEntry = rangeTableEntries[rteIndex]; List *rangeTableRelationList = NULL; @@ -1883,11 +1846,10 @@ RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int foreach(rteRelationCell, rangeTableRelationList) { RangeTblEntry *rteRelation = (RangeTblEntry *) lfirst(rteRelationCell); - int rteIdentity = 0; Assert(rteRelation->rtekind == RTE_RELATION); - rteIdentity = GetRTEIdentity(rteRelation); + int rteIdentity = GetRTEIdentity(rteRelation); if (bms_is_member(rteIdentity, queryRteIdentities)) { return true; @@ -1916,12 +1878,11 @@ QueryRteIdentities(Query *queryTree) foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); - int rteIdentity = 0; /* we're only interested in relations */ Assert(rangeTableEntry->rtekind == RTE_RELATION); - rteIdentity = GetRTEIdentity(rangeTableEntry); + int rteIdentity = GetRTEIdentity(rangeTableEntry); queryRteIdentities = bms_add_member(queryRteIdentities, rteIdentity); } diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c index 8e77b7d72..187a7e440 100644 --- a/src/backend/distributed/planner/shard_pruning.c +++ b/src/backend/distributed/planner/shard_pruning.c @@ -308,7 +308,6 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList, foreach(pruneCell, context.pruningInstances) { PruningInstance *prune = 
(PruningInstance *) lfirst(pruneCell); - List *pruneOneList; /* * If this is a partial instance, a fully built one has also been @@ -358,7 +357,7 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList, } } - pruneOneList = PruneOne(cacheEntry, &context, prune); + List *pruneOneList = PruneOne(cacheEntry, &context, prune); if (prunedList) { @@ -643,12 +642,9 @@ AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, equal(strippedLeftOpExpression, context->partitionColumn) && IsA(arrayArgument, Const)) { - ArrayType *array = NULL; int16 typlen = 0; bool typbyval = false; char typalign = '\0'; - Oid elementType = 0; - ArrayIterator arrayIterator = NULL; Datum arrayElement = 0; Datum inArray = ((Const *) arrayArgument)->constvalue; bool isNull = false; @@ -659,26 +655,25 @@ AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, return; } - array = DatumGetArrayTypeP(((Const *) arrayArgument)->constvalue); + ArrayType *array = DatumGetArrayTypeP(((Const *) arrayArgument)->constvalue); /* get the necessary information from array type to iterate over it */ - elementType = ARR_ELEMTYPE(array); + Oid elementType = ARR_ELEMTYPE(array); get_typlenbyvalalign(elementType, &typlen, &typbyval, &typalign); /* Iterate over the righthand array of expression */ - arrayIterator = array_create_iterator(array, 0, NULL); + ArrayIterator arrayIterator = array_create_iterator(array, 0, NULL); while (array_iterate(arrayIterator, &arrayElement, &isNull)) { - OpExpr *arrayEqualityOp = NULL; Const *constElement = makeConst(elementType, -1, DEFAULT_COLLATION_OID, typlen, arrayElement, isNull, typbyval); /* build partcol = arrayelem operator */ - arrayEqualityOp = makeNode(OpExpr); + OpExpr *arrayEqualityOp = makeNode(OpExpr); arrayEqualityOp->opno = arrayOperatorExpression->opno; arrayEqualityOp->opfuncid = arrayOperatorExpression->opfuncid; arrayEqualityOp->inputcollid = arrayOperatorExpression->inputcollid; @@ -734,7 +729,6 @@ 
AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla Var *partitionColumn, Const *constantClause) { PruningInstance *prune = context->currentPruningInstance; - List *btreeInterpretationList = NULL; ListCell *btreeInterpretationCell = NULL; bool matchedOp = false; @@ -756,7 +750,7 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla /* at this point, we'd better be able to pass binary Datums to comparison functions */ Assert(IsBinaryCoercible(constantClause->consttype, partitionColumn->vartype)); - btreeInterpretationList = get_op_btree_interpretation(opClause->opno); + List *btreeInterpretationList = get_op_btree_interpretation(opClause->opno); foreach(btreeInterpretationCell, btreeInterpretationList) { OpBtreeInterpretation *btreeInterpretation = @@ -924,13 +918,12 @@ AddHashRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause, Var *varClause, Const *constantClause) { PruningInstance *prune = context->currentPruningInstance; - List *btreeInterpretationList = NULL; ListCell *btreeInterpretationCell = NULL; /* be paranoid */ Assert(IsBinaryCoercible(constantClause->consttype, INT4OID)); - btreeInterpretationList = + List *btreeInterpretationList = get_op_btree_interpretation(opClause->opno); foreach(btreeInterpretationCell, btreeInterpretationList) { @@ -986,9 +979,8 @@ static List * ShardArrayToList(ShardInterval **shardArray, int length) { List *shardIntervalList = NIL; - int shardIndex; - for (shardIndex = 0; shardIndex < length; shardIndex++) + for (int shardIndex = 0; shardIndex < length; shardIndex++) { ShardInterval *shardInterval = shardArray[shardIndex]; @@ -1068,13 +1060,12 @@ PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, */ if (prune->hashedEqualConsts) { - int shardIndex = INVALID_SHARD_INDEX; ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray; Assert(context->partitionMethod == DISTRIBUTE_BY_HASH); - shardIndex = 
FindShardIntervalIndex(prune->hashedEqualConsts->constvalue, - cacheEntry); + int shardIndex = FindShardIntervalIndex(prune->hashedEqualConsts->constvalue, + cacheEntry); if (shardIndex == INVALID_SHARD_INDEX) { @@ -1198,14 +1189,12 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach while (lowerBoundIndex < upperBoundIndex) { int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2); - int maxValueComparison = 0; - int minValueComparison = 0; /* setup minValue as argument */ fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue); /* execute cmp(partitionValue, lowerBound) */ - minValueComparison = PerformCompare(compareFunction); + int minValueComparison = PerformCompare(compareFunction); /* and evaluate results */ if (minValueComparison < 0) @@ -1219,7 +1208,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->maxValue); /* execute cmp(partitionValue, upperBound) */ - maxValueComparison = PerformCompare(compareFunction); + int maxValueComparison = PerformCompare(compareFunction); if ((maxValueComparison == 0 && !includeMax) || maxValueComparison > 0) @@ -1276,14 +1265,12 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach while (lowerBoundIndex < upperBoundIndex) { int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2); - int maxValueComparison = 0; - int minValueComparison = 0; /* setup minValue as argument */ fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue); /* execute cmp(partitionValue, lowerBound) */ - minValueComparison = PerformCompare(compareFunction); + int minValueComparison = PerformCompare(compareFunction); /* and evaluate results */ if ((minValueComparison == 0 && !includeMin) || @@ -1298,7 +1285,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach fcSetArg(compareFunction, 1, 
shardIntervalCache[middleIndex]->maxValue); /* execute cmp(partitionValue, upperBound) */ - maxValueComparison = PerformCompare(compareFunction); + int maxValueComparison = PerformCompare(compareFunction); if (maxValueComparison > 0) { @@ -1355,7 +1342,6 @@ PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *contex bool upperBoundInclusive = false; int lowerBoundIdx = -1; int upperBoundIdx = -1; - int curIdx = 0; FunctionCallInfo compareFunctionCall = (FunctionCallInfo) & context->compareIntervalFunctionCall; @@ -1442,7 +1428,7 @@ PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *contex /* * Build list of all shards that are in the range of shards (possibly 0). */ - for (curIdx = lowerBoundIdx; curIdx <= upperBoundIdx; curIdx++) + for (int curIdx = lowerBoundIdx; curIdx <= upperBoundIdx; curIdx++) { remainingShardList = lappend(remainingShardList, sortedShardIntervalArray[curIdx]); @@ -1463,9 +1449,8 @@ ExhaustivePrune(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, List *remainingShardList = NIL; int shardCount = cacheEntry->shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray; - int curIdx = 0; - for (curIdx = 0; curIdx < shardCount; curIdx++) + for (int curIdx = 0; curIdx < shardCount; curIdx++) { ShardInterval *curInterval = sortedShardIntervalArray[curIdx]; diff --git a/src/backend/distributed/progress/multi_progress.c b/src/backend/distributed/progress/multi_progress.c index 18f951af6..66c764c47 100644 --- a/src/backend/distributed/progress/multi_progress.c +++ b/src/backend/distributed/progress/multi_progress.c @@ -39,11 +39,6 @@ ProgressMonitorData * CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSize, Oid relationId) { - dsm_segment *dsmSegment = NULL; - dsm_handle dsmHandle = 0; - ProgressMonitorData *monitor = NULL; - Size monitorSize = 0; - if (stepSize <= 0 || stepCount <= 0) { ereport(ERROR, @@ -51,8 +46,8 @@ 
CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSi "positive values"))); } - monitorSize = sizeof(ProgressMonitorData) + stepSize * stepCount; - dsmSegment = dsm_create(monitorSize, DSM_CREATE_NULL_IF_MAXSEGMENTS); + Size monitorSize = sizeof(ProgressMonitorData) + stepSize * stepCount; + dsm_segment *dsmSegment = dsm_create(monitorSize, DSM_CREATE_NULL_IF_MAXSEGMENTS); if (dsmSegment == NULL) { @@ -62,9 +57,9 @@ CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSi return NULL; } - dsmHandle = dsm_segment_handle(dsmSegment); + dsm_handle dsmHandle = dsm_segment_handle(dsmSegment); - monitor = MonitorDataFromDSMHandle(dsmHandle, &dsmSegment); + ProgressMonitorData *monitor = MonitorDataFromDSMHandle(dsmHandle, &dsmSegment); monitor->stepCount = stepCount; monitor->processId = MyProcPid; @@ -143,42 +138,38 @@ ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegments) */ text *commandTypeText = cstring_to_text("VACUUM"); Datum commandTypeDatum = PointerGetDatum(commandTypeText); - Oid getProgressInfoFunctionOid = InvalidOid; - TupleTableSlot *tupleTableSlot = NULL; - ReturnSetInfo *progressResultSet = NULL; List *monitorList = NIL; - getProgressInfoFunctionOid = FunctionOid("pg_catalog", - "pg_stat_get_progress_info", - 1); + Oid getProgressInfoFunctionOid = FunctionOid("pg_catalog", + "pg_stat_get_progress_info", + 1); - progressResultSet = FunctionCallGetTupleStore1(pg_stat_get_progress_info, - getProgressInfoFunctionOid, - commandTypeDatum); + ReturnSetInfo *progressResultSet = FunctionCallGetTupleStore1( + pg_stat_get_progress_info, + getProgressInfoFunctionOid, + commandTypeDatum); - tupleTableSlot = MakeSingleTupleTableSlotCompat(progressResultSet->setDesc, - &TTSOpsMinimalTuple); + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + progressResultSet->setDesc, + &TTSOpsMinimalTuple); /* iterate over tuples in tuple store, and send them to destination */ for (;;) { - 
bool nextTuple = false; bool isNull = false; - Datum magicNumberDatum = 0; - uint64 magicNumber = 0; - nextTuple = tuplestore_gettupleslot(progressResultSet->setResult, - true, - false, - tupleTableSlot); + bool nextTuple = tuplestore_gettupleslot(progressResultSet->setResult, + true, + false, + tupleTableSlot); if (!nextTuple) { break; } - magicNumberDatum = slot_getattr(tupleTableSlot, magicNumberIndex, &isNull); - magicNumber = DatumGetUInt64(magicNumberDatum); + Datum magicNumberDatum = slot_getattr(tupleTableSlot, magicNumberIndex, &isNull); + uint64 magicNumber = DatumGetUInt64(magicNumberDatum); if (!isNull && magicNumber == commandTypeMagicNumber) { diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index 05a67b45c..54db8acf5 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -118,7 +118,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) command->subtype == AT_ValidateConstraint) { char **constraintName = &(command->name); - Oid constraintOid = InvalidOid; const bool constraintMissingOk = true; if (!OidIsValid(relationId)) @@ -129,9 +128,9 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) rvMissingOk); } - constraintOid = get_relation_constraint_oid(relationId, - command->name, - constraintMissingOk); + Oid constraintOid = get_relation_constraint_oid(relationId, + command->name, + constraintMissingOk); if (!OidIsValid(constraintOid)) { AppendShardIdToName(constraintName, shardId); @@ -161,8 +160,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) case T_ClusterStmt: { ClusterStmt *clusterStmt = (ClusterStmt *) parseTree; - char **relationName = NULL; - char **relationSchemaName = NULL; /* we do not support clustering the entire database */ if (clusterStmt->relation == NULL) @@ -170,8 +167,8 @@ RelayEventExtendNames(Node *parseTree, char 
*schemaName, uint64 shardId) ereport(ERROR, (errmsg("cannot extend name for multi-relation cluster"))); } - relationName = &(clusterStmt->relation->relname); - relationSchemaName = &(clusterStmt->relation->schemaname); + char **relationName = &(clusterStmt->relation->relname); + char **relationSchemaName = &(clusterStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); @@ -232,11 +229,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) if (objectType == OBJECT_TABLE || objectType == OBJECT_INDEX || objectType == OBJECT_FOREIGN_TABLE || objectType == OBJECT_FOREIGN_SERVER) { - List *relationNameList = NULL; - int relationNameListLength = 0; Value *relationSchemaNameValue = NULL; Value *relationNameValue = NULL; - char **relationName = NULL; uint32 dropCount = list_length(dropStmt->objects); if (dropCount > 1) @@ -253,8 +247,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) * have the correct memory address for the name. 
*/ - relationNameList = (List *) linitial(dropStmt->objects); - relationNameListLength = list_length(relationNameList); + List *relationNameList = (List *) linitial(dropStmt->objects); + int relationNameListLength = list_length(relationNameList); switch (relationNameListLength) { @@ -294,7 +288,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) relationNameList = lcons(schemaNameValue, relationNameList); } - relationName = &(relationNameValue->val.str); + char **relationName = &(relationNameValue->val.str); AppendShardIdToName(relationName, shardId); } else if (objectType == OBJECT_POLICY) @@ -418,7 +412,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) char **oldRelationName = &(renameStmt->relation->relname); char **newRelationName = &(renameStmt->newname); char **objectSchemaName = &(renameStmt->relation->schemaname); - int newRelationNameLength; /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(objectSchemaName, schemaName); @@ -440,7 +433,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) * * See also https://github.com/citusdata/citus/issues/1664 */ - newRelationNameLength = strlen(*newRelationName); + int newRelationNameLength = strlen(*newRelationName); if (newRelationNameLength >= (NAMEDATALEN - 1)) { ereport(ERROR, @@ -676,10 +669,8 @@ AppendShardIdToName(char **name, uint64 shardId) char extendedName[NAMEDATALEN]; int nameLength = strlen(*name); char shardIdAndSeparator[NAMEDATALEN]; - int shardIdAndSeparatorLength; uint32 longNameHash = 0; int multiByteClipLength = 0; - int neededBytes = 0; if (nameLength >= NAMEDATALEN) { @@ -690,7 +681,7 @@ AppendShardIdToName(char **name, uint64 shardId) snprintf(shardIdAndSeparator, NAMEDATALEN, "%c" UINT64_FORMAT, SHARD_NAME_SEPARATOR, shardId); - shardIdAndSeparatorLength = strlen(shardIdAndSeparator); + int shardIdAndSeparatorLength = strlen(shardIdAndSeparator); /* * If *name strlen is < (NAMEDATALEN 
- shardIdAndSeparatorLength), @@ -740,7 +731,7 @@ AppendShardIdToName(char **name, uint64 shardId) } (*name) = (char *) repalloc((*name), NAMEDATALEN); - neededBytes = snprintf((*name), NAMEDATALEN, "%s", extendedName); + int neededBytes = snprintf((*name), NAMEDATALEN, "%s", extendedName); if (neededBytes < 0) { ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -764,10 +755,7 @@ shard_name(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); int64 shardId = PG_GETARG_INT64(1); - char *relationName = NULL; - Oid schemaId = InvalidOid; - char *schemaName = NULL; char *qualifiedName = NULL; CheckCitusVersion(ERROR); @@ -785,7 +773,7 @@ shard_name(PG_FUNCTION_ARGS) errmsg("object_name does not reference a valid relation"))); } - relationName = get_rel_name(relationId); + char *relationName = get_rel_name(relationId); if (relationName == NULL) { @@ -795,8 +783,8 @@ shard_name(PG_FUNCTION_ARGS) AppendShardIdToName(&relationName, shardId); - schemaId = get_rel_namespace(relationId); - schemaName = get_namespace_name(schemaId); + Oid schemaId = get_rel_namespace(relationId); + char *schemaName = get_namespace_name(schemaId); if (strncmp(schemaName, "public", NAMEDATALEN) == 0) { diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index dec92955e..2a02b6783 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -273,10 +273,9 @@ static void ResizeStackToMaximumDepth(void) { #ifndef WIN32 - volatile char *stack_resizer = NULL; long max_stack_depth_bytes = max_stack_depth * 1024L; - stack_resizer = alloca(max_stack_depth_bytes); + volatile char *stack_resizer = alloca(max_stack_depth_bytes); /* * Different architectures might have different directions while @@ -345,14 +344,13 @@ StartupCitusBackend(void) static void CreateRequiredDirectories(void) { - int dirNo = 0; const char *subdirs[] = { "pg_foreign_file", "pg_foreign_file/cached", "base/" PG_JOB_CACHE_DIR 
}; - for (dirNo = 0; dirNo < lengthof(subdirs); dirNo++) + for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++) { int ret = mkdir(subdirs[dirNo], S_IRWXU); @@ -1380,15 +1378,12 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source) static void NodeConninfoGucAssignHook(const char *newval, void *extra) { - PQconninfoOption *optionArray = NULL; - PQconninfoOption *option = NULL; - if (newval == NULL) { newval = ""; } - optionArray = PQconninfoParse(newval, NULL); + PQconninfoOption *optionArray = PQconninfoParse(newval, NULL); if (optionArray == NULL) { ereport(FATAL, (errmsg("cannot parse node_conninfo value"), @@ -1398,7 +1393,7 @@ NodeConninfoGucAssignHook(const char *newval, void *extra) ResetConnParams(); - for (option = optionArray; option->keyword != NULL; option++) + for (PQconninfoOption *option = optionArray; option->keyword != NULL; option++) { if (option->val == NULL || option->val[0] == '\0') { diff --git a/src/backend/distributed/test/colocation_utils.c b/src/backend/distributed/test/colocation_utils.c index 8d24bdf9b..5eee45825 100644 --- a/src/backend/distributed/test/colocation_utils.c +++ b/src/backend/distributed/test/colocation_utils.c @@ -83,7 +83,6 @@ get_colocated_table_array(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); - ArrayType *colocatedTablesArrayType = NULL; List *colocatedTableList = ColocatedTableList(distributedTableId); ListCell *colocatedTableCell = NULL; int colocatedTableCount = list_length(colocatedTableList); @@ -100,8 +99,9 @@ get_colocated_table_array(PG_FUNCTION_ARGS) colocatedTableIndex++; } - colocatedTablesArrayType = DatumArrayToArrayType(colocatedTablesDatumArray, - colocatedTableCount, arrayTypeId); + ArrayType *colocatedTablesArrayType = DatumArrayToArrayType(colocatedTablesDatumArray, + colocatedTableCount, + arrayTypeId); PG_RETURN_ARRAYTYPE_P(colocatedTablesArrayType); } diff --git a/src/backend/distributed/test/deparse_function_query.c 
b/src/backend/distributed/test/deparse_function_query.c index f990f7edb..7a6e54424 100644 --- a/src/backend/distributed/test/deparse_function_query.c +++ b/src/backend/distributed/test/deparse_function_query.c @@ -31,15 +31,12 @@ Datum deparse_test(PG_FUNCTION_ARGS) { text *queryStringText = PG_GETARG_TEXT_P(0); - char *queryStringChar = NULL; - Query *query = NULL; - const char *deparsedQuery = NULL; - queryStringChar = text_to_cstring(queryStringText); - query = ParseQueryString(queryStringChar, NULL, 0); + char *queryStringChar = text_to_cstring(queryStringText); + Query *query = ParseQueryString(queryStringChar, NULL, 0); QualifyTreeNode(query->utilityStmt); - deparsedQuery = DeparseTreeNode(query->utilityStmt); + const char *deparsedQuery = DeparseTreeNode(query->utilityStmt); PG_RETURN_TEXT_P(cstring_to_text(deparsedQuery)); } diff --git a/src/backend/distributed/test/deparse_shard_query.c b/src/backend/distributed/test/deparse_shard_query.c index bae0bc065..67b5532c8 100644 --- a/src/backend/distributed/test/deparse_shard_query.c +++ b/src/backend/distributed/test/deparse_shard_query.c @@ -50,10 +50,10 @@ deparse_shard_query_test(PG_FUNCTION_ARGS) { Node *parsetree = (Node *) lfirst(parseTreeCell); ListCell *queryTreeCell = NULL; - List *queryTreeList = NIL; - queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar, - NULL, 0, NULL); + List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, + queryStringChar, + NULL, 0, NULL); foreach(queryTreeCell, queryTreeList) { diff --git a/src/backend/distributed/test/distributed_deadlock_detection.c b/src/backend/distributed/test/distributed_deadlock_detection.c index bde1f9f2d..83f78c9fc 100644 --- a/src/backend/distributed/test/distributed_deadlock_detection.c +++ b/src/backend/distributed/test/distributed_deadlock_detection.c @@ -40,10 +40,7 @@ Datum get_adjacency_list_wait_graph(PG_FUNCTION_ARGS) { TupleDesc tupleDescriptor = NULL; - Tuplestorestate *tupleStore = NULL; - WaitGraph 
*waitGraph = NULL; - HTAB *adjacencyList = NULL; HASH_SEQ_STATUS status; TransactionNode *transactionNode = NULL; @@ -52,9 +49,9 @@ get_adjacency_list_wait_graph(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); - tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); - waitGraph = BuildGlobalWaitGraph(); - adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph); + Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); + WaitGraph *waitGraph = BuildGlobalWaitGraph(); + HTAB *adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph); /* iterate on all nodes */ hash_seq_init(&status, adjacencyList); diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index f9c5bb283..01091c3d7 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ b/src/backend/distributed/test/distribution_metadata.c @@ -62,17 +62,14 @@ Datum load_shard_id_array(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); - ArrayType *shardIdArrayType = NULL; ListCell *shardCell = NULL; int shardIdIndex = 0; Oid shardIdTypeId = INT8OID; - int shardIdCount = -1; - Datum *shardIdDatumArray = NULL; List *shardList = LoadShardIntervalList(distributedTableId); - shardIdCount = list_length(shardList); - shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); + int shardIdCount = list_length(shardList); + Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); foreach(shardCell, shardList) { @@ -83,8 +80,8 @@ load_shard_id_array(PG_FUNCTION_ARGS) shardIdIndex++; } - shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, - shardIdTypeId); + ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, + shardIdTypeId); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } @@ -103,12 +100,11 @@ load_shard_interval_array(PG_FUNCTION_ARGS) Oid expectedType PG_USED_FOR_ASSERTS_ONLY = get_fn_expr_argtype(fcinfo->flinfo, 1); ShardInterval *shardInterval = 
LoadShardInterval(shardId); Datum shardIntervalArray[] = { shardInterval->minValue, shardInterval->maxValue }; - ArrayType *shardIntervalArrayType = NULL; Assert(expectedType == shardInterval->valueTypeId); - shardIntervalArrayType = DatumArrayToArrayType(shardIntervalArray, 2, - shardInterval->valueTypeId); + ArrayType *shardIntervalArrayType = DatumArrayToArrayType(shardIntervalArray, 2, + shardInterval->valueTypeId); PG_RETURN_ARRAYTYPE_P(shardIntervalArrayType); } @@ -126,12 +122,9 @@ load_shard_placement_array(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); bool onlyFinalized = PG_GETARG_BOOL(1); - ArrayType *placementArrayType = NULL; List *placementList = NIL; ListCell *placementCell = NULL; - int placementCount = -1; int placementIndex = 0; - Datum *placementDatumArray = NULL; Oid placementTypeId = TEXTOID; StringInfo placementInfo = makeStringInfo(); @@ -146,8 +139,8 @@ load_shard_placement_array(PG_FUNCTION_ARGS) placementList = SortList(placementList, CompareShardPlacementsByWorker); - placementCount = list_length(placementList); - placementDatumArray = palloc0(placementCount * sizeof(Datum)); + int placementCount = list_length(placementList); + Datum *placementDatumArray = palloc0(placementCount * sizeof(Datum)); foreach(placementCell, placementList) { @@ -160,8 +153,9 @@ load_shard_placement_array(PG_FUNCTION_ARGS) resetStringInfo(placementInfo); } - placementArrayType = DatumArrayToArrayType(placementDatumArray, placementCount, - placementTypeId); + ArrayType *placementArrayType = DatumArrayToArrayType(placementDatumArray, + placementCount, + placementTypeId); PG_RETURN_ARRAYTYPE_P(placementArrayType); } @@ -224,14 +218,12 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS) StringInfo minInfo = makeStringInfo(); StringInfo maxInfo = makeStringInfo(); uint64 newShardId = GetNextShardId(); - text *maxInfoText = NULL; - text *minInfoText = NULL; appendStringInfo(minInfo, "%d", INT32_MIN); appendStringInfo(maxInfo, "%d", INT32_MAX); - minInfoText = 
cstring_to_text(minInfo->data); - maxInfoText = cstring_to_text(maxInfo->data); + text *minInfoText = cstring_to_text(minInfo->data); + text *maxInfoText = cstring_to_text(maxInfo->data); InsertShardRow(distributedTableId, newShardId, SHARD_STORAGE_TABLE, minInfoText, maxInfoText); @@ -270,10 +262,10 @@ relation_count_in_query(PG_FUNCTION_ARGS) { Node *parsetree = (Node *) lfirst(parseTreeCell); ListCell *queryTreeCell = NULL; - List *queryTreeList = NIL; - queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar, - NULL, 0, NULL); + List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, + queryStringChar, + NULL, 0, NULL); foreach(queryTreeCell, queryTreeList) { diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index 0450cb196..092f6c3b7 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -41,17 +41,14 @@ master_metadata_snapshot(PG_FUNCTION_ARGS) List *createSnapshotCommands = MetadataCreateCommands(); List *snapshotCommandList = NIL; ListCell *snapshotCommandCell = NULL; - int snapshotCommandCount = 0; - Datum *snapshotCommandDatumArray = NULL; - ArrayType *snapshotCommandArrayType = NULL; int snapshotCommandIndex = 0; Oid ddlCommandTypeId = TEXTOID; snapshotCommandList = list_concat(snapshotCommandList, dropSnapshotCommands); snapshotCommandList = list_concat(snapshotCommandList, createSnapshotCommands); - snapshotCommandCount = list_length(snapshotCommandList); - snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum)); + int snapshotCommandCount = list_length(snapshotCommandList); + Datum *snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum)); foreach(snapshotCommandCell, snapshotCommandList) { @@ -62,9 +59,9 @@ master_metadata_snapshot(PG_FUNCTION_ARGS) snapshotCommandIndex++; } - snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray, - 
snapshotCommandCount, - ddlCommandTypeId); + ArrayType *snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray, + snapshotCommandCount, + ddlCommandTypeId); PG_RETURN_ARRAYTYPE_P(snapshotCommandArrayType); } @@ -78,13 +75,10 @@ Datum wait_until_metadata_sync(PG_FUNCTION_ARGS) { uint32 timeout = PG_GETARG_UINT32(0); - int waitResult = 0; List *workerList = ActivePrimaryWorkerNodeList(NoLock); ListCell *workerCell = NULL; bool waitNotifications = false; - MultiConnection *connection = NULL; - int waitFlags = 0; foreach(workerCell, workerList) { @@ -109,13 +103,13 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - connection = GetNodeConnection(FORCE_NEW_CONNECTION, - "localhost", PostPortNumber); + MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, + "localhost", PostPortNumber); ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); - waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH; - waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn), - timeout, 0); + int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH; + int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn), + timeout, 0); if (waitResult & WL_POSTMASTER_DEATH) { ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); diff --git a/src/backend/distributed/test/progress_utils.c b/src/backend/distributed/test/progress_utils.c index 993c88abd..2d9475a6b 100644 --- a/src/backend/distributed/test/progress_utils.c +++ b/src/backend/distributed/test/progress_utils.c @@ -95,8 +95,7 @@ show_progress(PG_FUNCTION_ARGS) ProgressMonitorData *monitor = lfirst(monitorCell); uint64 *steps = monitor->steps; - int stepIndex = 0; - for (stepIndex = 0; stepIndex < monitor->stepCount; stepIndex++) + for (int stepIndex = 0; stepIndex < monitor->stepCount; stepIndex++) { uint64 step = steps[stepIndex]; diff --git a/src/backend/distributed/test/prune_shard_list.c 
b/src/backend/distributed/test/prune_shard_list.c index 44bd5c33e..aa1b6de55 100644 --- a/src/backend/distributed/test/prune_shard_list.c +++ b/src/backend/distributed/test/prune_shard_list.c @@ -202,20 +202,16 @@ MakeTextPartitionExpression(Oid distributedTableId, text *value) static ArrayType * PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList) { - ArrayType *shardIdArrayType = NULL; ListCell *shardCell = NULL; int shardIdIndex = 0; Oid shardIdTypeId = INT8OID; Index tableId = 1; - List *shardList = NIL; - int shardIdCount = -1; - Datum *shardIdDatumArray = NULL; - shardList = PruneShards(distributedTableId, tableId, whereClauseList, NULL); + List *shardList = PruneShards(distributedTableId, tableId, whereClauseList, NULL); - shardIdCount = list_length(shardList); - shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); + int shardIdCount = list_length(shardList); + Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); foreach(shardCell, shardList) { @@ -226,8 +222,8 @@ PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList) shardIdIndex++; } - shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, - shardIdTypeId); + ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, + shardIdTypeId); return shardIdArrayType; } @@ -240,8 +236,6 @@ PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList) static ArrayType * SortedShardIntervalArray(Oid distributedTableId) { - ArrayType *shardIdArrayType = NULL; - int shardIndex = 0; Oid shardIdTypeId = INT8OID; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); @@ -249,7 +243,7 @@ SortedShardIntervalArray(Oid distributedTableId) int shardIdCount = cacheEntry->shardIntervalArrayLength; Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); - for (shardIndex = 0; shardIndex < shardIdCount; ++shardIndex) + for (int shardIndex = 0; shardIndex < shardIdCount; ++shardIndex) { 
ShardInterval *shardId = shardIntervalArray[shardIndex]; Datum shardIdDatum = Int64GetDatum(shardId->shardId); @@ -257,8 +251,8 @@ SortedShardIntervalArray(Oid distributedTableId) shardIdDatumArray[shardIndex] = shardIdDatum; } - shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, - shardIdTypeId); + ArrayType *shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, + shardIdTypeId); return shardIdArrayType; } diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index deab45c42..9d08a7cb2 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -136,7 +136,6 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) StringInfo workerProcessStringInfo = makeStringInfo(); MultiConnection *localConnection = GetNodeConnection(0, LOCAL_HOST_NAME, PostPortNumber); - Oid pgReloadConfOid = InvalidOid; if (!singleConnection) { @@ -160,7 +159,7 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) CloseConnection(localConnection); /* Call pg_reload_conf UDF to update changed GUCs above on each backend */ - pgReloadConfOid = FunctionOid("pg_catalog", "pg_reload_conf", 0); + Oid pgReloadConfOid = FunctionOid("pg_catalog", "pg_reload_conf", 0); OidFunctionCall0(pgReloadConfOid); @@ -197,21 +196,19 @@ GetRemoteProcessId() { StringInfo queryStringInfo = makeStringInfo(); PGresult *result = NULL; - int64 rowCount = 0; - int64 resultValue = 0; appendStringInfo(queryStringInfo, GET_PROCESS_ID); ExecuteOptionalRemoteCommand(singleConnection, queryStringInfo->data, &result); - rowCount = PQntuples(result); + int64 rowCount = PQntuples(result); if (rowCount != 1) { PG_RETURN_VOID(); } - resultValue = ParseIntField(result, 0, 0); + int64 resultValue = ParseIntField(result, 0, 0); PQclear(result); ClearResults(singleConnection, false); diff --git 
a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 403c79789..040b7800a 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -155,12 +155,10 @@ Datum get_current_transaction_id(PG_FUNCTION_ARGS) { TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[5]; bool isNulls[5]; - DistributedTransactionId *distributedTransctionId = NULL; CheckCitusVersion(ERROR); @@ -176,7 +174,8 @@ get_current_transaction_id(PG_FUNCTION_ARGS) ereport(ERROR, (errmsg("backend is not ready for distributed transactions"))); } - distributedTransctionId = GetCurrentDistributedTransactionId(); + DistributedTransactionId *distributedTransctionId = + GetCurrentDistributedTransactionId(); memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); @@ -198,7 +197,7 @@ get_current_transaction_id(PG_FUNCTION_ARGS) isNulls[4] = true; } - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); PG_RETURN_DATUM(HeapTupleGetDatum(heapTuple)); } @@ -215,7 +214,6 @@ Datum get_global_active_transactions(PG_FUNCTION_ARGS) { TupleDesc tupleDescriptor = NULL; - Tuplestorestate *tupleStore = NULL; List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock); ListCell *workerNodeCell = NULL; List *connectionList = NIL; @@ -223,7 +221,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) StringInfo queryToSend = makeStringInfo(); CheckCitusVersion(ERROR); - tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); + Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); appendStringInfo(queryToSend, GET_ACTIVE_TRANSACTION_QUERY); @@ -236,7 +234,6 @@ get_global_active_transactions(PG_FUNCTION_ARGS) WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - 
MultiConnection *connection = NULL; int connectionFlags = 0; if (workerNode->groupId == GetLocalGroupId()) @@ -245,7 +242,8 @@ get_global_active_transactions(PG_FUNCTION_ARGS) continue; } - connection = StartNodeConnection(connectionFlags, nodeName, nodePort); + MultiConnection *connection = StartNodeConnection(connectionFlags, nodeName, + nodePort); connectionList = lappend(connectionList, connection); } @@ -256,9 +254,8 @@ get_global_active_transactions(PG_FUNCTION_ARGS) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - int querySent = false; - querySent = SendRemoteCommand(connection, queryToSend->data); + int querySent = SendRemoteCommand(connection, queryToSend->data); if (querySent == 0) { ReportConnectionError(connection, WARNING); @@ -269,28 +266,24 @@ get_global_active_transactions(PG_FUNCTION_ARGS) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - PGresult *result = NULL; bool raiseInterrupts = true; Datum values[ACTIVE_TRANSACTION_COLUMN_COUNT]; bool isNulls[ACTIVE_TRANSACTION_COLUMN_COUNT]; - int64 rowIndex = 0; - int64 rowCount = 0; - int64 colCount = 0; if (PQstatus(connection->pgConn) != CONNECTION_OK) { continue; } - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); continue; } - rowCount = PQntuples(result); - colCount = PQnfields(result); + int64 rowCount = PQntuples(result); + int64 colCount = PQnfields(result); /* Although it is not expected */ if (colCount != ACTIVE_TRANSACTION_COLUMN_COUNT) @@ -300,7 +293,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) continue; } - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) { memset(values, 0, sizeof(values)); memset(isNulls, false, 
sizeof(isNulls)); @@ -334,10 +327,9 @@ Datum get_all_active_transactions(PG_FUNCTION_ARGS) { TupleDesc tupleDescriptor = NULL; - Tuplestorestate *tupleStore = NULL; CheckCitusVersion(ERROR); - tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); + Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); StoreAllActiveTransactions(tupleStore, tupleDescriptor); @@ -355,7 +347,6 @@ get_all_active_transactions(PG_FUNCTION_ARGS) static void StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescriptor) { - int backendIndex = 0; Datum values[ACTIVE_TRANSACTION_COLUMN_COUNT]; bool isNulls[ACTIVE_TRANSACTION_COLUMN_COUNT]; bool showAllTransactions = superuser(); @@ -377,18 +368,14 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto /* we're reading all distributed transactions, prevent new backends */ LockBackendSharedMemory(LW_SHARED); - for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) + for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { BackendData *currentBackend = &backendManagementShmemData->backends[backendIndex]; - bool coordinatorOriginatedQuery = false; /* to work on data after releasing g spinlock to protect against errors */ - Oid databaseId = InvalidOid; - int backendPid = -1; int initiatorNodeIdentifier = -1; uint64 transactionNumber = 0; - TimestampTz transactionIdTimestamp = 0; SpinLockAcquire(¤tBackend->mutex); @@ -409,8 +396,8 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto continue; } - databaseId = currentBackend->databaseId; - backendPid = ProcGlobal->allProcs[backendIndex].pid; + Oid databaseId = currentBackend->databaseId; + int backendPid = ProcGlobal->allProcs[backendIndex].pid; initiatorNodeIdentifier = currentBackend->citusBackend.initiatorNodeIdentifier; /* @@ -421,10 +408,11 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto * field with the same name. 
The reason is that it also covers backends that are not * inside a distributed transaction. */ - coordinatorOriginatedQuery = currentBackend->citusBackend.transactionOriginator; + bool coordinatorOriginatedQuery = + currentBackend->citusBackend.transactionOriginator; transactionNumber = currentBackend->transactionId.transactionNumber; - transactionIdTimestamp = currentBackend->transactionId.timestamp; + TimestampTz transactionIdTimestamp = currentBackend->transactionId.timestamp; SpinLockRelease(¤tBackend->mutex); @@ -489,8 +477,6 @@ BackendManagementShmemInit(void) if (!alreadyInitialized) { - int backendIndex = 0; - int totalProcs = 0; char *trancheName = "Backend Management Tranche"; NamedLWLockTranche *namedLockTranche = @@ -518,8 +504,8 @@ BackendManagementShmemInit(void) * We also initiate initiatorNodeIdentifier to -1, which can never be * used as a node id. */ - totalProcs = TotalProcCount(); - for (backendIndex = 0; backendIndex < totalProcs; ++backendIndex) + int totalProcs = TotalProcCount(); + for (int backendIndex = 0; backendIndex < totalProcs; ++backendIndex) { BackendData *backendData = &backendManagementShmemData->backends[backendIndex]; @@ -809,7 +795,6 @@ CurrentDistributedTransactionNumber(void) void GetBackendDataForProc(PGPROC *proc, BackendData *result) { - BackendData *backendData = NULL; int pgprocno = proc->pgprocno; if (proc->lockGroupLeader != NULL) @@ -817,7 +802,7 @@ GetBackendDataForProc(PGPROC *proc, BackendData *result) pgprocno = proc->lockGroupLeader->pgprocno; } - backendData = &backendManagementShmemData->backends[pgprocno]; + BackendData *backendData = &backendManagementShmemData->backends[pgprocno]; SpinLockAcquire(&backendData->mutex); @@ -903,14 +888,12 @@ List * ActiveDistributedTransactionNumbers(void) { List *activeTransactionNumberList = NIL; - int curBackend = 0; /* build list of starting procs */ - for (curBackend = 0; curBackend < MaxBackends; curBackend++) + for (int curBackend = 0; curBackend < MaxBackends; 
curBackend++) { PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; BackendData currentBackendData; - uint64 *transactionNumber = NULL; if (currentProc->pid == 0) { @@ -932,7 +915,7 @@ ActiveDistributedTransactionNumbers(void) continue; } - transactionNumber = (uint64 *) palloc0(sizeof(uint64)); + uint64 *transactionNumber = (uint64 *) palloc0(sizeof(uint64)); *transactionNumber = currentBackendData.transactionId.transactionNumber; activeTransactionNumberList = lappend(activeTransactionNumberList, diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index c7ae102ed..11ad81930 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -269,11 +269,9 @@ PG_FUNCTION_INFO_V1(citus_worker_stat_activity); Datum citus_dist_stat_activity(PG_FUNCTION_ARGS) { - List *citusDistStatStatements = NIL; - CheckCitusVersion(ERROR); - citusDistStatStatements = CitusStatActivity(CITUS_DIST_STAT_ACTIVITY_QUERY); + List *citusDistStatStatements = CitusStatActivity(CITUS_DIST_STAT_ACTIVITY_QUERY); ReturnCitusDistStats(citusDistStatStatements, fcinfo); @@ -289,11 +287,9 @@ citus_dist_stat_activity(PG_FUNCTION_ARGS) Datum citus_worker_stat_activity(PG_FUNCTION_ARGS) { - List *citusWorkerStatStatements = NIL; - CheckCitusVersion(ERROR); - citusWorkerStatStatements = CitusStatActivity(CITUS_WORKER_STAT_ACTIVITY_QUERY); + List *citusWorkerStatStatements = CitusStatActivity(CITUS_WORKER_STAT_ACTIVITY_QUERY); ReturnCitusDistStats(citusWorkerStatStatements, fcinfo); @@ -315,11 +311,8 @@ citus_worker_stat_activity(PG_FUNCTION_ARGS) static List * CitusStatActivity(const char *statQuery) { - List *citusStatsList = NIL; - List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock); ListCell *workerNodeCell = NULL; - char *nodeUser = NULL; List *connectionList = NIL; ListCell *connectionCell = NULL; @@ -329,14 +322,14 @@ 
CitusStatActivity(const char *statQuery) * the authentication for self-connection via any user who calls the citus * stat activity functions. */ - citusStatsList = GetLocalNodeCitusDistStat(statQuery); + List *citusStatsList = GetLocalNodeCitusDistStat(statQuery); /* * We prefer to connect with the current user to the remote nodes. This will * ensure that we have the same privilage restrictions that pg_stat_activity * enforces. */ - nodeUser = CurrentUserName(); + char *nodeUser = CurrentUserName(); /* open connections in parallel */ foreach(workerNodeCell, workerNodeList) @@ -344,7 +337,6 @@ CitusStatActivity(const char *statQuery) WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - MultiConnection *connection = NULL; int connectionFlags = 0; if (workerNode->groupId == GetLocalGroupId()) @@ -353,8 +345,9 @@ CitusStatActivity(const char *statQuery) continue; } - connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - nodeUser, NULL); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + nodeUser, NULL); connectionList = lappend(connectionList, connection); } @@ -365,9 +358,8 @@ CitusStatActivity(const char *statQuery) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - int querySent = false; - querySent = SendRemoteCommand(connection, statQuery); + int querySent = SendRemoteCommand(connection, statQuery); if (querySent == 0) { ReportConnectionError(connection, WARNING); @@ -378,21 +370,17 @@ CitusStatActivity(const char *statQuery) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - PGresult *result = NULL; bool raiseInterrupts = true; - int64 rowIndex = 0; - int64 rowCount = 0; - int64 colCount = 0; - result = GetRemoteCommandResult(connection, raiseInterrupts); + 
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); continue; } - rowCount = PQntuples(result); - colCount = PQnfields(result); + int64 rowCount = PQntuples(result); + int64 colCount = PQnfields(result); if (colCount != CITUS_DIST_STAT_ACTIVITY_QUERY_COLS) { @@ -405,7 +393,7 @@ CitusStatActivity(const char *statQuery) continue; } - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) { CitusDistStat *citusDistStat = ParseCitusDistStat(result, rowIndex); @@ -436,9 +424,7 @@ GetLocalNodeCitusDistStat(const char *statQuery) { List *citusStatsList = NIL; - List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; - int localGroupId = -1; if (IsCoordinator()) { @@ -452,10 +438,10 @@ GetLocalNodeCitusDistStat(const char *statQuery) return citusStatsList; } - localGroupId = GetLocalGroupId(); + int localGroupId = GetLocalGroupId(); /* get the current worker's node stats */ - workerNodeList = ActivePrimaryWorkerNodeList(NoLock); + List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); @@ -488,10 +474,9 @@ static CitusDistStat * ParseCitusDistStat(PGresult *result, int64 rowIndex) { CitusDistStat *citusDistStat = (CitusDistStat *) palloc0(sizeof(CitusDistStat)); - int initiator_node_identifier = 0; - initiator_node_identifier = + int initiator_node_identifier = PQgetisnull(result, rowIndex, 0) ? 
-1 : ParseIntField(result, rowIndex, 0); ReplaceInitiatorNodeIdentifier(initiator_node_identifier, citusDistStat); @@ -591,14 +576,11 @@ static List * LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port) { List *localNodeCitusDistStatList = NIL; - int spiConnectionResult = 0; - int spiQueryResult = 0; bool readOnly = true; - uint32 rowIndex = 0; MemoryContext upperContext = CurrentMemoryContext, oldContext = NULL; - spiConnectionResult = SPI_connect(); + int spiConnectionResult = SPI_connect(); if (spiConnectionResult != SPI_OK_CONNECT) { ereport(WARNING, (errmsg("could not connect to SPI manager to get " @@ -609,7 +591,7 @@ LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port) return NIL; } - spiQueryResult = SPI_execute(statQuery, readOnly, 0); + int spiQueryResult = SPI_execute(statQuery, readOnly, 0); if (spiQueryResult != SPI_OK_SELECT) { ereport(WARNING, (errmsg("execution was not successful while trying to get " @@ -629,15 +611,13 @@ LocalNodeCitusDistStat(const char *statQuery, const char *hostname, int port) */ oldContext = MemoryContextSwitchTo(upperContext); - for (rowIndex = 0; rowIndex < SPI_processed; rowIndex++) + for (uint32 rowIndex = 0; rowIndex < SPI_processed; rowIndex++) { - HeapTuple row = NULL; TupleDesc rowDescriptor = SPI_tuptable->tupdesc; - CitusDistStat *citusDistStat = NULL; /* we use pointers from the tuple, so copy it before processing */ - row = SPI_copytuple(SPI_tuptable->vals[rowIndex]); - citusDistStat = HeapTupleToCitusDistStat(row, rowDescriptor); + HeapTuple row = SPI_copytuple(SPI_tuptable->vals[rowIndex]); + CitusDistStat *citusDistStat = HeapTupleToCitusDistStat(row, rowDescriptor); /* * Add the query_host_name and query_host_port which denote where @@ -670,9 +650,8 @@ static CitusDistStat * HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor) { CitusDistStat *citusDistStat = (CitusDistStat *) palloc0(sizeof(CitusDistStat)); - int initiator_node_identifier = 0; 
- initiator_node_identifier = ParseIntFieldFromHeapTuple(result, rowDescriptor, 1); + int initiator_node_identifier = ParseIntFieldFromHeapTuple(result, rowDescriptor, 1); ReplaceInitiatorNodeIdentifier(initiator_node_identifier, citusDistStat); @@ -721,10 +700,9 @@ HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor) static int64 ParseIntFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { return 0; @@ -741,10 +719,9 @@ ParseIntFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) static text * ParseTextFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { return NULL; @@ -761,10 +738,9 @@ ParseTextFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) static Name ParseNameFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { return NULL; @@ -781,10 +757,9 @@ ParseNameFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) static inet * ParseInetFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { return NULL; @@ -801,10 +776,9 @@ ParseInetFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) static TimestampTz ParseTimestampTzFieldFromHeapTuple(HeapTuple tuple, 
TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { return DT_NOBEGIN; @@ -821,10 +795,9 @@ ParseTimestampTzFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIn static TransactionId ParseXIDFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) { - Datum resultDatum; bool isNull = false; - resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); + Datum resultDatum = SPI_getbinval(tuple, tupdesc, colIndex, &isNull); if (isNull) { /* @@ -845,18 +818,14 @@ ParseXIDFieldFromHeapTuple(HeapTuple tuple, TupleDesc tupdesc, int colIndex) static text * ParseTextField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - Datum resultStringDatum = 0; - Datum textDatum = 0; - if (PQgetisnull(result, rowIndex, colIndex)) { return NULL; } - resultString = PQgetvalue(result, rowIndex, colIndex); - resultStringDatum = CStringGetDatum(resultString); - textDatum = DirectFunctionCall1(textin, resultStringDatum); + char *resultString = PQgetvalue(result, rowIndex, colIndex); + Datum resultStringDatum = CStringGetDatum(resultString); + Datum textDatum = DirectFunctionCall1(textin, resultStringDatum); return (text *) DatumGetPointer(textDatum); } @@ -869,8 +838,6 @@ ParseTextField(PGresult *result, int rowIndex, int colIndex) static Name ParseNameField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - Datum resultStringDatum = 0; Datum nameDatum = 0; if (PQgetisnull(result, rowIndex, colIndex)) @@ -878,8 +845,8 @@ ParseNameField(PGresult *result, int rowIndex, int colIndex) return (Name) nameDatum; } - resultString = PQgetvalue(result, rowIndex, colIndex); - resultStringDatum = CStringGetDatum(resultString); + char *resultString = PQgetvalue(result, rowIndex, colIndex); + Datum resultStringDatum = CStringGetDatum(resultString); 
nameDatum = DirectFunctionCall1(namein, resultStringDatum); return (Name) DatumGetPointer(nameDatum); @@ -893,18 +860,14 @@ ParseNameField(PGresult *result, int rowIndex, int colIndex) static inet * ParseInetField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - Datum resultStringDatum = 0; - Datum inetDatum = 0; - if (PQgetisnull(result, rowIndex, colIndex)) { return NULL; } - resultString = PQgetvalue(result, rowIndex, colIndex); - resultStringDatum = CStringGetDatum(resultString); - inetDatum = DirectFunctionCall1(inet_in, resultStringDatum); + char *resultString = PQgetvalue(result, rowIndex, colIndex); + Datum resultStringDatum = CStringGetDatum(resultString); + Datum inetDatum = DirectFunctionCall1(inet_in, resultStringDatum); return DatumGetInetP(inetDatum); } @@ -917,10 +880,6 @@ ParseInetField(PGresult *result, int rowIndex, int colIndex) static TransactionId ParseXIDField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - Datum resultStringDatum = 0; - Datum XIDDatum = 0; - if (PQgetisnull(result, rowIndex, colIndex)) { /* @@ -930,9 +889,9 @@ ParseXIDField(PGresult *result, int rowIndex, int colIndex) return PG_UINT32_MAX; } - resultString = PQgetvalue(result, rowIndex, colIndex); - resultStringDatum = CStringGetDatum(resultString); - XIDDatum = DirectFunctionCall1(xidin, resultStringDatum); + char *resultString = PQgetvalue(result, rowIndex, colIndex); + Datum resultStringDatum = CStringGetDatum(resultString); + Datum XIDDatum = DirectFunctionCall1(xidin, resultStringDatum); return DatumGetTransactionId(XIDDatum); } diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index 1e968140f..5a141164a 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -103,11 +103,8 @@ 
check_distributed_deadlocks(PG_FUNCTION_ARGS) bool CheckForDistributedDeadlocks(void) { - WaitGraph *waitGraph = NULL; - HTAB *adjacencyLists = NULL; HASH_SEQ_STATUS status; TransactionNode *transactionNode = NULL; - int edgeCount = 0; int localGroupId = GetLocalGroupId(); List *workerNodeList = ActiveReadableNodeList(); @@ -122,10 +119,10 @@ CheckForDistributedDeadlocks(void) return false; } - waitGraph = BuildGlobalWaitGraph(); - adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph); + WaitGraph *waitGraph = BuildGlobalWaitGraph(); + HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph); - edgeCount = waitGraph->edgeCount; + int edgeCount = waitGraph->edgeCount; /* * We iterate on transaction nodes and search for deadlocks where the @@ -134,7 +131,6 @@ CheckForDistributedDeadlocks(void) hash_seq_init(&status, adjacencyLists); while ((transactionNode = (TransactionNode *) hash_seq_search(&status)) != 0) { - bool deadlockFound = false; List *deadlockPath = NIL; /* @@ -151,9 +147,9 @@ CheckForDistributedDeadlocks(void) ResetVisitedFields(adjacencyLists); - deadlockFound = CheckDeadlockForTransactionNode(transactionNode, - maxStackDepth, - &deadlockPath); + bool deadlockFound = CheckDeadlockForTransactionNode(transactionNode, + maxStackDepth, + &deadlockPath); if (deadlockFound) { TransactionNode *youngestAliveTransaction = NULL; @@ -184,8 +180,6 @@ CheckForDistributedDeadlocks(void) (TransactionNode *) lfirst(participantTransactionCell); bool transactionAssociatedWithProc = AssociateDistributedTransactionWithBackendProc(currentNode); - TimestampTz youngestTimestamp = 0; - TimestampTz currentTimestamp = 0; LogTransactionNode(currentNode); @@ -201,8 +195,9 @@ CheckForDistributedDeadlocks(void) continue; } - youngestTimestamp = youngestAliveTransaction->transactionId.timestamp; - currentTimestamp = currentNode->transactionId.timestamp; + TimestampTz youngestTimestamp = + youngestAliveTransaction->transactionId.timestamp; + TimestampTz currentTimestamp 
= currentNode->transactionId.timestamp; if (timestamptz_cmp_internal(currentTimestamp, youngestTimestamp) == 1) { youngestAliveTransaction = currentNode; @@ -258,7 +253,6 @@ CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode, /* traverse the graph and search for the deadlocks */ while (toBeVisitedNodes != NIL) { - int currentStackDepth; QueuedTransactionNode *queuedTransactionNode = (QueuedTransactionNode *) linitial(toBeVisitedNodes); TransactionNode *currentTransactionNode = queuedTransactionNode->transactionNode; @@ -284,7 +278,7 @@ CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode, currentTransactionNode->transactionVisited = true; /* set the stack's corresponding element with the current node */ - currentStackDepth = queuedTransactionNode->currentStackDepth; + int currentStackDepth = queuedTransactionNode->currentStackDepth; Assert(currentStackDepth < maxStackDepth); transactionNodeStack[currentStackDepth] = currentTransactionNode; @@ -335,11 +329,10 @@ BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode, List **deadlockPath) { int deadlockStackDepth = cycledTransactionNode->currentStackDepth; - int stackIndex = 0; *deadlockPath = NIL; - for (stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++) + for (int stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++) { *deadlockPath = lappend(*deadlockPath, transactionNodeStack[stackIndex]); } @@ -380,13 +373,10 @@ ResetVisitedFields(HTAB *adjacencyList) static bool AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode) { - int backendIndex = 0; - - for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) + for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; BackendData currentBackendData; - DistributedTransactionId *currentTransactionId = NULL; /* we're not interested in processes that are not active or waiting on a lock */ if 
(currentProc->pid <= 0) @@ -402,7 +392,8 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode) continue; } - currentTransactionId = &currentBackendData.transactionId; + DistributedTransactionId *currentTransactionId = + &currentBackendData.transactionId; if (currentTransactionId->transactionNumber != transactionNode->transactionId.transactionNumber) @@ -455,9 +446,6 @@ extern HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph) { HASHCTL info; - uint32 hashFlags = 0; - HTAB *adjacencyList = NULL; - int edgeIndex = 0; int edgeCount = waitGraph->edgeCount; memset(&info, 0, sizeof(info)); @@ -466,15 +454,14 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph) info.hash = DistributedTransactionIdHash; info.match = DistributedTransactionIdCompare; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); + uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); - adjacencyList = hash_create("distributed deadlock detection", 64, &info, hashFlags); + HTAB *adjacencyList = hash_create("distributed deadlock detection", 64, &info, + hashFlags); - for (edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++) + for (int edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++) { WaitEdge *edge = &waitGraph->edges[edgeIndex]; - TransactionNode *waitingTransaction = NULL; - TransactionNode *blockingTransaction = NULL; bool transactionOriginator = false; DistributedTransactionId waitingId = { @@ -491,9 +478,9 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph) edge->blockingTransactionStamp }; - waitingTransaction = + TransactionNode *waitingTransaction = GetOrCreateTransactionNode(adjacencyList, &waitingId); - blockingTransaction = + TransactionNode *blockingTransaction = GetOrCreateTransactionNode(adjacencyList, &blockingId); waitingTransaction->waitsFor = lappend(waitingTransaction->waitsFor, @@ -512,11 +499,12 @@ BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph) static 
TransactionNode * GetOrCreateTransactionNode(HTAB *adjacencyList, DistributedTransactionId *transactionId) { - TransactionNode *transactionNode = NULL; bool found = false; - transactionNode = (TransactionNode *) hash_search(adjacencyList, transactionId, - HASH_ENTER, &found); + TransactionNode *transactionNode = (TransactionNode *) hash_search(adjacencyList, + transactionId, + HASH_ENTER, + &found); if (!found) { transactionNode->waitsFor = NIL; @@ -535,9 +523,8 @@ static uint32 DistributedTransactionIdHash(const void *key, Size keysize) { DistributedTransactionId *entry = (DistributedTransactionId *) key; - uint32 hash = 0; - hash = hash_uint32(entry->initiatorNodeIdentifier); + uint32 hash = hash_uint32(entry->initiatorNodeIdentifier); hash = hash_combine(hash, hash_any((unsigned char *) &entry->transactionNumber, sizeof(int64))); hash = hash_combine(hash, hash_any((unsigned char *) &entry->timestamp, @@ -601,14 +588,12 @@ DistributedTransactionIdCompare(const void *a, const void *b, Size keysize) static void LogCancellingBackend(TransactionNode *transactionNode) { - StringInfo logMessage = NULL; - if (!LogDistributedDeadlockDetection) { return; } - logMessage = makeStringInfo(); + StringInfo logMessage = makeStringInfo(); appendStringInfo(logMessage, "Cancelling the following backend " "to resolve distributed deadlock " @@ -627,16 +612,13 @@ LogCancellingBackend(TransactionNode *transactionNode) static void LogTransactionNode(TransactionNode *transactionNode) { - StringInfo logMessage = NULL; - DistributedTransactionId *transactionId = NULL; - if (!LogDistributedDeadlockDetection) { return; } - logMessage = makeStringInfo(); - transactionId = &(transactionNode->transactionId); + StringInfo logMessage = makeStringInfo(); + DistributedTransactionId *transactionId = &(transactionNode->transactionId); appendStringInfo(logMessage, "[DistributedTransactionId: (%d, " UINT64_FORMAT ", %s)] = ", diff --git a/src/backend/distributed/transaction/lock_graph.c 
b/src/backend/distributed/transaction/lock_graph.c index bfc2a29e3..911a286c7 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -73,9 +73,7 @@ PG_FUNCTION_INFO_V1(dump_global_wait_edges); Datum dump_global_wait_edges(PG_FUNCTION_ARGS) { - WaitGraph *waitGraph = NULL; - - waitGraph = BuildGlobalWaitGraph(); + WaitGraph *waitGraph = BuildGlobalWaitGraph(); ReturnWaitGraph(waitGraph, fcinfo); @@ -106,7 +104,6 @@ BuildGlobalWaitGraph(void) WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - MultiConnection *connection = NULL; int connectionFlags = 0; if (workerNode->groupId == localNodeId) @@ -115,8 +112,9 @@ BuildGlobalWaitGraph(void) continue; } - connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - nodeUser, NULL); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + nodeUser, NULL); connectionList = lappend(connectionList, connection); } @@ -127,10 +125,9 @@ BuildGlobalWaitGraph(void) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - int querySent = false; const char *command = "SELECT * FROM dump_local_wait_edges()"; - querySent = SendRemoteCommand(connection, command); + int querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, WARNING); @@ -141,21 +138,17 @@ BuildGlobalWaitGraph(void) foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); - PGresult *result = NULL; bool raiseInterrupts = true; - int64 rowIndex = 0; - int64 rowCount = 0; - int64 colCount = 0; - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { 
ReportResultError(connection, result, WARNING); continue; } - rowCount = PQntuples(result); - colCount = PQnfields(result); + int64 rowCount = PQntuples(result); + int64 colCount = PQnfields(result); if (colCount != 9) { @@ -164,7 +157,7 @@ BuildGlobalWaitGraph(void) continue; } - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) { AddWaitEdgeFromResult(waitGraph, result, rowIndex); } @@ -205,14 +198,12 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex) int64 ParseIntField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - if (PQgetisnull(result, rowIndex, colIndex)) { return 0; } - resultString = PQgetvalue(result, rowIndex, colIndex); + char *resultString = PQgetvalue(result, rowIndex, colIndex); return pg_strtouint64(resultString, NULL, 10); } @@ -225,14 +216,12 @@ ParseIntField(PGresult *result, int rowIndex, int colIndex) bool ParseBoolField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - if (PQgetisnull(result, rowIndex, colIndex)) { return false; } - resultString = PQgetvalue(result, rowIndex, colIndex); + char *resultString = PQgetvalue(result, rowIndex, colIndex); if (strlen(resultString) != 1) { return false; @@ -249,18 +238,14 @@ ParseBoolField(PGresult *result, int rowIndex, int colIndex) TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex) { - char *resultString = NULL; - Datum resultStringDatum = 0; - Datum timestampDatum = 0; - if (PQgetisnull(result, rowIndex, colIndex)) { return DT_NOBEGIN; } - resultString = PQgetvalue(result, rowIndex, colIndex); - resultStringDatum = CStringGetDatum(resultString); - timestampDatum = DirectFunctionCall3(timestamptz_in, resultStringDatum, 0, -1); + char *resultString = PQgetvalue(result, rowIndex, colIndex); + Datum resultStringDatum = CStringGetDatum(resultString); + Datum timestampDatum = DirectFunctionCall3(timestamptz_in, 
resultStringDatum, 0, -1); return DatumGetTimestampTz(timestampDatum); } @@ -286,7 +271,6 @@ dump_local_wait_edges(PG_FUNCTION_ARGS) static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) { - size_t curEdgeNum = 0; TupleDesc tupleDesc; Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDesc); @@ -302,7 +286,7 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) * 07: blocking_transaction_stamp * 08: blocking_transaction_waiting */ - for (curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++) + for (size_t curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++) { Datum values[9]; bool nulls[9]; @@ -353,8 +337,6 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) static WaitGraph * BuildLocalWaitGraph(void) { - WaitGraph *waitGraph = NULL; - int curBackend = 0; PROCStack remaining; int totalProcs = TotalProcCount(); @@ -364,7 +346,7 @@ BuildLocalWaitGraph(void) * more than enough space to build the list of wait edges without a single * allocation. 
*/ - waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph)); + WaitGraph *waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph)); waitGraph->localNodeId = GetLocalGroupId(); waitGraph->allocatedSize = totalProcs * 3; waitGraph->edgeCount = 0; @@ -384,7 +366,7 @@ BuildLocalWaitGraph(void) */ /* build list of starting procs */ - for (curBackend = 0; curBackend < totalProcs; curBackend++) + for (int curBackend = 0; curBackend < totalProcs; curBackend++) { PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; BackendData currentBackendData; @@ -476,24 +458,20 @@ BuildLocalWaitGraph(void) static bool IsProcessWaitingForSafeOperations(PGPROC *proc) { - PROCLOCK *waitProcLock = NULL; - LOCK *waitLock = NULL; - PGXACT *pgxact = NULL; - if (proc->waitStatus != STATUS_WAITING) { return false; } /* get the transaction that the backend associated with */ - pgxact = &ProcGlobal->allPgXact[proc->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno]; if (pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) { return true; } - waitProcLock = proc->waitProcLock; - waitLock = waitProcLock->tag.myLock; + PROCLOCK *waitProcLock = proc->waitProcLock; + LOCK *waitLock = waitProcLock->tag.myLock; return waitLock->tag.locktag_type == LOCKTAG_RELATION_EXTEND || waitLock->tag.locktag_type == LOCKTAG_PAGE || @@ -511,11 +489,9 @@ IsProcessWaitingForSafeOperations(PGPROC *proc) static void LockLockData(void) { - int partitionNum = 0; - LockBackendSharedMemory(LW_SHARED); - for (partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++) + for (int partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++) { LWLockAcquire(LockHashPartitionLockByIndex(partitionNum), LW_SHARED); } @@ -533,9 +509,7 @@ LockLockData(void) static void UnlockLockData(void) { - int partitionNum = 0; - - for (partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--) + for (int partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--) { 
LWLockRelease(LockHashPartitionLockByIndex(partitionNum)); } diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index 1dbf525b5..07e31d416 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -133,14 +133,13 @@ void AllocateRelationAccessHash(void) { HASHCTL info; - uint32 hashFlags = 0; memset(&info, 0, sizeof(info)); info.keysize = sizeof(RelationAccessHashKey); info.entrysize = sizeof(RelationAccessHashEntry); info.hash = tag_hash; info.hcxt = ConnectionContext; - hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + uint32 hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); RelationAccessHash = hash_create("citus connection cache (relationid)", 8, &info, hashFlags); @@ -244,12 +243,12 @@ static void RecordPlacementAccessToCache(Oid relationId, ShardPlacementAccessType accessType) { RelationAccessHashKey hashKey; - RelationAccessHashEntry *hashEntry; bool found = false; hashKey.relationId = relationId; - hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_ENTER, &found); + RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey, + HASH_ENTER, &found); if (!found) { hashEntry->relationAccessMode = 0; @@ -270,8 +269,6 @@ RecordPlacementAccessToCache(Oid relationId, ShardPlacementAccessType accessType void RecordParallelRelationAccessForTaskList(List *taskList) { - Task *firstTask = NULL; - if (MultiShardConnectionType == SEQUENTIAL_CONNECTION) { /* sequential mode prevents parallel access */ @@ -288,7 +285,7 @@ RecordParallelRelationAccessForTaskList(List *taskList) * Since all the tasks in a task list is expected to operate on the same * distributed table(s), we only need to process the first task. 
*/ - firstTask = linitial(taskList); + Task *firstTask = linitial(taskList); if (firstTask->taskType == SQL_TASK) { @@ -328,7 +325,6 @@ RecordParallelRelationAccessForTaskList(List *taskList) static void RecordRelationParallelSelectAccessForTask(Task *task) { - List *relationShardList = NIL; ListCell *relationShardCell = NULL; Oid lastRelationId = InvalidOid; @@ -338,7 +334,7 @@ RecordRelationParallelSelectAccessForTask(Task *task) return; } - relationShardList = task->relationShardList; + List *relationShardList = task->relationShardList; foreach(relationShardCell, relationShardList) { @@ -528,13 +524,12 @@ RecordParallelRelationAccessToCache(Oid relationId, ShardPlacementAccessType placementAccess) { RelationAccessHashKey hashKey; - RelationAccessHashEntry *hashEntry; bool found = false; - int parallelRelationAccessBit = 0; hashKey.relationId = relationId; - hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_ENTER, &found); + RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey, + HASH_ENTER, &found); if (!found) { hashEntry->relationAccessMode = 0; @@ -544,7 +539,7 @@ RecordParallelRelationAccessToCache(Oid relationId, hashEntry->relationAccessMode |= (1 << (placementAccess)); /* set the bit representing access mode */ - parallelRelationAccessBit = placementAccess + PARALLEL_MODE_FLAG_OFFSET; + int parallelRelationAccessBit = placementAccess + PARALLEL_MODE_FLAG_OFFSET; hashEntry->relationAccessMode |= (1 << parallelRelationAccessBit); } @@ -557,7 +552,6 @@ bool ParallelQueryExecutedInTransaction(void) { HASH_SEQ_STATUS status; - RelationAccessHashEntry *hashEntry; if (!ShouldRecordRelationAccess() || RelationAccessHash == NULL) { @@ -566,7 +560,8 @@ ParallelQueryExecutedInTransaction(void) hash_seq_init(&status, RelationAccessHash); - hashEntry = (RelationAccessHashEntry *) hash_seq_search(&status); + RelationAccessHashEntry *hashEntry = (RelationAccessHashEntry *) hash_seq_search( + &status); while (hashEntry != NULL) { int 
relationAccessMode = hashEntry->relationAccessMode; @@ -621,8 +616,6 @@ static RelationAccessMode GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType) { RelationAccessHashKey hashKey; - RelationAccessHashEntry *hashEntry; - int relationAcessMode = 0; bool found = false; int parallelRelationAccessBit = accessType + PARALLEL_MODE_FLAG_OFFSET; @@ -634,7 +627,8 @@ GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType) hashKey.relationId = relationId; - hashEntry = hash_search(RelationAccessHash, &hashKey, HASH_FIND, &found); + RelationAccessHashEntry *hashEntry = hash_search(RelationAccessHash, &hashKey, + HASH_FIND, &found); if (!found) { /* relation not accessed at all */ @@ -642,7 +636,7 @@ GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType) } - relationAcessMode = hashEntry->relationAccessMode; + int relationAcessMode = hashEntry->relationAccessMode; if (!(relationAcessMode & (1 << accessType))) { /* relation not accessed with the given access type */ @@ -692,7 +686,6 @@ ShouldRecordRelationAccess() static void CheckConflictingRelationAccesses(Oid relationId, ShardPlacementAccessType accessType) { - DistTableCacheEntry *cacheEntry = NULL; Oid conflictingReferencingRelationId = InvalidOid; ShardPlacementAccessType conflictingAccessType = PLACEMENT_ACCESS_SELECT; @@ -701,7 +694,7 @@ CheckConflictingRelationAccesses(Oid relationId, ShardPlacementAccessType access return; } - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE && cacheEntry->referencingRelationsViaForeignKey != NIL)) @@ -791,7 +784,6 @@ static void CheckConflictingParallelRelationAccesses(Oid relationId, ShardPlacementAccessType accessType) { - DistTableCacheEntry *cacheEntry = NULL; Oid conflictingReferencingRelationId = InvalidOid; ShardPlacementAccessType conflictingAccessType = PLACEMENT_ACCESS_SELECT; 
@@ -800,7 +792,7 @@ CheckConflictingParallelRelationAccesses(Oid relationId, ShardPlacementAccessTyp return; } - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (!(cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH && cacheEntry->referencedRelationsViaForeignKey != NIL)) { @@ -877,9 +869,6 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess foreach(referencedRelationCell, cacheEntry->referencedRelationsViaForeignKey) { Oid referencedRelation = lfirst_oid(referencedRelationCell); - RelationAccessMode selectMode = RELATION_NOT_ACCESSED; - RelationAccessMode dmlMode = RELATION_NOT_ACCESSED; - RelationAccessMode ddlMode = RELATION_NOT_ACCESSED; /* we're only interested in foreign keys to reference tables */ if (PartitionMethod(referencedRelation) != DISTRIBUTE_BY_NONE) @@ -891,7 +880,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess * A select on a reference table could conflict with a DDL * on a distributed table. */ - selectMode = GetRelationSelectAccessMode(referencedRelation); + RelationAccessMode selectMode = GetRelationSelectAccessMode(referencedRelation); if (placementAccess == PLACEMENT_ACCESS_DDL && selectMode != RELATION_NOT_ACCESSED) { @@ -905,7 +894,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess * Both DML and DDL operations on a reference table conflicts with * any parallel operation on distributed tables. 
*/ - dmlMode = GetRelationDMLAccessMode(referencedRelation); + RelationAccessMode dmlMode = GetRelationDMLAccessMode(referencedRelation); if (dmlMode != RELATION_NOT_ACCESSED) { *conflictingRelationId = referencedRelation; @@ -914,7 +903,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess return true; } - ddlMode = GetRelationDDLAccessMode(referencedRelation); + RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencedRelation); if (ddlMode != RELATION_NOT_ACCESSED) { *conflictingRelationId = referencedRelation; @@ -985,7 +974,6 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces } else if (placementAccess == PLACEMENT_ACCESS_DML) { - RelationAccessMode ddlMode = RELATION_NOT_ACCESSED; RelationAccessMode dmlMode = GetRelationDMLAccessMode(referencingRelation); if (dmlMode == RELATION_PARALLEL_ACCESSED) @@ -994,7 +982,7 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces *conflictingAccessMode = PLACEMENT_ACCESS_DML; } - ddlMode = GetRelationDDLAccessMode(referencingRelation); + RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencingRelation); if (ddlMode == RELATION_PARALLEL_ACCESSED) { /* SELECT on a distributed table conflicts with DDL / TRUNCATE */ @@ -1004,25 +992,22 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces } else if (placementAccess == PLACEMENT_ACCESS_DDL) { - RelationAccessMode selectMode = RELATION_NOT_ACCESSED; - RelationAccessMode ddlMode = RELATION_NOT_ACCESSED; - RelationAccessMode dmlMode = RELATION_NOT_ACCESSED; - - selectMode = GetRelationSelectAccessMode(referencingRelation); + RelationAccessMode selectMode = GetRelationSelectAccessMode( + referencingRelation); if (selectMode == RELATION_PARALLEL_ACCESSED) { holdsConflictingLocks = true; *conflictingAccessMode = PLACEMENT_ACCESS_SELECT; } - dmlMode = GetRelationDMLAccessMode(referencingRelation); + RelationAccessMode dmlMode = 
GetRelationDMLAccessMode(referencingRelation); if (dmlMode == RELATION_PARALLEL_ACCESSED) { holdsConflictingLocks = true; *conflictingAccessMode = PLACEMENT_ACCESS_DML; } - ddlMode = GetRelationDDLAccessMode(referencingRelation); + RelationAccessMode ddlMode = GetRelationDDLAccessMode(referencingRelation); if (ddlMode == RELATION_PARALLEL_ACCESSED) { holdsConflictingLocks = true; diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index c9250ed96..d284ba497 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -59,10 +59,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; StringInfo beginAndSetDistributedTransactionId = makeStringInfo(); - DistributedTransactionId *distributedTransactionId = NULL; ListCell *subIdCell = NULL; - List *activeSubXacts = NIL; - const char *timestamp = NULL; Assert(transaction->transactionState == REMOTE_TRANS_INVALID); @@ -84,8 +81,9 @@ StartRemoteTransactionBegin(struct MultiConnection *connection) * and send both in one step. The reason is purely performance, we don't want * seperate roundtrips for these two statements. 
*/ - distributedTransactionId = GetCurrentDistributedTransactionId(); - timestamp = timestamptz_to_str(distributedTransactionId->timestamp); + DistributedTransactionId *distributedTransactionId = + GetCurrentDistributedTransactionId(); + const char *timestamp = timestamptz_to_str(distributedTransactionId->timestamp); appendStringInfo(beginAndSetDistributedTransactionId, "SELECT assign_distributed_transaction_id(%d, " UINT64_FORMAT ", '%s');", @@ -94,7 +92,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection) timestamp); /* append context for in-progress SAVEPOINTs for this transaction */ - activeSubXacts = ActiveSubXactContexts(); + List *activeSubXacts = ActiveSubXactContexts(); transaction->lastSuccessfulSubXact = TopSubTransactionId; transaction->lastQueuedSubXact = TopSubTransactionId; foreach(subIdCell, activeSubXacts) @@ -139,12 +137,11 @@ void FinishRemoteTransactionBegin(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; - bool clearSuccessful = true; bool raiseErrors = true; Assert(transaction->transactionState == REMOTE_TRANS_STARTING); - clearSuccessful = ClearResults(connection, raiseErrors); + bool clearSuccessful = ClearResults(connection, raiseErrors); if (clearSuccessful) { transaction->transactionState = REMOTE_TRANS_STARTED; @@ -276,7 +273,6 @@ void FinishRemoteTransactionCommit(MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; - PGresult *result = NULL; const bool raiseErrors = false; const bool isCommit = true; @@ -284,7 +280,7 @@ FinishRemoteTransactionCommit(MultiConnection *connection) transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING || transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING); - result = GetRemoteCommandResult(connection, raiseErrors); + PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) { @@ -476,7 +472,6 @@ StartRemoteTransactionPrepare(struct 
MultiConnection *connection) RemoteTransaction *transaction = &connection->remoteTransaction; StringInfoData command; const bool raiseErrors = true; - WorkerNode *workerNode = NULL; /* can't prepare a nonexistant transaction */ Assert(transaction->transactionState != REMOTE_TRANS_INVALID); @@ -490,7 +485,7 @@ StartRemoteTransactionPrepare(struct MultiConnection *connection) Assign2PCIdentifier(connection); /* log transactions to workers in pg_dist_transaction */ - workerNode = FindWorkerNode(connection->hostname, connection->port); + WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port); if (workerNode != NULL) { LogTransactionRecord(workerNode->groupId, transaction->preparedName); @@ -520,12 +515,11 @@ void FinishRemoteTransactionPrepare(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; - PGresult *result = NULL; const bool raiseErrors = true; Assert(transaction->transactionState == REMOTE_TRANS_PREPARING); - result = GetRemoteCommandResult(connection, raiseErrors); + PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) { @@ -596,7 +590,6 @@ void RemoteTransactionsBeginIfNecessary(List *connectionList) { ListCell *connectionCell = NULL; - bool raiseInterrupts = true; /* * Don't do anything if not in a coordinated transaction. 
That allows the @@ -630,7 +623,7 @@ RemoteTransactionsBeginIfNecessary(List *connectionList) StartRemoteTransactionBegin(connection); } - raiseInterrupts = true; + bool raiseInterrupts = true; WaitForAllConnections(connectionList, raiseInterrupts); /* get result of all the BEGINs */ @@ -798,7 +791,6 @@ void CoordinatedRemoteTransactionsPrepare(void) { dlist_iter iter; - bool raiseInterrupts = false; List *connectionList = NIL; /* issue PREPARE TRANSACTION; to all relevant remote nodes */ @@ -822,7 +814,7 @@ CoordinatedRemoteTransactionsPrepare(void) connectionList = lappend(connectionList, connection); } - raiseInterrupts = true; + bool raiseInterrupts = true; WaitForAllConnections(connectionList, raiseInterrupts); /* Wait for result */ @@ -857,7 +849,6 @@ CoordinatedRemoteTransactionsCommit(void) { dlist_iter iter; List *connectionList = NIL; - bool raiseInterrupts = false; /* * Issue appropriate transaction commands to remote nodes. If everything @@ -885,7 +876,7 @@ CoordinatedRemoteTransactionsCommit(void) connectionList = lappend(connectionList, connection); } - raiseInterrupts = false; + bool raiseInterrupts = false; WaitForAllConnections(connectionList, raiseInterrupts); /* wait for the replies to the commands to come in */ @@ -921,7 +912,6 @@ CoordinatedRemoteTransactionsAbort(void) { dlist_iter iter; List *connectionList = NIL; - bool raiseInterrupts = false; /* asynchronously send ROLLBACK [PREPARED] */ dlist_foreach(iter, &InProgressTransactions) @@ -942,7 +932,7 @@ CoordinatedRemoteTransactionsAbort(void) connectionList = lappend(connectionList, connection); } - raiseInterrupts = false; + bool raiseInterrupts = false; WaitForAllConnections(connectionList, raiseInterrupts); /* and wait for the results */ diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index a5df26dcd..330ac2a16 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ 
b/src/backend/distributed/transaction/transaction_recovery.c @@ -65,11 +65,9 @@ static bool RecoverPreparedTransactionOnWorker(MultiConnection *connection, Datum recover_prepared_transactions(PG_FUNCTION_ARGS) { - int recoveredTransactionCount = 0; - CheckCitusVersion(ERROR); - recoveredTransactionCount = RecoverTwoPhaseCommits(); + int recoveredTransactionCount = RecoverTwoPhaseCommits(); PG_RETURN_INT32(recoveredTransactionCount); } @@ -83,9 +81,6 @@ recover_prepared_transactions(PG_FUNCTION_ARGS) void LogTransactionRecord(int32 groupId, char *transactionName) { - Relation pgDistTransaction = NULL; - TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_transaction]; bool isNulls[Natts_pg_dist_transaction]; @@ -97,10 +92,10 @@ LogTransactionRecord(int32 groupId, char *transactionName) values[Anum_pg_dist_transaction_gid - 1] = CStringGetTextDatum(transactionName); /* open transaction relation and insert new tuple */ - pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock); + Relation pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistTransaction); - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistTransaction, heapTuple); @@ -118,11 +113,10 @@ LogTransactionRecord(int32 groupId, char *transactionName) int RecoverTwoPhaseCommits(void) { - List *workerList = NIL; ListCell *workerNodeCell = NULL; int recoveredTransactionCount = 0; - workerList = ActivePrimaryNodeList(NoLock); + List *workerList = ActivePrimaryNodeList(NoLock); foreach(workerNodeCell, workerList) { @@ -148,26 +142,14 @@ RecoverWorkerTransactions(WorkerNode *workerNode) char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - List *activeTransactionNumberList = NIL; - HTAB 
*activeTransactionNumberSet = NULL; - List *pendingTransactionList = NIL; - HTAB *pendingTransactionSet = NULL; - List *recheckTransactionList = NIL; - HTAB *recheckTransactionSet = NULL; - - Relation pgDistTransaction = NULL; - SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; HASH_SEQ_STATUS status; - MemoryContext localContext = NULL; - MemoryContext oldContext = NULL; bool recoveryFailed = false; int connectionFlags = 0; @@ -180,17 +162,18 @@ RecoverWorkerTransactions(WorkerNode *workerNode) return 0; } - localContext = AllocSetContextCreateExtended(CurrentMemoryContext, - "RecoverWorkerTransactions", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext localContext = AllocSetContextCreateExtended(CurrentMemoryContext, + "RecoverWorkerTransactions", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); - oldContext = MemoryContextSwitchTo(localContext); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); /* take table lock first to avoid running concurrently */ - pgDistTransaction = heap_open(DistTransactionRelationId(), ShareUpdateExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistTransaction); + Relation pgDistTransaction = heap_open(DistTransactionRelationId(), + ShareUpdateExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction); /* * We're going to check the list of prepared transactions on the worker, @@ -225,31 +208,33 @@ RecoverWorkerTransactions(WorkerNode *workerNode) */ /* find stale prepared transactions on the remote node */ - pendingTransactionList = PendingWorkerTransactionList(connection); - pendingTransactionSet = ListToHashSet(pendingTransactionList, NAMEDATALEN, true); + List *pendingTransactionList = PendingWorkerTransactionList(connection); + HTAB *pendingTransactionSet = 
ListToHashSet(pendingTransactionList, NAMEDATALEN, + true); /* find in-progress distributed transactions */ - activeTransactionNumberList = ActiveDistributedTransactionNumbers(); - activeTransactionNumberSet = ListToHashSet(activeTransactionNumberList, - sizeof(uint64), false); + List *activeTransactionNumberList = ActiveDistributedTransactionNumbers(); + HTAB *activeTransactionNumberSet = ListToHashSet(activeTransactionNumberList, + sizeof(uint64), false); /* scan through all recovery records of the current worker */ ScanKeyInit(&scanKey[0], Anum_pg_dist_transaction_groupid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId)); /* get a snapshot of pg_dist_transaction */ - scanDescriptor = systable_beginscan(pgDistTransaction, - DistTransactionGroupIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistTransaction, + DistTransactionGroupIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); /* find stale prepared transactions on the remote node */ - recheckTransactionList = PendingWorkerTransactionList(connection); - recheckTransactionSet = ListToHashSet(recheckTransactionList, NAMEDATALEN, true); + List *recheckTransactionList = PendingWorkerTransactionList(connection); + HTAB *recheckTransactionSet = ListToHashSet(recheckTransactionList, NAMEDATALEN, + true); while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor))) { bool isNull = false; - bool isTransactionInProgress = false; bool foundPreparedTransactionBeforeCommit = false; bool foundPreparedTransactionAfterCommit = false; @@ -258,8 +243,8 @@ RecoverWorkerTransactions(WorkerNode *workerNode) tupleDescriptor, &isNull); char *transactionName = TextDatumGetCString(transactionNameDatum); - isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet, - transactionName); + bool isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet, + transactionName); if (isTransactionInProgress) { /* @@ -375,17 +360,15 @@ 
RecoverWorkerTransactions(WorkerNode *workerNode) while ((pendingTransactionName = hash_seq_search(&status)) != NULL) { - bool isTransactionInProgress = false; - bool shouldCommit = false; - - isTransactionInProgress = IsTransactionInProgress(activeTransactionNumberSet, - pendingTransactionName); + bool isTransactionInProgress = IsTransactionInProgress( + activeTransactionNumberSet, + pendingTransactionName); if (isTransactionInProgress) { continue; } - shouldCommit = false; + bool shouldCommit = false; abortSucceeded = RecoverPreparedTransactionOnWorker(connection, pendingTransactionName, shouldCommit); @@ -415,10 +398,6 @@ PendingWorkerTransactionList(MultiConnection *connection) { StringInfo command = makeStringInfo(); bool raiseInterrupts = true; - int querySent = 0; - PGresult *result = NULL; - int rowCount = 0; - int rowIndex = 0; List *transactionNames = NIL; int coordinatorId = GetLocalGroupId(); @@ -426,21 +405,21 @@ PendingWorkerTransactionList(MultiConnection *connection) "WHERE gid LIKE 'citus\\_%d\\_%%'", coordinatorId); - querySent = SendRemoteCommand(connection, command->data); + int querySent = SendRemoteCommand(connection, command->data); if (querySent == 0) { ReportConnectionError(connection, ERROR); } - result = GetRemoteCommandResult(connection, raiseInterrupts); + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); } - rowCount = PQntuples(result); + int rowCount = PQntuples(result); - for (rowIndex = 0; rowIndex < rowCount; rowIndex++) + for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) { const int columnIndex = 0; char *transactionName = PQgetvalue(result, rowIndex, columnIndex); @@ -468,11 +447,12 @@ IsTransactionInProgress(HTAB *activeTransactionNumberSet, char *preparedTransact int procId = 0; uint32 connectionNumber = 0; uint64 transactionNumber = 0; - bool isValidName = false; bool isTransactionInProgress = false; - isValidName = 
ParsePreparedTransactionName(preparedTransactionName, &groupId, &procId, - &transactionNumber, &connectionNumber); + bool isValidName = ParsePreparedTransactionName(preparedTransactionName, &groupId, + &procId, + &transactionNumber, + &connectionNumber); if (isValidName) { hash_search(activeTransactionNumberSet, &transactionNumber, HASH_FIND, @@ -493,7 +473,6 @@ RecoverPreparedTransactionOnWorker(MultiConnection *connection, char *transactio { StringInfo command = makeStringInfo(); PGresult *result = NULL; - int executeCommand = 0; bool raiseInterrupts = false; if (shouldCommit) @@ -509,7 +488,7 @@ RecoverPreparedTransactionOnWorker(MultiConnection *connection, char *transactio quote_literal_cstr(transactionName)); } - executeCommand = ExecuteOptionalRemoteCommand(connection, command->data, &result); + int executeCommand = ExecuteOptionalRemoteCommand(connection, command->data, &result); if (executeCommand == QUERY_SEND_FAILED) { return false; diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 2d8cd4342..1d5dcc0b4 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -75,14 +75,15 @@ void SendCommandToWorkerAsUser(char *nodeName, int32 nodePort, const char *nodeUser, const char *command) { - MultiConnection *transactionConnection = NULL; uint connectionFlags = 0; BeginOrContinueCoordinatedTransaction(); CoordinatedTransactionUse2PC(); - transactionConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, - nodePort, nodeUser, NULL); + MultiConnection *transactionConnection = GetNodeUserDatabaseConnection( + connectionFlags, nodeName, + nodePort, + nodeUser, NULL); MarkRemoteTransactionCritical(transactionConnection); RemoteTransactionBeginIfNecessary(transactionConnection); @@ -155,14 +156,15 @@ SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList) /* run commands 
serially */ foreach(workerNodeCell, workerNodeList) { - MultiConnection *workerConnection = NULL; WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; int connectionFlags = FORCE_NEW_CONNECTION; - workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, - nodePort, nodeUser, NULL); + MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, + nodePort, + nodeUser, NULL); /* iterate over the commands and execute them in the same connection */ foreach(commandCell, commandList) @@ -194,14 +196,15 @@ SendBareOptionalCommandListToWorkersAsUser(TargetWorkerSet targetWorkerSet, /* run commands serially */ foreach(workerNodeCell, workerNodeList) { - MultiConnection *workerConnection = NULL; WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; int connectionFlags = FORCE_NEW_CONNECTION; - workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, - nodePort, user, NULL); + MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, + nodePort, user, + NULL); /* iterate over the commands and execute them in the same connection */ foreach(commandCell, commandList) @@ -250,11 +253,11 @@ SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, const char *command, WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; - MultiConnection *connection = NULL; int32 connectionFlags = 0; - connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - user, NULL); + MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + user, NULL); MarkRemoteTransactionCritical(connection); @@ -323,12 +326,12 @@ void 
SendCommandListToWorkerInSingleTransaction(const char *nodeName, int32 nodePort, const char *nodeUser, List *commandList) { - MultiConnection *workerConnection = NULL; ListCell *commandCell = NULL; int connectionFlags = FORCE_NEW_CONNECTION; - workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, - nodeUser, NULL); + MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + nodeUser, NULL); MarkRemoteTransactionCritical(workerConnection); RemoteTransactionBegin(workerConnection); diff --git a/src/backend/distributed/utils/acquire_lock.c b/src/backend/distributed/utils/acquire_lock.c index 3a954f433..fd32c7fb4 100644 --- a/src/backend/distributed/utils/acquire_lock.c +++ b/src/backend/distributed/utils/acquire_lock.c @@ -66,7 +66,6 @@ StartLockAcquireHelperBackgroundWorker(int backendToHelp, int32 lock_cooldown) BackgroundWorkerHandle *handle = NULL; LockAcquireHelperArgs args; BackgroundWorker worker; - MemoryContextCallback *workerCleanup = NULL; memset(&args, 0, sizeof(args)); memset(&worker, 0, sizeof(worker)); @@ -104,7 +103,7 @@ StartLockAcquireHelperBackgroundWorker(int backendToHelp, int32 lock_cooldown) errhint("Increasing max_worker_processes might help."))); } - workerCleanup = palloc0(sizeof(MemoryContextCallback)); + MemoryContextCallback *workerCleanup = palloc0(sizeof(MemoryContextCallback)); workerCleanup->func = EnsureStopLockAcquireHelper; workerCleanup->arg = handle; @@ -156,16 +155,14 @@ lock_acquire_helper_sigterm(SIGNAL_ARGS) static bool ShouldAcquireLock(long sleepms) { - int rc; - /* early escape in case we already got the signal to stop acquiring the lock */ if (got_sigterm) { return false; } - rc = WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - sleepms * 1L, PG_WAIT_EXTENSION); + int rc = WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + sleepms * 1L, PG_WAIT_EXTENSION); ResetLatch(MyLatch); /* emergency bailout if 
postmaster has died */ @@ -246,7 +243,6 @@ LockAcquireHelperMain(Datum main_arg) while (ShouldAcquireLock(100)) { int row = 0; - int spiStatus = 0; elog(LOG, "canceling competing backends for backend %d", backendPid); @@ -259,24 +255,23 @@ LockAcquireHelperMain(Datum main_arg) PushActiveSnapshot(GetTransactionSnapshot()); pgstat_report_activity(STATE_RUNNING, sql.data); - spiStatus = SPI_execute_with_args(sql.data, paramCount, paramTypes, paramValues, - NULL, false, 0); + int spiStatus = SPI_execute_with_args(sql.data, paramCount, paramTypes, + paramValues, + NULL, false, 0); if (spiStatus == SPI_OK_SELECT) { for (row = 0; row < SPI_processed; row++) { - int terminatedPid = 0; - bool isTerminated = false; bool isnull = false; - terminatedPid = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 1, &isnull)); + int terminatedPid = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isnull)); - isTerminated = DatumGetBool(SPI_getbinval(SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 2, &isnull)); + bool isTerminated = DatumGetBool(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 2, &isnull)); if (isTerminated) { diff --git a/src/backend/distributed/utils/aggregate_utils.c b/src/backend/distributed/utils/aggregate_utils.c index 9620e4f0a..3771f428d 100644 --- a/src/backend/distributed/utils/aggregate_utils.c +++ b/src/backend/distributed/utils/aggregate_utils.c @@ -149,7 +149,6 @@ static void InitializeStypeBox(FunctionCallInfo fcinfo, StypeBox *box, HeapTuple aggTuple, Oid transtype) { - Datum textInitVal; Form_pg_aggregate aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple); Oid userId = GetUserId(); @@ -161,9 +160,9 @@ InitializeStypeBox(FunctionCallInfo fcinfo, StypeBox *box, HeapTuple aggTuple, O aclcheckAggregate(OBJECT_FUNCTION, userId, aggform->aggserialfn); aclcheckAggregate(OBJECT_FUNCTION, userId, aggform->aggcombinefn); - textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple, - 
Anum_pg_aggregate_agginitval, - &box->valueNull); + Datum textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple, + Anum_pg_aggregate_agginitval, + &box->valueNull); box->transtype = transtype; box->valueInit = !box->valueNull; if (box->valueNull) @@ -174,18 +173,16 @@ InitializeStypeBox(FunctionCallInfo fcinfo, StypeBox *box, HeapTuple aggTuple, O { Oid typinput, typioparam; - char *strInitVal; MemoryContext aggregateContext; - MemoryContext oldContext; if (!AggCheckCallContext(fcinfo, &aggregateContext)) { elog(ERROR, "InitializeStypeBox called from non aggregate context"); } - oldContext = MemoryContextSwitchTo(aggregateContext); + MemoryContext oldContext = MemoryContextSwitchTo(aggregateContext); getTypeInputInfo(transtype, &typinput, &typioparam); - strInitVal = TextDatumGetCString(textInitVal); + char *strInitVal = TextDatumGetCString(textInitVal); box->value = OidInputFunctionCall(typinput, strInitVal, typioparam, -1); pfree(strInitVal); @@ -211,7 +208,6 @@ HandleTransition(StypeBox *box, FunctionCallInfo fcinfo, FunctionCallInfo innerF if (!newValIsNull) { MemoryContext aggregateContext; - MemoryContext oldContext; if (!AggCheckCallContext(fcinfo, &aggregateContext)) { @@ -219,7 +215,7 @@ HandleTransition(StypeBox *box, FunctionCallInfo fcinfo, FunctionCallInfo innerF "HandleTransition called from non aggregate context"); } - oldContext = MemoryContextSwitchTo(aggregateContext); + MemoryContext oldContext = MemoryContextSwitchTo(aggregateContext); if (!(DatumIsReadWriteExpandedObject(newVal, false, box->transtypeLen) && MemoryContextGetParent(DatumGetEOHP(newVal)->eoh_context) == @@ -257,14 +253,13 @@ static void HandleStrictUninit(StypeBox *box, FunctionCallInfo fcinfo, Datum value) { MemoryContext aggregateContext; - MemoryContext oldContext; if (!AggCheckCallContext(fcinfo, &aggregateContext)) { elog(ERROR, "HandleStrictUninit called from non aggregate context"); } - oldContext = MemoryContextSwitchTo(aggregateContext); + MemoryContext oldContext = 
MemoryContextSwitchTo(aggregateContext); box->value = datumCopy(value, box->transtypeByVal, box->transtypeLen); MemoryContextSwitchTo(oldContext); @@ -287,8 +282,6 @@ worker_partial_agg_sfunc(PG_FUNCTION_ARGS) { StypeBox *box = NULL; Form_pg_aggregate aggform; - HeapTuple aggtuple; - Oid aggsfunc; LOCAL_FCINFO(innerFcinfo, FUNC_MAX_ARGS); FmgrInfo info; int argumentIndex = 0; @@ -305,8 +298,8 @@ worker_partial_agg_sfunc(PG_FUNCTION_ARGS) Assert(box->agg == PG_GETARG_OID(1)); } - aggtuple = GetAggregateForm(box->agg, &aggform); - aggsfunc = aggform->aggtransfn; + HeapTuple aggtuple = GetAggregateForm(box->agg, &aggform); + Oid aggsfunc = aggform->aggtransfn; if (initialCall) { InitializeStypeBox(fcinfo, box, aggtuple, aggform->aggtranstype); @@ -370,19 +363,16 @@ worker_partial_agg_ffunc(PG_FUNCTION_ARGS) LOCAL_FCINFO(innerFcinfo, 1); FmgrInfo info; StypeBox *box = (StypeBox *) (PG_ARGISNULL(0) ? NULL : PG_GETARG_POINTER(0)); - HeapTuple aggtuple; Form_pg_aggregate aggform; Oid typoutput = InvalidOid; bool typIsVarlena = false; - Oid transtype; - Datum result; if (box == NULL || box->valueNull) { PG_RETURN_NULL(); } - aggtuple = GetAggregateForm(box->agg, &aggform); + HeapTuple aggtuple = GetAggregateForm(box->agg, &aggform); if (aggform->aggcombinefn == InvalidOid) { @@ -397,7 +387,7 @@ worker_partial_agg_ffunc(PG_FUNCTION_ARGS) "worker_partial_agg_ffunc does not support aggregates with INTERNAL transition state"))); } - transtype = aggform->aggtranstype; + Oid transtype = aggform->aggtranstype; ReleaseSysCache(aggtuple); getTypeOutputInfo(transtype, &typoutput, &typIsVarlena); @@ -408,7 +398,7 @@ worker_partial_agg_ffunc(PG_FUNCTION_ARGS) fcinfo->context, fcinfo->resultinfo); fcSetArgExt(innerFcinfo, 0, box->value, box->valueNull); - result = FunctionCallInvoke(innerFcinfo); + Datum result = FunctionCallInvoke(innerFcinfo); if (innerFcinfo->isnull) { @@ -433,15 +423,9 @@ coord_combine_agg_sfunc(PG_FUNCTION_ARGS) { LOCAL_FCINFO(innerFcinfo, 3); FmgrInfo info; - 
HeapTuple aggtuple; - HeapTuple transtypetuple; Form_pg_aggregate aggform; Form_pg_type transtypeform; - Oid combine; - Oid deserial; - Oid ioparam; Datum value; - bool valueNull; StypeBox *box = NULL; if (PG_ARGISNULL(0)) @@ -455,7 +439,7 @@ coord_combine_agg_sfunc(PG_FUNCTION_ARGS) Assert(box->agg == PG_GETARG_OID(1)); } - aggtuple = GetAggregateForm(box->agg, &aggform); + HeapTuple aggtuple = GetAggregateForm(box->agg, &aggform); if (aggform->aggcombinefn == InvalidOid) { @@ -470,7 +454,7 @@ coord_combine_agg_sfunc(PG_FUNCTION_ARGS) "coord_combine_agg_sfunc does not support aggregates with INTERNAL transition state"))); } - combine = aggform->aggcombinefn; + Oid combine = aggform->aggcombinefn; if (PG_ARGISNULL(0)) { @@ -486,10 +470,10 @@ coord_combine_agg_sfunc(PG_FUNCTION_ARGS) &box->transtypeByVal); } - valueNull = PG_ARGISNULL(2); - transtypetuple = GetTypeForm(box->transtype, &transtypeform); - ioparam = getTypeIOParam(transtypetuple); - deserial = transtypeform->typinput; + bool valueNull = PG_ARGISNULL(2); + HeapTuple transtypetuple = GetTypeForm(box->transtype, &transtypeform); + Oid ioparam = getTypeIOParam(transtypetuple); + Oid deserial = transtypeform->typinput; ReleaseSysCache(transtypetuple); fmgr_info(deserial, &info); @@ -551,19 +535,12 @@ coord_combine_agg_sfunc(PG_FUNCTION_ARGS) Datum coord_combine_agg_ffunc(PG_FUNCTION_ARGS) { - Datum result; StypeBox *box = (StypeBox *) (PG_ARGISNULL(0) ? 
NULL : PG_GETARG_POINTER(0)); LOCAL_FCINFO(innerFcinfo, FUNC_MAX_ARGS); FmgrInfo info; int innerNargs = 0; - HeapTuple aggtuple; - HeapTuple ffunctuple; Form_pg_aggregate aggform; Form_pg_proc ffuncform; - Oid ffunc = InvalidOid; - bool fextra = false; - bool finalStrict = false; - int argumentIndex = 0; if (box == NULL) { @@ -574,9 +551,9 @@ coord_combine_agg_ffunc(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } - aggtuple = GetAggregateForm(box->agg, &aggform); - ffunc = aggform->aggfinalfn; - fextra = aggform->aggfinalextra; + HeapTuple aggtuple = GetAggregateForm(box->agg, &aggform); + Oid ffunc = aggform->aggfinalfn; + bool fextra = aggform->aggfinalextra; ReleaseSysCache(aggtuple); if (ffunc == InvalidOid) @@ -588,8 +565,8 @@ coord_combine_agg_ffunc(PG_FUNCTION_ARGS) PG_RETURN_DATUM(box->value); } - ffunctuple = GetProcForm(ffunc, &ffuncform); - finalStrict = ffuncform->proisstrict; + HeapTuple ffunctuple = GetProcForm(ffunc, &ffuncform); + bool finalStrict = ffuncform->proisstrict; ReleaseSysCache(ffunctuple); if (finalStrict && box->valueNull) @@ -609,12 +586,12 @@ coord_combine_agg_ffunc(PG_FUNCTION_ARGS) InitFunctionCallInfoData(*innerFcinfo, &info, innerNargs, fcinfo->fncollation, fcinfo->context, fcinfo->resultinfo); fcSetArgExt(innerFcinfo, 0, box->value, box->valueNull); - for (argumentIndex = 1; argumentIndex < innerNargs; argumentIndex++) + for (int argumentIndex = 1; argumentIndex < innerNargs; argumentIndex++) { fcSetArgNull(innerFcinfo, argumentIndex); } - result = FunctionCallInvoke(innerFcinfo); + Datum result = FunctionCallInvoke(innerFcinfo); fcinfo->isnull = innerFcinfo->isnull; return result; } diff --git a/src/backend/distributed/utils/citus_clauses.c b/src/backend/distributed/utils/citus_clauses.c index 79f195ec2..d358ac48d 100644 --- a/src/backend/distributed/utils/citus_clauses.c +++ b/src/backend/distributed/utils/citus_clauses.c @@ -154,7 +154,6 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, EState *estate; ExprState 
*exprstate; ExprContext *econtext; - MemoryContext oldcontext; Datum const_val; bool const_is_null; int16 resultTypLen; @@ -166,7 +165,7 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, estate = CreateExecutorState(); /* We can use the estate's working context to avoid memory leaks. */ - oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); + MemoryContext oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); /* Make sure any opfuncids are filled in. */ fix_opfuncids((Node *) expr); diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c index 1f2888f9c..e8bd9f0ea 100644 --- a/src/backend/distributed/utils/citus_copyfuncs.c +++ b/src/backend/distributed/utils/citus_copyfuncs.c @@ -169,7 +169,6 @@ void CopyNodeMapMergeJob(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(MapMergeJob); - int arrayLength = 0; copyJobInfo(&newnode->job, &from->job); @@ -179,7 +178,7 @@ CopyNodeMapMergeJob(COPYFUNC_ARGS) COPY_SCALAR_FIELD(partitionCount); COPY_SCALAR_FIELD(sortedShardIntervalArrayLength); - arrayLength = from->sortedShardIntervalArrayLength; + int arrayLength = from->sortedShardIntervalArrayLength; /* now build & read sortedShardIntervalArray */ COPY_NODE_ARRAY(sortedShardIntervalArray, ShardInterval, arrayLength); diff --git a/src/backend/distributed/utils/citus_nodefuncs.c b/src/backend/distributed/utils/citus_nodefuncs.c index a58686493..e86fcff9f 100644 --- a/src/backend/distributed/utils/citus_nodefuncs.c +++ b/src/backend/distributed/utils/citus_nodefuncs.c @@ -74,17 +74,10 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, List *tableIdList) { - RangeTblFunction *fauxFunction = NULL; - FuncExpr *fauxFuncExpr = NULL; - Const *rteKindData = NULL; - Const *fragmentSchemaData = NULL; - Const *fragmentTableData = NULL; - Const *tableIdListData = NULL; - Assert(rte->eref); /* store RTE kind as a plain int4 */ - rteKindData = 
makeNode(Const); + Const *rteKindData = makeNode(Const); rteKindData->consttype = INT4OID; rteKindData->constlen = 4; rteKindData->constvalue = Int32GetDatum(rteKind); @@ -93,7 +86,7 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, rteKindData->location = -1; /* store the fragment schema as a cstring */ - fragmentSchemaData = makeNode(Const); + Const *fragmentSchemaData = makeNode(Const); fragmentSchemaData->consttype = CSTRINGOID; fragmentSchemaData->constlen = -2; fragmentSchemaData->constvalue = CStringGetDatum(fragmentSchemaName); @@ -102,7 +95,7 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, fragmentSchemaData->location = -1; /* store the fragment name as a cstring */ - fragmentTableData = makeNode(Const); + Const *fragmentTableData = makeNode(Const); fragmentTableData->consttype = CSTRINGOID; fragmentTableData->constlen = -2; fragmentTableData->constvalue = CStringGetDatum(fragmentTableName); @@ -111,7 +104,7 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, fragmentTableData->location = -1; /* store the table id list as an array of integers: FIXME */ - tableIdListData = makeNode(Const); + Const *tableIdListData = makeNode(Const); tableIdListData->consttype = CSTRINGOID; tableIdListData->constbyval = false; tableIdListData->constlen = -2; @@ -130,14 +123,14 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, } /* create function expression to store our faux arguments in */ - fauxFuncExpr = makeNode(FuncExpr); + FuncExpr *fauxFuncExpr = makeNode(FuncExpr); fauxFuncExpr->funcid = CitusExtraDataContainerFuncId(); fauxFuncExpr->funcretset = true; fauxFuncExpr->location = -1; fauxFuncExpr->args = list_make4(rteKindData, fragmentSchemaData, fragmentTableData, tableIdListData); - fauxFunction = makeNode(RangeTblFunction); + RangeTblFunction *fauxFunction = makeNode(RangeTblFunction); fauxFunction->funcexpr = (Node *) fauxFuncExpr; /* set the column count to pass ruleutils checks, not used elsewhere */ 
@@ -159,10 +152,6 @@ ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind, char **fragmentSchemaName, char **fragmentTableName, List **tableIdList) { - RangeTblFunction *fauxFunction = NULL; - FuncExpr *fauxFuncExpr = NULL; - Const *tmpConst = NULL; - /* set base rte kind first, so this can be used for 'non-extended' RTEs as well */ if (rteKind != NULL) { @@ -199,13 +188,13 @@ ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind, } /* should pretty much always be a FuncExpr, but be liberal in what we expect... */ - fauxFunction = linitial(rte->functions); + RangeTblFunction *fauxFunction = linitial(rte->functions); if (!IsA(fauxFunction->funcexpr, FuncExpr)) { return; } - fauxFuncExpr = (FuncExpr *) fauxFunction->funcexpr; + FuncExpr *fauxFuncExpr = (FuncExpr *) fauxFunction->funcexpr; /* * There will never be a range table entry with this function id, but for @@ -229,7 +218,7 @@ ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind, } /* extract rteKind */ - tmpConst = (Const *) linitial(fauxFuncExpr->args); + Const *tmpConst = (Const *) linitial(fauxFuncExpr->args); Assert(IsA(tmpConst, Const)); Assert(tmpConst->consttype == INT4OID); if (rteKind != NULL) @@ -419,12 +408,10 @@ const ExtensibleNodeMethods nodeMethods[] = void RegisterNodes(void) { - int off; - StaticAssertExpr(lengthof(nodeMethods) == lengthof(CitusNodeTagNamesD), "number of node methods and names do not match"); - for (off = 0; off < lengthof(nodeMethods); off++) + for (int off = 0; off < lengthof(nodeMethods); off++) { RegisterExtensibleNodeMethods(&nodeMethods[off]); } diff --git a/src/backend/distributed/utils/citus_readfuncs.c b/src/backend/distributed/utils/citus_readfuncs.c index b178b6afc..d3e9ab7dd 100644 --- a/src/backend/distributed/utils/citus_readfuncs.c +++ b/src/backend/distributed/utils/citus_readfuncs.c @@ -279,8 +279,6 @@ ReadShardInterval(READFUNC_ARGS) READFUNC_RET ReadMapMergeJob(READFUNC_ARGS) { - int arrayLength; - int i; 
READ_LOCALS(MapMergeJob); @@ -292,13 +290,13 @@ ReadMapMergeJob(READFUNC_ARGS) READ_UINT_FIELD(partitionCount); READ_INT_FIELD(sortedShardIntervalArrayLength); - arrayLength = local_node->sortedShardIntervalArrayLength; + int arrayLength = local_node->sortedShardIntervalArrayLength; /* now build & read sortedShardIntervalArray */ local_node->sortedShardIntervalArray = (ShardInterval**) palloc(arrayLength * sizeof(ShardInterval *)); - for (i = 0; i < arrayLength; ++i) + for (int i = 0; i < arrayLength; ++i) { /* can't use READ_NODE_FIELD, no field names */ local_node->sortedShardIntervalArray[i] = nodeRead(NULL, 0); diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c index 531e2816b..5c7f4a890 100644 --- a/src/backend/distributed/utils/colocation_utils.c +++ b/src/backend/distributed/utils/colocation_utils.c @@ -68,8 +68,6 @@ mark_tables_colocated(PG_FUNCTION_ARGS) { Oid sourceRelationId = PG_GETARG_OID(0); ArrayType *relationIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); - Datum *relationIdDatumArray = NULL; - int relationIndex = 0; int relationCount = ArrayObjectCount(relationIdArrayObject); if (relationCount < 1) @@ -82,9 +80,9 @@ mark_tables_colocated(PG_FUNCTION_ARGS) EnsureCoordinator(); EnsureTableOwner(sourceRelationId); - relationIdDatumArray = DeconstructArrayObject(relationIdArrayObject); + Datum *relationIdDatumArray = DeconstructArrayObject(relationIdArrayObject); - for (relationIndex = 0; relationIndex < relationCount; relationIndex++) + for (int relationIndex = 0; relationIndex < relationCount; relationIndex++) { Oid nextRelationOid = DatumGetObjectId(relationIdDatumArray[relationIndex]); @@ -108,7 +106,6 @@ get_colocated_shard_array(PG_FUNCTION_ARGS) uint32 shardId = PG_GETARG_UINT32(0); ShardInterval *shardInterval = LoadShardInterval(shardId); - ArrayType *colocatedShardsArrayType = NULL; List *colocatedShardList = ColocatedShardIntervalList(shardInterval); ListCell *colocatedShardCell = NULL; 
int colocatedShardCount = list_length(colocatedShardList); @@ -128,8 +125,9 @@ get_colocated_shard_array(PG_FUNCTION_ARGS) colocatedShardIndex++; } - colocatedShardsArrayType = DatumArrayToArrayType(colocatedShardsDatumArray, - colocatedShardCount, arrayTypeId); + ArrayType *colocatedShardsArrayType = DatumArrayToArrayType(colocatedShardsDatumArray, + colocatedShardCount, + arrayTypeId); PG_RETURN_ARRAYTYPE_P(colocatedShardsArrayType); } @@ -144,10 +142,6 @@ get_colocated_shard_array(PG_FUNCTION_ARGS) static void MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) { - uint32 sourceColocationId = INVALID_COLOCATION_ID; - uint32 targetColocationId = INVALID_COLOCATION_ID; - Relation pgDistColocation = NULL; - CheckReplicationModel(sourceRelationId, targetRelationId); CheckDistributionColumnType(sourceRelationId, targetRelationId); @@ -156,7 +150,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) * can be sure that there will no modifications on the colocation table * until this transaction is committed. */ - pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock); + Relation pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock); /* check if shard placements are colocated */ ErrorIfShardPlacementsNotColocated(sourceRelationId, targetRelationId); @@ -165,7 +159,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) * Get colocation group of the source table, if the source table does not * have a colocation group, create a new one, and set it for the source table. 
*/ - sourceColocationId = TableColocationId(sourceRelationId); + uint32 sourceColocationId = TableColocationId(sourceRelationId); if (sourceColocationId == INVALID_COLOCATION_ID) { uint32 shardCount = ShardIntervalCount(sourceRelationId); @@ -185,7 +179,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) UpdateRelationColocationGroup(sourceRelationId, sourceColocationId); } - targetColocationId = TableColocationId(targetRelationId); + uint32 targetColocationId = TableColocationId(targetRelationId); /* finally set colocation group for the target relation */ UpdateRelationColocationGroup(targetRelationId, sourceColocationId); @@ -212,28 +206,22 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) static void ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) { - List *leftShardIntervalList = NIL; - List *rightShardIntervalList = NIL; ListCell *leftShardIntervalCell = NULL; ListCell *rightShardIntervalCell = NULL; - char *leftRelationName = NULL; - char *rightRelationName = NULL; - uint32 leftShardCount = 0; - uint32 rightShardCount = 0; /* get sorted shard interval lists for both tables */ - leftShardIntervalList = LoadShardIntervalList(leftRelationId); - rightShardIntervalList = LoadShardIntervalList(rightRelationId); + List *leftShardIntervalList = LoadShardIntervalList(leftRelationId); + List *rightShardIntervalList = LoadShardIntervalList(rightRelationId); /* prevent concurrent placement changes */ LockShardListMetadata(leftShardIntervalList, ShareLock); LockShardListMetadata(rightShardIntervalList, ShareLock); - leftRelationName = get_rel_name(leftRelationId); - rightRelationName = get_rel_name(rightRelationId); + char *leftRelationName = get_rel_name(leftRelationId); + char *rightRelationName = get_rel_name(rightRelationId); - leftShardCount = list_length(leftShardIntervalList); - rightShardCount = list_length(rightShardIntervalList); + uint32 leftShardCount = list_length(leftShardIntervalList); + uint32 
rightShardCount = list_length(rightShardIntervalList); if (leftShardCount != rightShardCount) { @@ -250,10 +238,6 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) ShardInterval *leftInterval = (ShardInterval *) lfirst(leftShardIntervalCell); ShardInterval *rightInterval = (ShardInterval *) lfirst(rightShardIntervalCell); - List *leftPlacementList = NIL; - List *rightPlacementList = NIL; - List *sortedLeftPlacementList = NIL; - List *sortedRightPlacementList = NIL; ListCell *leftPlacementCell = NULL; ListCell *rightPlacementCell = NULL; @@ -269,8 +253,8 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) leftRelationName, rightRelationName))); } - leftPlacementList = ShardPlacementList(leftShardId); - rightPlacementList = ShardPlacementList(rightShardId); + List *leftPlacementList = ShardPlacementList(leftShardId); + List *rightPlacementList = ShardPlacementList(rightShardId); if (list_length(leftPlacementList) != list_length(rightPlacementList)) { @@ -284,10 +268,10 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) } /* sort shard placements according to the node */ - sortedLeftPlacementList = SortList(leftPlacementList, - CompareShardPlacementsByNode); - sortedRightPlacementList = SortList(rightPlacementList, - CompareShardPlacementsByNode); + List *sortedLeftPlacementList = SortList(leftPlacementList, + CompareShardPlacementsByNode); + List *sortedRightPlacementList = SortList(rightPlacementList, + CompareShardPlacementsByNode); /* compare shard placements one by one */ forboth(leftPlacementCell, sortedLeftPlacementList, @@ -297,14 +281,13 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) (ShardPlacement *) lfirst(leftPlacementCell); ShardPlacement *rightPlacement = (ShardPlacement *) lfirst(rightPlacementCell); - int nodeCompare = 0; /* * If shard placements are on different nodes, these shard * placements are not colocated. 
*/ - nodeCompare = CompareShardPlacementsByNode((void *) &leftPlacement, - (void *) &rightPlacement); + int nodeCompare = CompareShardPlacementsByNode((void *) &leftPlacement, + (void *) &rightPlacement); if (nodeCompare != 0) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", @@ -442,8 +425,6 @@ uint32 ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType) { uint32 colocationId = INVALID_COLOCATION_ID; - HeapTuple colocationTuple = NULL; - SysScanDesc scanDescriptor; const int scanKeyCount = 3; ScanKeyData scanKey[3]; bool indexOK = true; @@ -458,11 +439,11 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType) ScanKeyInit(&scanKey[2], Anum_pg_dist_colocation_distributioncolumntype, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributionColumnType)); - scanDescriptor = systable_beginscan(pgDistColocation, - DistColocationConfigurationIndexId(), - indexOK, NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistColocation, + DistColocationConfigurationIndexId(), + indexOK, NULL, scanKeyCount, scanKey); - colocationTuple = systable_getnext(scanDescriptor); + HeapTuple colocationTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(colocationTuple)) { Form_pg_dist_colocation colocationForm = @@ -487,9 +468,6 @@ uint32 CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionColumnType) { uint32 colocationId = GetNextColocationId(); - Relation pgDistColocation = NULL; - TupleDesc tupleDescriptor = NULL; - HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_colocation]; bool isNulls[Natts_pg_dist_colocation]; @@ -505,10 +483,10 @@ CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionCol ObjectIdGetDatum(distributionColumnType); /* open colocation relation and insert the new tuple */ - pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); + Relation pgDistColocation = 
heap_open(DistColocationRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistColocation); - heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistColocation); + HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistColocation, heapTuple); @@ -538,18 +516,16 @@ GetNextColocationId() Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - Datum colocationIdDatum = 0; - uint32 colocationId = INVALID_COLOCATION_ID; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique colocation id from sequence */ - colocationIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); + Datum colocationIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); - colocationId = DatumGetUInt32(colocationIdDatum); + uint32 colocationId = DatumGetUInt32(colocationIdDatum); return colocationId; } @@ -562,16 +538,11 @@ GetNextColocationId() void CheckReplicationModel(Oid sourceRelationId, Oid targetRelationId) { - DistTableCacheEntry *sourceTableEntry = NULL; - DistTableCacheEntry *targetTableEntry = NULL; - char sourceReplicationModel = 0; - char targetReplicationModel = 0; + DistTableCacheEntry *sourceTableEntry = DistributedTableCacheEntry(sourceRelationId); + char sourceReplicationModel = sourceTableEntry->replicationModel; - sourceTableEntry = DistributedTableCacheEntry(sourceRelationId); - sourceReplicationModel = sourceTableEntry->replicationModel; - - targetTableEntry = DistributedTableCacheEntry(targetRelationId); - targetReplicationModel = targetTableEntry->replicationModel; + DistTableCacheEntry *targetTableEntry = DistributedTableCacheEntry(targetRelationId); + char targetReplicationModel = 
targetTableEntry->replicationModel; if (sourceReplicationModel != targetReplicationModel) { @@ -593,13 +564,11 @@ CheckReplicationModel(Oid sourceRelationId, Oid targetRelationId) void CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId) { - Var *sourceDistributionColumn = NULL; - Var *targetDistributionColumn = NULL; Oid sourceDistributionColumnType = InvalidOid; Oid targetDistributionColumnType = InvalidOid; /* reference tables have NULL distribution column */ - sourceDistributionColumn = DistPartitionKey(sourceRelationId); + Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId); if (sourceDistributionColumn == NULL) { sourceDistributionColumnType = InvalidOid; @@ -610,7 +579,7 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId) } /* reference tables have NULL distribution column */ - targetDistributionColumn = DistPartitionKey(targetRelationId); + Var *targetDistributionColumn = DistPartitionKey(targetRelationId); if (targetDistributionColumn == NULL) { targetDistributionColumnType = InvalidOid; @@ -641,11 +610,6 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId) static void UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId) { - Relation pgDistPartition = NULL; - HeapTuple heapTuple = NULL; - TupleDesc tupleDescriptor = NULL; - SysScanDesc scanDescriptor = NULL; - bool shouldSyncMetadata = false; bool indexOK = true; int scanKeyCount = 1; ScanKeyData scanKey[1]; @@ -653,17 +617,18 @@ UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId) bool isNull[Natts_pg_dist_partition]; bool replace[Natts_pg_dist_partition]; - pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); - tupleDescriptor = RelationGetDescr(pgDistPartition); + Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); ScanKeyInit(&scanKey[0], 
Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId)); - scanDescriptor = systable_beginscan(pgDistPartition, - DistPartitionLogicalRelidIndexId(), indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + DistPartitionLogicalRelidIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { char *distributedRelationName = get_rel_name(distributedRelationId); @@ -691,7 +656,7 @@ UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId) systable_endscan(scanDescriptor); heap_close(pgDistPartition, NoLock); - shouldSyncMetadata = ShouldSyncTableMetadata(distributedRelationId); + bool shouldSyncMetadata = ShouldSyncTableMetadata(distributedRelationId); if (shouldSyncMetadata) { char *updateColocationIdCommand = ColocationIdUpdateCommand(distributedRelationId, @@ -724,16 +689,13 @@ TableColocationId(Oid distributedTableId) bool TablesColocated(Oid leftDistributedTableId, Oid rightDistributedTableId) { - uint32 leftColocationId = INVALID_COLOCATION_ID; - uint32 rightColocationId = INVALID_COLOCATION_ID; - if (leftDistributedTableId == rightDistributedTableId) { return true; } - leftColocationId = TableColocationId(leftDistributedTableId); - rightColocationId = TableColocationId(rightDistributedTableId); + uint32 leftColocationId = TableColocationId(leftDistributedTableId); + uint32 rightColocationId = TableColocationId(rightDistributedTableId); if (leftColocationId == INVALID_COLOCATION_ID || rightColocationId == INVALID_COLOCATION_ID) { @@ -805,10 +767,6 @@ static List * ColocationGroupTableList(Oid colocationId) { List *colocatedTableList = NIL; - Relation pgDistPartition = NULL; - TupleDesc tupleDescriptor = NULL; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; bool indexOK = true; int 
scanKeyCount = 1; ScanKeyData scanKey[1]; @@ -825,13 +783,13 @@ ColocationGroupTableList(Oid colocationId) ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid, BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId)); - pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); - tupleDescriptor = RelationGetDescr(pgDistPartition); - scanDescriptor = systable_beginscan(pgDistPartition, - DistPartitionColocationidIndexId(), - indexOK, NULL, scanKeyCount, scanKey); + Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + DistPartitionColocationidIndexId(), + indexOK, NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { bool isNull = false; @@ -861,8 +819,6 @@ ColocatedShardIntervalList(ShardInterval *shardInterval) { Oid distributedTableId = shardInterval->relationId; List *colocatedShardList = NIL; - int shardIntervalIndex = -1; - List *colocatedTableList = NIL; ListCell *colocatedTableCell = NULL; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); @@ -883,8 +839,8 @@ ColocatedShardIntervalList(ShardInterval *shardInterval) return colocatedShardList; } - shardIntervalIndex = ShardIndex(shardInterval); - colocatedTableList = ColocatedTableList(distributedTableId); + int shardIntervalIndex = ShardIndex(shardInterval); + List *colocatedTableList = ColocatedTableList(distributedTableId); /* ShardIndex have to find index of given shard */ Assert(shardIntervalIndex >= 0); @@ -894,8 +850,6 @@ ColocatedShardIntervalList(ShardInterval *shardInterval) Oid colocatedTableId = lfirst_oid(colocatedTableCell); DistTableCacheEntry *colocatedTableCacheEntry = DistributedTableCacheEntry(colocatedTableId); - ShardInterval *colocatedShardInterval = 
NULL; - ShardInterval *copyShardInterval = NULL; /* * Since we iterate over co-located tables, shard count of each table should be @@ -904,10 +858,10 @@ ColocatedShardIntervalList(ShardInterval *shardInterval) Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->shardIntervalArrayLength); - colocatedShardInterval = + ShardInterval *colocatedShardInterval = colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex]; - copyShardInterval = CitusMakeNode(ShardInterval); + ShardInterval *copyShardInterval = CitusMakeNode(ShardInterval); CopyShardInterval(colocatedShardInterval, copyShardInterval); colocatedShardList = lappend(colocatedShardList, copyShardInterval); @@ -930,10 +884,6 @@ Oid ColocatedTableId(Oid colocationId) { Oid colocatedTableId = InvalidOid; - Relation pgDistPartition = NULL; - TupleDesc tupleDescriptor = NULL; - SysScanDesc scanDescriptor = NULL; - HeapTuple heapTuple = NULL; bool indexOK = true; bool isNull = false; ScanKeyData scanKey[1]; @@ -951,17 +901,15 @@ ColocatedTableId(Oid colocationId) ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid, BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId)); - pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); - tupleDescriptor = RelationGetDescr(pgDistPartition); - scanDescriptor = systable_beginscan(pgDistPartition, - DistPartitionColocationidIndexId(), - indexOK, NULL, scanKeyCount, scanKey); + Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); + SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition, + DistPartitionColocationidIndexId(), + indexOK, NULL, scanKeyCount, scanKey); - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { - Relation colocatedRelation = NULL; - colocatedTableId = heap_getattr(heapTuple, 
Anum_pg_dist_partition_logicalrelid, tupleDescriptor, &isNull); @@ -975,7 +923,7 @@ ColocatedTableId(Oid colocationId) * The relation might have been dropped just before we locked it. * Let's look it up. */ - colocatedRelation = RelationIdGetRelation(colocatedTableId); + Relation colocatedRelation = RelationIdGetRelation(colocatedTableId); if (RelationIsValid(colocatedRelation)) { /* relation still exists, we can use it */ @@ -1037,23 +985,20 @@ DeleteColocationGroupIfNoTablesBelong(uint32 colocationId) static void DeleteColocationGroup(uint32 colocationId) { - Relation pgDistColocation = NULL; - SysScanDesc scanDescriptor = NULL; int scanKeyCount = 1; ScanKeyData scanKey[1]; bool indexOK = false; - HeapTuple heapTuple = NULL; - pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); + Relation pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_colocationid, BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId)); - scanDescriptor = systable_beginscan(pgDistColocation, InvalidOid, indexOK, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgDistColocation, InvalidOid, indexOK, + NULL, scanKeyCount, scanKey); /* if a record is found, delete it */ - heapTuple = systable_getnext(scanDescriptor); + HeapTuple heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { /* diff --git a/src/backend/distributed/utils/distribution_column.c b/src/backend/distributed/utils/distribution_column.c index b9b504c59..48a4ba418 100644 --- a/src/backend/distributed/utils/distribution_column.c +++ b/src/backend/distributed/utils/distribution_column.c @@ -51,19 +51,15 @@ column_name_to_column(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *columnText = PG_GETARG_TEXT_P(1); - Relation relation = NULL; char *columnName = text_to_cstring(columnText); - Var *column = NULL; - char *columnNodeString = NULL; - text 
*columnNodeText = NULL; CheckCitusVersion(ERROR); - relation = relation_open(relationId, AccessShareLock); + Relation relation = relation_open(relationId, AccessShareLock); - column = BuildDistributionKeyFromColumnName(relation, columnName); - columnNodeString = nodeToString(column); - columnNodeText = cstring_to_text(columnNodeString); + Var *column = BuildDistributionKeyFromColumnName(relation, columnName); + char *columnNodeString = nodeToString(column); + text *columnNodeText = cstring_to_text(columnNodeString); relation_close(relation, AccessShareLock); @@ -82,12 +78,10 @@ column_name_to_column_id(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); char *columnName = PG_GETARG_CSTRING(1); - Relation relation = NULL; - Var *column = NULL; - relation = relation_open(distributedTableId, AccessExclusiveLock); + Relation relation = relation_open(distributedTableId, AccessExclusiveLock); - column = BuildDistributionKeyFromColumnName(relation, columnName); + Var *column = BuildDistributionKeyFromColumnName(relation, columnName); relation_close(relation, NoLock); @@ -108,14 +102,12 @@ column_to_column_name(PG_FUNCTION_ARGS) text *columnNodeText = PG_GETARG_TEXT_P(1); char *columnNodeString = text_to_cstring(columnNodeText); - char *columnName = NULL; - text *columnText = NULL; CheckCitusVersion(ERROR); - columnName = ColumnNameToColumn(relationId, columnNodeString); + char *columnName = ColumnNameToColumn(relationId, columnNodeString); - columnText = cstring_to_text(columnName); + text *columnText = cstring_to_text(columnName); PG_RETURN_TEXT_P(columnText); } @@ -133,9 +125,6 @@ column_to_column_name(PG_FUNCTION_ARGS) Var * BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnName) { - HeapTuple columnTuple = NULL; - Form_pg_attribute columnForm = NULL; - Var *distributionColumn = NULL; char *tableName = RelationGetRelationName(distributedRelation); /* short circuit for reference tables */ @@ -148,8 +137,8 @@ 
BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam truncate_identifier(columnName, strlen(columnName), true); /* lookup column definition */ - columnTuple = SearchSysCacheAttName(RelationGetRelid(distributedRelation), - columnName); + HeapTuple columnTuple = SearchSysCacheAttName(RelationGetRelid(distributedRelation), + columnName); if (!HeapTupleIsValid(columnTuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -157,7 +146,7 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam columnName, tableName))); } - columnForm = (Form_pg_attribute) GETSTRUCT(columnTuple); + Form_pg_attribute columnForm = (Form_pg_attribute) GETSTRUCT(columnTuple); /* check if the column may be referenced in the distribution key */ if (columnForm->attnum <= 0) @@ -168,8 +157,8 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam } /* build Var referencing only the chosen distribution column */ - distributionColumn = makeVar(1, columnForm->attnum, columnForm->atttypid, - columnForm->atttypmod, columnForm->attcollation, 0); + Var *distributionColumn = makeVar(1, columnForm->attnum, columnForm->atttypid, + columnForm->atttypmod, columnForm->attcollation, 0); ReleaseSysCache(columnTuple); @@ -186,17 +175,12 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam char * ColumnNameToColumn(Oid relationId, char *columnNodeString) { - Node *columnNode = NULL; - Var *column = NULL; - AttrNumber columnNumber = InvalidAttrNumber; - char *columnName = NULL; - - columnNode = stringToNode(columnNodeString); + Node *columnNode = stringToNode(columnNodeString); Assert(IsA(columnNode, Var)); - column = (Var *) columnNode; + Var *column = (Var *) columnNode; - columnNumber = column->varattno; + AttrNumber columnNumber = column->varattno; if (!AttrNumberIsForUserDefinedAttr(columnNumber)) { char *relationName = get_rel_name(relationId); @@ -206,7 +190,7 @@ ColumnNameToColumn(Oid 
relationId, char *columnNodeString) columnNumber, relationName))); } - columnName = get_attname(relationId, column->varattno, false); + char *columnName = get_attname(relationId, column->varattno, false); if (columnName == NULL) { char *relationName = get_rel_name(relationId); diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index 0b239b432..feff78a5c 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -75,13 +75,11 @@ citus_setup_ssl(PG_FUNCTION_ARGS) #else /* USE_SSL */ if (!EnableSSL && ShouldUseAutoSSL()) { - Node *enableSSLParseTree = NULL; - ereport(LOG, (errmsg("citus extension created on postgres without ssl enabled, " "turning it on during creation of the extension"))); /* execute the alter system statement to enable ssl on within postgres */ - enableSSLParseTree = ParseTreeNode(ENABLE_SSL_QUERY); + Node *enableSSLParseTree = ParseTreeNode(ENABLE_SSL_QUERY); AlterSystemSetConfigFile((AlterSystemStmt *) enableSSLParseTree); if (strcmp(SSLCipherSuites, POSTGRES_DEFAULT_SSL_CIPHERS) == 0) @@ -141,14 +139,12 @@ citus_check_defaults_for_sslmode(PG_FUNCTION_ARGS) */ if (strcmp(NodeConninfo, "sslmode=require") == 0) { - Node *resetCitusNodeConnInfoParseTree = NULL; - /* execute the alter system statement to reset node_conninfo to the old default */ ereport(LOG, (errmsg("reset citus.node_conninfo to old default value as the new " "value is incompatible with the current ssl setting"))); - resetCitusNodeConnInfoParseTree = ParseTreeNode(RESET_CITUS_NODE_CONNINFO); + Node *resetCitusNodeConnInfoParseTree = ParseTreeNode(RESET_CITUS_NODE_CONNINFO); AlterSystemSetConfigFile((AlterSystemStmt *) resetCitusNodeConnInfoParseTree); configChanged = true; } @@ -289,13 +285,8 @@ CreateCertificatesWhenNeeded() static EVP_PKEY * GeneratePrivateKey() { - int success = 0; - EVP_PKEY *privateKey = NULL; - BIGNUM *exponent = NULL; - RSA *rsa = NULL; - /* Allocate memory for 
the EVP_PKEY structure. */ - privateKey = EVP_PKEY_new(); + EVP_PKEY *privateKey = EVP_PKEY_new(); if (!privateKey) { ereport(ERROR, (errmsg("unable to allocate space for private key"))); @@ -303,17 +294,17 @@ GeneratePrivateKey() EnsureReleaseResource((MemoryContextCallbackFunction) (&EVP_PKEY_free), privateKey); - exponent = BN_new(); + BIGNUM *exponent = BN_new(); EnsureReleaseResource((MemoryContextCallbackFunction) (&BN_free), exponent); /* load the exponent to use for the generation of the key */ - success = BN_set_word(exponent, RSA_F4); + int success = BN_set_word(exponent, RSA_F4); if (success != 1) { ereport(ERROR, (errmsg("unable to prepare exponent for RSA algorithm"))); } - rsa = RSA_new(); + RSA *rsa = RSA_new(); success = RSA_generate_key_ex(rsa, 2048, exponent, NULL); if (success != 1) { @@ -338,10 +329,7 @@ GeneratePrivateKey() static X509 * CreateCertificate(EVP_PKEY *privateKey) { - X509 *certificate = NULL; - X509_NAME *subjectName = NULL; - - certificate = X509_new(); + X509 *certificate = X509_new(); if (!certificate) { ereport(ERROR, (errmsg("unable to allocate space for the x509 certificate"))); @@ -374,7 +362,7 @@ CreateCertificate(EVP_PKEY *privateKey) X509_set_pubkey(certificate, privateKey); /* Set the common name for the certificate */ - subjectName = X509_get_subject_name(certificate); + X509_NAME *subjectName = X509_get_subject_name(certificate); X509_NAME_add_entry_by_txt(subjectName, X509_SUBJECT_COMMON_NAME, MBSTRING_ASC, (unsigned char *) CITUS_AUTO_SSL_COMMON_NAME, -1, -1, 0); @@ -402,19 +390,17 @@ StoreCertificate(EVP_PKEY *privateKey, X509 *certificate) const char *privateKeyFilename = ssl_key_file; const char *certificateFilename = ssl_cert_file; - FILE *privateKeyFile = NULL; - FILE *certificateFile = NULL; - int success = 0; /* Open the private key file and write the private key in PEM format to it */ - privateKeyFile = fopen(privateKeyFilename, "wb"); + FILE *privateKeyFile = fopen(privateKeyFilename, "wb"); if 
(!privateKeyFile) { ereport(ERROR, (errmsg("unable to open private key file '%s' for writing", privateKeyFilename))); } - success = PEM_write_PrivateKey(privateKeyFile, privateKey, NULL, NULL, 0, NULL, NULL); + int success = PEM_write_PrivateKey(privateKeyFile, privateKey, NULL, NULL, 0, NULL, + NULL); fclose(privateKeyFile); if (!success) { @@ -422,7 +408,7 @@ StoreCertificate(EVP_PKEY *privateKey, X509 *certificate) } /* Open the certificate file and write the certificate in the PEM format to it */ - certificateFile = fopen(certificateFilename, "wb"); + FILE *certificateFile = fopen(certificateFilename, "wb"); if (!certificateFile) { ereport(ERROR, (errmsg("unable to open certificate file '%s' for writing", diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index 3572f9b11..12bf25d89 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -128,13 +128,13 @@ GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing) List *foreignNodeList = NIL; ListCell *nodeCell = NULL; bool isFound = false; - ForeignConstraintRelationshipNode *relationNode = NULL; CreateForeignConstraintRelationshipGraph(); - relationNode = (ForeignConstraintRelationshipNode *) hash_search( - fConstraintRelationshipGraph->nodeMap, &relationId, - HASH_FIND, &isFound); + ForeignConstraintRelationshipNode *relationNode = + (ForeignConstraintRelationshipNode *) hash_search( + fConstraintRelationshipGraph->nodeMap, &relationId, + HASH_FIND, &isFound); if (!isFound) { @@ -175,10 +175,7 @@ GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing) static void CreateForeignConstraintRelationshipGraph() { - MemoryContext oldContext; - MemoryContext fConstraintRelationshipMemoryContext = NULL; HASHCTL info; - uint32 hashFlags = 0; /* if we have already created the graph, use it */ if 
(IsForeignConstraintRelationshipGraphValid()) @@ -188,14 +185,15 @@ CreateForeignConstraintRelationshipGraph() ClearForeignConstraintRelationshipGraphContext(); - fConstraintRelationshipMemoryContext = AllocSetContextCreateExtended( + MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateExtended( CacheMemoryContext, "Forign Constraint Relationship Graph Context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); - oldContext = MemoryContextSwitchTo(fConstraintRelationshipMemoryContext); + MemoryContext oldContext = MemoryContextSwitchTo( + fConstraintRelationshipMemoryContext); fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc( sizeof(ForeignConstraintRelationshipGraph)); @@ -207,7 +205,7 @@ CreateForeignConstraintRelationshipGraph() info.entrysize = sizeof(ForeignConstraintRelationshipNode); info.hash = oid_hash; info.hcxt = CurrentMemoryContext; - hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); fConstraintRelationshipGraph->nodeMap = hash_create( "foreign key relationship map (oid)", @@ -293,9 +291,7 @@ GetConnectedListHelper(ForeignConstraintRelationshipNode *node, List **adjacentN static void PopulateAdjacencyLists(void) { - SysScanDesc scanDescriptor; HeapTuple tuple; - Relation pgConstraint; ScanKeyData scanKey[1]; int scanKeyCount = 1; @@ -304,19 +300,18 @@ PopulateAdjacencyLists(void) List *frelEdgeList = NIL; ListCell *frelEdgeCell = NULL; - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); + Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(CONSTRAINT_FOREIGN)); - scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, - NULL, scanKeyCount, scanKey); + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, false, + NULL, scanKeyCount, 
scanKey); while (HeapTupleIsValid(tuple = systable_getnext(scanDescriptor))) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(tuple); - ForeignConstraintRelationshipEdge *currentFConstraintRelationshipEdge = NULL; - currentFConstraintRelationshipEdge = palloc( + ForeignConstraintRelationshipEdge *currentFConstraintRelationshipEdge = palloc( sizeof(ForeignConstraintRelationshipEdge)); currentFConstraintRelationshipEdge->referencingRelationOID = constraintForm->conrelid; diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c index c0847cd29..07c85b796 100644 --- a/src/backend/distributed/utils/function_utils.c +++ b/src/backend/distributed/utils/function_utils.c @@ -39,18 +39,16 @@ Oid FunctionOidExtended(const char *schemaName, const char *functionName, int argumentCount, bool missingOK) { - FuncCandidateList functionList = NULL; - Oid functionOid = InvalidOid; - char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName); List *qualifiedFunctionNameList = stringToQualifiedNameList(qualifiedFunctionName); List *argumentList = NIL; const bool findVariadics = false; const bool findDefaults = false; - functionList = FuncnameGetCandidates(qualifiedFunctionNameList, argumentCount, - argumentList, findVariadics, - findDefaults, true); + FuncCandidateList functionList = FuncnameGetCandidates(qualifiedFunctionNameList, + argumentCount, + argumentList, findVariadics, + findDefaults, true); if (functionList == NULL) { @@ -69,7 +67,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume } /* get function oid from function list's head */ - functionOid = functionList->oid; + Oid functionOid = functionList->oid; return functionOid; } diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c index 2d027c8eb..ba1bf9fe8 100644 --- a/src/backend/distributed/utils/listutils.c +++ 
b/src/backend/distributed/utils/listutils.c @@ -97,14 +97,13 @@ PointerArrayFromList(List *pointerList) ArrayType * DatumArrayToArrayType(Datum *datumArray, int datumCount, Oid datumTypeId) { - ArrayType *arrayObject = NULL; int16 typeLength = 0; bool typeByValue = false; char typeAlignment = 0; get_typlenbyvalalign(datumTypeId, &typeLength, &typeByValue, &typeAlignment); - arrayObject = construct_array(datumArray, datumCount, datumTypeId, - typeLength, typeByValue, typeAlignment); + ArrayType *arrayObject = construct_array(datumArray, datumCount, datumTypeId, + typeLength, typeByValue, typeAlignment); return arrayObject; } @@ -125,7 +124,6 @@ HTAB * ListToHashSet(List *itemList, Size keySize, bool isStringList) { HASHCTL info; - HTAB *itemSet = NULL; ListCell *itemCell = NULL; int flags = HASH_ELEM | HASH_CONTEXT; @@ -143,7 +141,7 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList) flags |= HASH_BLOBS; } - itemSet = hash_create("ListToHashSet", capacity, &info, flags); + HTAB *itemSet = hash_create("ListToHashSet", capacity, &info, flags); foreach(itemCell, itemList) { diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 2466a63cd..ddb9f9830 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -136,15 +136,17 @@ InitializeMaintenanceDaemon(void) void InitializeMaintenanceDaemonBackend(void) { - MaintenanceDaemonDBData *dbData = NULL; Oid extensionOwner = CitusExtensionOwner(); bool found; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - dbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonDBHash, - &MyDatabaseId, - HASH_ENTER_NULL, &found); + MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( + MaintenanceDaemonDBHash, + & + MyDatabaseId, + HASH_ENTER_NULL, + &found); if (dbData == NULL) { @@ -228,7 +230,6 @@ void CitusMaintenanceDaemonMain(Datum main_arg) { Oid databaseOid = 
DatumGetObjectId(main_arg); - MaintenanceDaemonDBData *myDbData = NULL; TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000); bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false; @@ -241,8 +242,9 @@ CitusMaintenanceDaemonMain(Datum main_arg) */ LWLockAcquire(&MaintenanceDaemonControl->lock, LW_SHARED); - myDbData = (MaintenanceDaemonDBData *) - hash_search(MaintenanceDaemonDBHash, &databaseOid, HASH_FIND, NULL); + MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *) + hash_search(MaintenanceDaemonDBHash, &databaseOid, + HASH_FIND, NULL); if (!myDbData) { /* @@ -371,7 +373,6 @@ CitusMaintenanceDaemonMain(Datum main_arg) GetCurrentTimestamp() >= nextMetadataSyncTime)) { bool metadataSyncFailed = false; - int64 nextTimeout = 0; InvalidateMetadataSystemCache(); StartTransactionCommand(); @@ -406,8 +407,8 @@ CitusMaintenanceDaemonMain(Datum main_arg) CommitTransactionCommand(); ProcessCompletedNotifies(); - nextTimeout = metadataSyncFailed ? MetadataSyncRetryInterval : - MetadataSyncInterval; + int64 nextTimeout = metadataSyncFailed ? MetadataSyncRetryInterval : + MetadataSyncInterval; nextMetadataSyncTime = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), nextTimeout); timeout = Min(timeout, nextTimeout); @@ -561,7 +562,6 @@ static size_t MaintenanceDaemonShmemSize(void) { Size size = 0; - Size hashSize = 0; size = add_size(size, sizeof(MaintenanceDaemonControlData)); @@ -570,7 +570,8 @@ MaintenanceDaemonShmemSize(void) * worker process. We couldn't start more anyway, so there's little point * in allocating more. 
*/ - hashSize = hash_estimate_size(max_worker_processes, sizeof(MaintenanceDaemonDBData)); + Size hashSize = hash_estimate_size(max_worker_processes, + sizeof(MaintenanceDaemonDBData)); size = add_size(size, hashSize); return size; @@ -586,7 +587,6 @@ MaintenanceDaemonShmemInit(void) { bool alreadyInitialized = false; HASHCTL hashInfo; - int hashFlags = 0; LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); @@ -615,7 +615,7 @@ MaintenanceDaemonShmemInit(void) hashInfo.keysize = sizeof(Oid); hashInfo.entrysize = sizeof(MaintenanceDaemonDBData); hashInfo.hash = tag_hash; - hashFlags = (HASH_ELEM | HASH_FUNCTION); + int hashFlags = (HASH_ELEM | HASH_FUNCTION); MaintenanceDaemonDBHash = ShmemInitHash("Maintenance Database Hash", max_worker_processes, max_worker_processes, @@ -669,8 +669,6 @@ MaintenanceDaemonErrorContext(void *arg) static bool LockCitusExtension(void) { - Oid recheckExtensionOid = InvalidOid; - Oid extensionOid = get_extension_oid("citus", true); if (extensionOid == InvalidOid) { @@ -684,7 +682,7 @@ LockCitusExtension(void) * The extension may have been dropped and possibly recreated prior to * obtaining a lock. Check whether we still get the expected OID. 
*/ - recheckExtensionOid = get_extension_oid("citus", true); + Oid recheckExtensionOid = get_extension_oid("citus", true); if (recheckExtensionOid != extensionOid) { return false; @@ -703,13 +701,14 @@ void StopMaintenanceDaemon(Oid databaseId) { bool found = false; - MaintenanceDaemonDBData *dbData = NULL; pid_t workerPid = 0; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - dbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonDBHash, - &databaseId, HASH_REMOVE, &found); + MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( + MaintenanceDaemonDBHash, + &databaseId, + HASH_REMOVE, &found); if (found) { workerPid = dbData->workerPid; @@ -732,12 +731,13 @@ void TriggerMetadataSync(Oid databaseId) { bool found = false; - MaintenanceDaemonDBData *dbData = NULL; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - dbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonDBHash, - &databaseId, HASH_FIND, &found); + MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( + MaintenanceDaemonDBHash, + &databaseId, + HASH_FIND, &found); if (found) { dbData->triggerMetadataSync = true; @@ -757,11 +757,9 @@ TriggerMetadataSync(Oid databaseId) static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData) { - bool metadataSyncTriggered = false; - LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - metadataSyncTriggered = dbData->triggerMetadataSync; + bool metadataSyncTriggered = dbData->triggerMetadataSync; dbData->triggerMetadataSync = false; LWLockRelease(&MaintenanceDaemonControl->lock); diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index e8019f80f..f62c6bcfc 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -99,7 +99,6 @@ bool PartitionTable(Oid relationId) { Relation rel = 
try_relation_open(relationId, AccessShareLock); - bool partitionTable = false; /* don't error out for tables that are dropped */ if (rel == NULL) @@ -107,7 +106,7 @@ PartitionTable(Oid relationId) return false; } - partitionTable = rel->rd_rel->relispartition; + bool partitionTable = rel->rd_rel->relispartition; /* keep the lock */ heap_close(rel, NoLock); @@ -125,7 +124,6 @@ bool PartitionTableNoLock(Oid relationId) { Relation rel = try_relation_open_nolock(relationId); - bool partitionTable = false; /* don't error out for tables that are dropped */ if (rel == NULL) @@ -133,7 +131,7 @@ PartitionTableNoLock(Oid relationId) return false; } - partitionTable = rel->rd_rel->relispartition; + bool partitionTable = rel->rd_rel->relispartition; /* keep the lock */ heap_close(rel, NoLock); @@ -157,13 +155,12 @@ PartitionTableNoLock(Oid relationId) static Relation try_relation_open_nolock(Oid relationId) { - Relation relation = NULL; if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId))) { return NULL; } - relation = RelationIdGetRelation(relationId); + Relation relation = RelationIdGetRelation(relationId); if (!RelationIsValid(relation)) { return NULL; @@ -183,20 +180,18 @@ try_relation_open_nolock(Oid relationId) bool IsChildTable(Oid relationId) { - Relation pgInherits = NULL; - SysScanDesc scan = NULL; ScanKeyData key[1]; HeapTuple inheritsTuple = NULL; bool tableInherits = false; - pgInherits = heap_open(InheritsRelationId, AccessShareLock); + Relation pgInherits = heap_open(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); - scan = systable_beginscan(pgInherits, InvalidOid, false, - NULL, 1, key); + SysScanDesc scan = systable_beginscan(pgInherits, InvalidOid, false, + NULL, 1, key); while ((inheritsTuple = systable_getnext(scan)) != NULL) { @@ -230,19 +225,17 @@ IsChildTable(Oid relationId) bool IsParentTable(Oid relationId) { - Relation pgInherits = NULL; - 
SysScanDesc scan = NULL; ScanKeyData key[1]; bool tableInherited = false; - pgInherits = heap_open(InheritsRelationId, AccessShareLock); + Relation pgInherits = heap_open(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); - scan = systable_beginscan(pgInherits, InheritsParentIndexId, true, - NULL, 1, key); + SysScanDesc scan = systable_beginscan(pgInherits, InheritsParentIndexId, true, + NULL, 1, key); if (systable_getnext(scan) != NULL) { @@ -270,9 +263,7 @@ IsParentTable(Oid relationId) Oid PartitionParentOid(Oid partitionOid) { - Oid partitionParentOid = InvalidOid; - - partitionParentOid = get_partition_parent(partitionOid); + Oid partitionParentOid = get_partition_parent(partitionOid); return partitionParentOid; } @@ -288,8 +279,6 @@ PartitionList(Oid parentRelationId) Relation rel = heap_open(parentRelationId, AccessShareLock); List *partitionList = NIL; - int partitionIndex = 0; - int partitionCount = 0; if (!PartitionedTable(parentRelationId)) { @@ -300,8 +289,8 @@ PartitionList(Oid parentRelationId) Assert(rel->rd_partdesc != NULL); - partitionCount = rel->rd_partdesc->nparts; - for (partitionIndex = 0; partitionIndex < partitionCount; ++partitionIndex) + int partitionCount = rel->rd_partdesc->nparts; + for (int partitionIndex = 0; partitionIndex < partitionCount; ++partitionIndex) { partitionList = lappend_oid(partitionList, rel->rd_partdesc->oids[partitionIndex]); @@ -322,9 +311,6 @@ char * GenerateDetachPartitionCommand(Oid partitionTableId) { StringInfo detachPartitionCommand = makeStringInfo(); - Oid parentId = InvalidOid; - char *tableQualifiedName = NULL; - char *parentTableQualifiedName = NULL; if (!PartitionTable(partitionTableId)) { @@ -333,9 +319,9 @@ GenerateDetachPartitionCommand(Oid partitionTableId) ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName))); } - parentId = get_partition_parent(partitionTableId); - tableQualifiedName = 
generate_qualified_relation_name(partitionTableId); - parentTableQualifiedName = generate_qualified_relation_name(parentId); + Oid parentId = get_partition_parent(partitionTableId); + char *tableQualifiedName = generate_qualified_relation_name(partitionTableId); + char *parentTableQualifiedName = generate_qualified_relation_name(parentId); appendStringInfo(detachPartitionCommand, "ALTER TABLE IF EXISTS %s DETACH PARTITION %s;", @@ -353,7 +339,6 @@ char * GeneratePartitioningInformation(Oid parentTableId) { char *partitionBoundCString = ""; - Datum partitionBoundDatum = 0; if (!PartitionedTable(parentTableId)) { @@ -362,8 +347,8 @@ GeneratePartitioningInformation(Oid parentTableId) ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName))); } - partitionBoundDatum = DirectFunctionCall1(pg_get_partkeydef, - ObjectIdGetDatum(parentTableId)); + Datum partitionBoundDatum = DirectFunctionCall1(pg_get_partkeydef, + ObjectIdGetDatum(parentTableId)); partitionBoundCString = TextDatumGetCString(partitionBoundDatum); @@ -386,10 +371,6 @@ GenerateAttachShardPartitionCommand(ShardInterval *shardInterval) char *escapedCommand = quote_literal_cstr(command); int shardIndex = ShardIndex(shardInterval); - Oid parentSchemaId = InvalidOid; - char *parentSchemaName = NULL; - char *escapedParentSchemaName = NULL; - uint64 parentShardId = INVALID_SHARD_ID; StringInfo attachPartitionCommand = makeStringInfo(); @@ -401,10 +382,10 @@ GenerateAttachShardPartitionCommand(ShardInterval *shardInterval) errdetail("Referenced relation cannot be found."))); } - parentSchemaId = get_rel_namespace(parentRelationId); - parentSchemaName = get_namespace_name(parentSchemaId); - escapedParentSchemaName = quote_literal_cstr(parentSchemaName); - parentShardId = ColocatedShardIdInRelation(parentRelationId, shardIndex); + Oid parentSchemaId = get_rel_namespace(parentRelationId); + char *parentSchemaName = get_namespace_name(parentSchemaId); + char *escapedParentSchemaName = 
quote_literal_cstr(parentSchemaName); + uint64 parentShardId = ColocatedShardIdInRelation(parentRelationId, shardIndex); appendStringInfo(attachPartitionCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND, parentShardId, @@ -423,11 +404,7 @@ char * GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) { StringInfo createPartitionCommand = makeStringInfo(); - char *partitionBoundCString = NULL; - Oid parentId = InvalidOid; - char *tableQualifiedName = NULL; - char *parentTableQualifiedName = NULL; if (!PartitionTable(partitionTableId)) { @@ -436,11 +413,11 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName))); } - parentId = get_partition_parent(partitionTableId); - tableQualifiedName = generate_qualified_relation_name(partitionTableId); - parentTableQualifiedName = generate_qualified_relation_name(parentId); + Oid parentId = get_partition_parent(partitionTableId); + char *tableQualifiedName = generate_qualified_relation_name(partitionTableId); + char *parentTableQualifiedName = generate_qualified_relation_name(parentId); - partitionBoundCString = PartitionBound(partitionTableId); + char *partitionBoundCString = PartitionBound(partitionTableId); appendStringInfo(createPartitionCommand, "ALTER TABLE %s ATTACH PARTITION %s %s;", parentTableQualifiedName, tableQualifiedName, @@ -460,13 +437,9 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) static char * PartitionBound(Oid partitionId) { - char *partitionBoundString = NULL; - HeapTuple tuple = NULL; - Datum datum = 0; bool isnull = false; - Datum partitionBoundDatum = 0; - tuple = SearchSysCache1(RELOID, partitionId); + HeapTuple tuple = SearchSysCache1(RELOID, partitionId); if (!HeapTupleIsValid(tuple)) { elog(ERROR, "cache lookup failed for relation %u", partitionId); @@ -485,15 +458,15 @@ PartitionBound(Oid partitionId) return ""; } - datum = SysCacheGetAttr(RELOID, tuple, - Anum_pg_class_relpartbound, - &isnull); + 
Datum datum = SysCacheGetAttr(RELOID, tuple, + Anum_pg_class_relpartbound, + &isnull); Assert(!isnull); - partitionBoundDatum = + Datum partitionBoundDatum = DirectFunctionCall2(pg_get_expr, datum, ObjectIdGetDatum(partitionId)); - partitionBoundString = TextDatumGetCString(partitionBoundDatum); + char *partitionBoundString = TextDatumGetCString(partitionBoundDatum); ReleaseSysCache(tuple); diff --git a/src/backend/distributed/utils/multi_resowner.c b/src/backend/distributed/utils/multi_resowner.c index 67c9be0ab..86898ffba 100644 --- a/src/backend/distributed/utils/multi_resowner.c +++ b/src/backend/distributed/utils/multi_resowner.c @@ -113,10 +113,8 @@ ResourceOwnerEnlargeJobDirectories(ResourceOwner owner) void ResourceOwnerRememberJobDirectory(ResourceOwner owner, uint64 jobId) { - JobDirectoryEntry *entry = NULL; - Assert(NumRegisteredJobDirectories + 1 <= NumAllocatedJobDirectories); - entry = &RegisteredJobDirectories[NumRegisteredJobDirectories]; + JobDirectoryEntry *entry = &RegisteredJobDirectories[NumRegisteredJobDirectories]; entry->owner = owner; entry->jobId = jobId; NumRegisteredJobDirectories++; @@ -128,9 +126,8 @@ void ResourceOwnerForgetJobDirectory(ResourceOwner owner, uint64 jobId) { int lastJobIndex = NumRegisteredJobDirectories - 1; - int jobIndex = 0; - for (jobIndex = lastJobIndex; jobIndex >= 0; jobIndex--) + for (int jobIndex = lastJobIndex; jobIndex >= 0; jobIndex--) { JobDirectoryEntry *entry = &RegisteredJobDirectories[jobIndex]; diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index 423746fdc..48d134112 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -55,8 +55,6 @@ Datum upgrade_to_reference_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); - List *shardIntervalList = NIL; - DistTableCacheEntry *tableEntry = NULL; CheckCitusVersion(ERROR); EnsureCoordinator(); @@ -72,7 
+70,7 @@ upgrade_to_reference_table(PG_FUNCTION_ARGS) "create_reference_table('%s');", relationName))); } - tableEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *tableEntry = DistributedTableCacheEntry(relationId); if (tableEntry->partitionMethod == DISTRIBUTE_BY_NONE) { @@ -95,7 +93,7 @@ upgrade_to_reference_table(PG_FUNCTION_ARGS) LockRelationOid(relationId, AccessExclusiveLock); - shardIntervalList = LoadShardIntervalList(relationId); + List *shardIntervalList = LoadShardIntervalList(relationId); if (list_length(shardIntervalList) != 1) { char *relationName = get_rel_name(relationId); @@ -237,11 +235,10 @@ ReplicateSingleShardTableToAllNodes(Oid relationId) static void ReplicateShardToAllNodes(ShardInterval *shardInterval) { - List *workerNodeList = NULL; ListCell *workerNodeCell = NULL; /* prevent concurrent pg_dist_node changes */ - workerNodeList = ReferenceTablePlacementNodeList(ShareLock); + List *workerNodeList = ReferenceTablePlacementNodeList(ShareLock); /* * We will iterate over all worker nodes and if a healthy placement does not exist @@ -376,7 +373,6 @@ ConvertToReferenceTableMetadata(Oid relationId, uint64 shardId) uint32 CreateReferenceTableColocationId() { - uint32 colocationId = INVALID_COLOCATION_ID; int shardCount = 1; Oid distributionColumnType = InvalidOid; @@ -387,7 +383,8 @@ CreateReferenceTableColocationId() int replicationFactor = -1; /* check for existing colocations */ - colocationId = ColocationId(shardCount, replicationFactor, distributionColumnType); + uint32 colocationId = ColocationId(shardCount, replicationFactor, + distributionColumnType); if (colocationId == INVALID_COLOCATION_ID) { colocationId = CreateColocationGroup(shardCount, replicationFactor, @@ -430,7 +427,6 @@ DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId) foreach(referenceTableCell, referenceTableList) { - GroupShardPlacement *placement = NULL; StringInfo deletePlacementCommand = makeStringInfo(); Oid referenceTableId = 
lfirst_oid(referenceTableCell); @@ -442,7 +438,7 @@ DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId) continue; } - placement = (GroupShardPlacement *) linitial(placements); + GroupShardPlacement *placement = (GroupShardPlacement *) linitial(placements); LockShardDistributionMetadata(placement->shardId, ExclusiveLock); @@ -474,10 +470,9 @@ ReferenceTableOidList() foreach(distTableOidCell, distTableOidList) { - DistTableCacheEntry *cacheEntry = NULL; Oid relationId = lfirst_oid(distTableOidCell); - cacheEntry = DistributedTableCacheEntry(relationId); + DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 6c4156bb0..79d8cf342 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -98,9 +98,6 @@ lock_shard_metadata(PG_FUNCTION_ARGS) { LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(0)); ArrayType *shardIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); - Datum *shardIdArrayDatum = NULL; - int shardIdCount = 0; - int shardIdIndex = 0; CheckCitusVersion(ERROR); @@ -112,10 +109,10 @@ lock_shard_metadata(PG_FUNCTION_ARGS) /* we don't want random users to block writes */ EnsureSuperUser(); - shardIdCount = ArrayObjectCount(shardIdArrayObject); - shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); + int shardIdCount = ArrayObjectCount(shardIdArrayObject); + Datum *shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); - for (shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) + for (int shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) { int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]); @@ -138,9 +135,6 @@ lock_shard_resources(PG_FUNCTION_ARGS) { LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(0)); ArrayType *shardIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); - Datum 
*shardIdArrayDatum = NULL; - int shardIdCount = 0; - int shardIdIndex = 0; CheckCitusVersion(ERROR); @@ -152,10 +146,10 @@ lock_shard_resources(PG_FUNCTION_ARGS) /* we don't want random users to block writes */ EnsureSuperUser(); - shardIdCount = ArrayObjectCount(shardIdArrayObject); - shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); + int shardIdCount = ArrayObjectCount(shardIdArrayObject); + Datum *shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); - for (shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) + for (int shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) { int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]); @@ -183,7 +177,6 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList) WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode(); int connectionFlags = 0; const char *superuser = CitusExtensionOwnerName(); - MultiConnection *firstWorkerConnection = NULL; appendStringInfo(lockCommand, "SELECT lock_shard_resources(%d, ARRAY[", lockmode); @@ -210,10 +203,14 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList) * Use the superuser connection to make sure we are allowed to lock. * This also helps ensure we only use one connection. */ - firstWorkerConnection = GetNodeUserDatabaseConnection(connectionFlags, - firstWorkerNode->workerName, - firstWorkerNode->workerPort, - superuser, NULL); + MultiConnection *firstWorkerConnection = GetNodeUserDatabaseConnection( + connectionFlags, + firstWorkerNode + ->workerName, + firstWorkerNode + ->workerPort, + superuser, + NULL); /* the SELECT .. 
FOR UPDATE breaks if we lose the connection */ MarkRemoteTransactionCritical(firstWorkerConnection); @@ -234,7 +231,6 @@ static bool IsFirstWorkerNode() { List *workerNodeList = ActivePrimaryWorkerNodeList(NoLock); - WorkerNode *firstWorkerNode = NULL; workerNodeList = SortList(workerNodeList, CompareWorkerNodes); @@ -243,7 +239,7 @@ IsFirstWorkerNode() return false; } - firstWorkerNode = (WorkerNode *) linitial(workerNodeList); + WorkerNode *firstWorkerNode = (WorkerNode *) linitial(workerNodeList); if (firstWorkerNode->groupId == GetLocalGroupId()) { @@ -433,14 +429,13 @@ GetSortedReferenceShardIntervals(List *relationList) foreach(relationCell, relationList) { Oid relationId = lfirst_oid(relationCell); - List *currentShardIntervalList = NIL; if (PartitionMethod(relationId) != DISTRIBUTE_BY_NONE) { continue; } - currentShardIntervalList = LoadShardIntervalList(relationId); + List *currentShardIntervalList = LoadShardIntervalList(relationId); shardIntervalList = lappend(shardIntervalList, linitial( currentShardIntervalList)); } @@ -463,11 +458,10 @@ TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode) LOCKTAG tag; const bool sessionLock = false; const bool dontWait = true; - bool lockAcquired = false; SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, MyDatabaseId, shardId); - lockAcquired = LockAcquire(&tag, lockMode, sessionLock, dontWait); + bool lockAcquired = LockAcquire(&tag, lockMode, sessionLock, dontWait); return lockAcquired; } @@ -704,8 +698,7 @@ LockModeTextToLockMode(const char *lockModeName) { LOCKMODE lockMode = -1; - int lockIndex = 0; - for (lockIndex = 0; lockIndex < lock_mode_to_string_map_count; lockIndex++) + for (int lockIndex = 0; lockIndex < lock_mode_to_string_map_count; lockIndex++) { const struct LockModeToStringType *lockMap = lockmode_to_string_map + lockIndex; if (pg_strncasecmp(lockMap->name, lockModeName, NAMEDATALEN) == 0) @@ -738,8 +731,7 @@ LockModeToLockModeText(LOCKMODE lockMode) { const char *lockModeText = NULL; - int 
lockIndex = 0; - for (lockIndex = 0; lockIndex < lock_mode_to_string_map_count; lockIndex++) + for (int lockIndex = 0; lockIndex < lock_mode_to_string_map_count; lockIndex++) { const struct LockModeToStringType *lockMap = lockmode_to_string_map + lockIndex; if (lockMode == lockMap->lockMode) @@ -777,28 +769,23 @@ lock_relation_if_exists(PG_FUNCTION_ARGS) { text *relationName = PG_GETARG_TEXT_P(0); text *lockModeText = PG_GETARG_TEXT_P(1); - Oid relationId = InvalidOid; char *lockModeCString = text_to_cstring(lockModeText); - List *relationNameList = NIL; - RangeVar *relation = NULL; - LOCKMODE lockMode = NoLock; - bool relationExists = false; /* ensure that we're in a transaction block */ RequireTransactionBlock(true, "lock_relation_if_exists"); /* get the lock mode */ - lockMode = LockModeTextToLockMode(lockModeCString); + LOCKMODE lockMode = LockModeTextToLockMode(lockModeCString); /* resolve relationId from passed in schema and relation name */ - relationNameList = textToQualifiedNameList(relationName); - relation = makeRangeVarFromNameList(relationNameList); + List *relationNameList = textToQualifiedNameList(relationName); + RangeVar *relation = makeRangeVarFromNameList(relationNameList); /* lock the relation with the lock mode */ - relationId = RangeVarGetRelidExtended(relation, lockMode, RVR_MISSING_OK, - CitusRangeVarCallbackForLockTable, - (void *) &lockMode); - relationExists = OidIsValid(relationId); + Oid relationId = RangeVarGetRelidExtended(relation, lockMode, RVR_MISSING_OK, + CitusRangeVarCallbackForLockTable, + (void *) &lockMode); + bool relationExists = OidIsValid(relationId); PG_RETURN_BOOL(relationExists); } @@ -816,7 +803,6 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId, Oid oldRelationId, void *arg) { LOCKMODE lockmode = *(LOCKMODE *) arg; - AclResult aclResult; if (!OidIsValid(relationId)) { @@ -832,7 +818,7 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId, } /* check permissions */ 
- aclResult = CitusLockTableAclCheck(relationId, lockmode, GetUserId()); + AclResult aclResult = CitusLockTableAclCheck(relationId, lockmode, GetUserId()); if (aclResult != ACLCHECK_OK) { aclcheck_error(aclResult, get_relkind_objtype(get_rel_relkind(relationId)), @@ -851,7 +837,6 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId, static AclResult CitusLockTableAclCheck(Oid relationId, LOCKMODE lockmode, Oid userId) { - AclResult aclResult; AclMode aclMask; /* verify adequate privilege */ @@ -868,7 +853,7 @@ CitusLockTableAclCheck(Oid relationId, LOCKMODE lockmode, Oid userId) aclMask = ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE; } - aclResult = pg_class_aclcheck(relationId, userId, aclMask); + AclResult aclResult = pg_class_aclcheck(relationId, userId, aclMask); return aclResult; } diff --git a/src/backend/distributed/utils/role.c b/src/backend/distributed/utils/role.c index 0490857c9..83ea01367 100644 --- a/src/backend/distributed/utils/role.c +++ b/src/backend/distributed/utils/role.c @@ -32,14 +32,13 @@ alter_role_if_exists(PG_FUNCTION_ARGS) const char *rolename = text_to_cstring(rolenameText); text *utilityQueryText = PG_GETARG_TEXT_P(1); const char *utilityQuery = text_to_cstring(utilityQueryText); - Node *parseTree = NULL; if (get_role_oid(rolename, true) == InvalidOid) { PG_RETURN_BOOL(false); } - parseTree = ParseTreeNode(utilityQuery); + Node *parseTree = ParseTreeNode(utilityQuery); CitusProcessUtility(parseTree, utilityQuery, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); diff --git a/src/backend/distributed/utils/shardinterval_utils.c b/src/backend/distributed/utils/shardinterval_utils.c index 425c77970..35b5f4a41 100644 --- a/src/backend/distributed/utils/shardinterval_utils.c +++ b/src/backend/distributed/utils/shardinterval_utils.c @@ -250,7 +250,6 @@ ShardInterval * FindShardInterval(Datum partitionColumnValue, DistTableCacheEntry *cacheEntry) { Datum searchedValue = partitionColumnValue; - int shardIndex = 
INVALID_SHARD_INDEX; if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH) { @@ -259,7 +258,7 @@ FindShardInterval(Datum partitionColumnValue, DistTableCacheEntry *cacheEntry) partitionColumnValue); } - shardIndex = FindShardIntervalIndex(searchedValue, cacheEntry); + int shardIndex = FindShardIntervalIndex(searchedValue, cacheEntry); if (shardIndex == INVALID_SHARD_INDEX) { @@ -379,13 +378,12 @@ SearchCachedShardInterval(Datum partitionColumnValue, ShardInterval **shardInter while (lowerBoundIndex < upperBoundIndex) { int middleIndex = (lowerBoundIndex + upperBoundIndex) / 2; - int maxValueComparison = 0; - int minValueComparison = 0; - minValueComparison = FunctionCall2Coll(compareFunction, - DEFAULT_COLLATION_OID, - partitionColumnValue, - shardIntervalCache[middleIndex]->minValue); + int minValueComparison = FunctionCall2Coll(compareFunction, + DEFAULT_COLLATION_OID, + partitionColumnValue, + shardIntervalCache[middleIndex]-> + minValue); if (DatumGetInt32(minValueComparison) < 0) { @@ -393,10 +391,11 @@ SearchCachedShardInterval(Datum partitionColumnValue, ShardInterval **shardInter continue; } - maxValueComparison = FunctionCall2Coll(compareFunction, - DEFAULT_COLLATION_OID, - partitionColumnValue, - shardIntervalCache[middleIndex]->maxValue); + int maxValueComparison = FunctionCall2Coll(compareFunction, + DEFAULT_COLLATION_OID, + partitionColumnValue, + shardIntervalCache[middleIndex]-> + maxValue); if (DatumGetInt32(maxValueComparison) <= 0) { @@ -420,7 +419,6 @@ SingleReplicatedTable(Oid relationId) { List *shardList = LoadShardList(relationId); List *shardPlacementList = NIL; - Oid shardId = INVALID_SHARD_ID; /* we could have append/range distributed tables without shards */ if (list_length(shardList) <= 1) @@ -429,7 +427,7 @@ SingleReplicatedTable(Oid relationId) } /* checking only for the first shard id should suffice */ - shardId = (*(uint64 *) linitial(shardList)); + Oid shardId = (*(uint64 *) linitial(shardList)); /* for hash distributed tables, it 
is sufficient to only check one shard */ if (PartitionMethod(relationId) == DISTRIBUTE_BY_HASH) diff --git a/src/backend/distributed/utils/statistics_collection.c b/src/backend/distributed/utils/statistics_collection.c index 5172d1680..0cce1181f 100644 --- a/src/backend/distributed/utils/statistics_collection.c +++ b/src/backend/distributed/utils/statistics_collection.c @@ -122,9 +122,8 @@ CollectBasicUsageStatistics(void) } PG_CATCH(); { - ErrorData *edata = NULL; MemoryContextSwitchTo(savedContext); - edata = CopyErrorData(); + ErrorData *edata = CopyErrorData(); FlushErrorState(); RollbackAndReleaseCurrentSubTransaction(); @@ -193,7 +192,6 @@ DistributedTablesSize(List *distTableOids) foreach(distTableOidCell, distTableOids) { Oid relationId = lfirst_oid(distTableOidCell); - Datum tableSizeDatum = 0; /* * Relations can get dropped after getting the Oid list and before we @@ -217,8 +215,8 @@ DistributedTablesSize(List *distTableOids) continue; } - tableSizeDatum = DirectFunctionCall1(citus_table_size, - ObjectIdGetDatum(relationId)); + Datum tableSizeDatum = DirectFunctionCall1(citus_table_size, + ObjectIdGetDatum(relationId)); totalSize += DatumGetInt64(tableSizeDatum); heap_close(relation, AccessShareLock); } @@ -265,10 +263,9 @@ SendHttpPostJsonRequest(const char *url, const char *jsonObj, long timeoutSecond curl_write_callback responseCallback) { bool success = false; - CURL *curl = NULL; curl_global_init(CURL_GLOBAL_DEFAULT); - curl = curl_easy_init(); + CURL *curl = curl_easy_init(); if (curl) { struct curl_slist *headers = NULL; @@ -348,8 +345,7 @@ citus_server_id(PG_FUNCTION_ARGS) */ if (!pg_strong_random((char *) buf, UUID_LEN)) { - int bufIdx = 0; - for (bufIdx = 0; bufIdx < UUID_LEN; bufIdx++) + for (int bufIdx = 0; bufIdx < UUID_LEN; bufIdx++) { buf[bufIdx] = (uint8) (random() & 0xFF); } @@ -411,11 +407,10 @@ uname(struct utsname *buf) { SYSTEM_INFO info; - DWORD procarch; char *arch; GetSystemInfo(&info); - procarch = info.wProcessorArchitecture; + 
DWORD procarch = info.wProcessorArchitecture; switch (procarch) { diff --git a/src/backend/distributed/worker/task_tracker.c b/src/backend/distributed/worker/task_tracker.c index ada4b8a7c..b8099f302 100644 --- a/src/backend/distributed/worker/task_tracker.c +++ b/src/backend/distributed/worker/task_tracker.c @@ -131,7 +131,6 @@ TaskTrackerRegister(void) void TaskTrackerMain(Datum main_arg) { - MemoryContext TaskTrackerContext = NULL; sigjmp_buf local_sigjmp_buf; static bool processStartUp = true; @@ -147,10 +146,11 @@ TaskTrackerMain(Datum main_arg) * that we can reset the context during error recovery and thereby avoid * possible memory leaks. */ - TaskTrackerContext = AllocSetContextCreateExtended(TopMemoryContext, "Task Tracker", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext TaskTrackerContext = AllocSetContextCreateExtended(TopMemoryContext, + "Task Tracker", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); MemoryContextSwitchTo(TaskTrackerContext); /* @@ -282,17 +282,15 @@ TaskTrackerMain(Datum main_arg) WorkerTask * WorkerTasksHashEnter(uint64 jobId, uint32 taskId) { - WorkerTask *workerTask = NULL; - void *hashKey = NULL; bool handleFound = false; WorkerTask searchTask; searchTask.jobId = jobId; searchTask.taskId = taskId; - hashKey = (void *) &searchTask; - workerTask = (WorkerTask *) hash_search(TaskTrackerTaskHash, hashKey, - HASH_ENTER_NULL, &handleFound); + void *hashKey = (void *) &searchTask; + WorkerTask *workerTask = (WorkerTask *) hash_search(TaskTrackerTaskHash, hashKey, + HASH_ENTER_NULL, &handleFound); if (workerTask == NULL) { ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -320,16 +318,13 @@ WorkerTasksHashEnter(uint64 jobId, uint32 taskId) WorkerTask * WorkerTasksHashFind(uint64 jobId, uint32 taskId) { - WorkerTask *workerTask = NULL; - void *hashKey = NULL; - WorkerTask searchTask; searchTask.jobId = jobId; searchTask.taskId = taskId; - hashKey = 
(void *) &searchTask; - workerTask = (WorkerTask *) hash_search(TaskTrackerTaskHash, hashKey, - HASH_FIND, NULL); + void *hashKey = (void *) &searchTask; + WorkerTask *workerTask = (WorkerTask *) hash_search(TaskTrackerTaskHash, hashKey, + HASH_FIND, NULL); return workerTask; } @@ -383,7 +378,6 @@ TrackerCleanupJobSchemas(void) foreach(databaseNameCell, databaseNameList) { char *databaseName = (char *) lfirst(databaseNameCell); - WorkerTask *cleanupTask = NULL; /* template0 database does not accept connections */ int skipDatabaseName = strncmp(databaseName, TEMPLATE0_NAME, NAMEDATALEN); @@ -397,7 +391,7 @@ TrackerCleanupJobSchemas(void) * tracker process. We also assign high priorities to these tasks so * that they get scheduled before everyone else. */ - cleanupTask = WorkerTasksHashEnter(jobId, taskIndex); + WorkerTask *cleanupTask = WorkerTasksHashEnter(jobId, taskIndex); cleanupTask->assignedAt = HIGH_PRIORITY_TASK_TIME; cleanupTask->taskStatus = TASK_ASSIGNED; @@ -429,11 +423,10 @@ static void TrackerCleanupConnections(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; - WorkerTask *currentTask = NULL; hash_seq_init(&status, WorkerTasksHash); - currentTask = (WorkerTask *) hash_seq_search(&status); + WorkerTask *currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (currentTask->connectionId != INVALID_CONNECTION_ID) @@ -457,11 +450,10 @@ TrackerRegisterShutDown(HTAB *WorkerTasksHash) { uint64 jobId = RESERVED_JOB_ID; uint32 taskId = SHUTDOWN_MARKER_TASK_ID; - WorkerTask *shutdownMarkerTask = NULL; LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); - shutdownMarkerTask = WorkerTasksHashEnter(jobId, taskId); + WorkerTask *shutdownMarkerTask = WorkerTasksHashEnter(jobId, taskId); shutdownMarkerTask->taskStatus = TASK_SUCCEEDED; shutdownMarkerTask->connectionId = INVALID_CONNECTION_ID; @@ -539,11 +531,10 @@ static Size TaskTrackerShmemSize(void) { Size size = 0; - Size hashSize = 0; size = add_size(size, 
sizeof(WorkerTasksSharedStateData)); - hashSize = hash_estimate_size(MaxTrackedTasksPerNode, WORKER_TASK_SIZE); + Size hashSize = hash_estimate_size(MaxTrackedTasksPerNode, WORKER_TASK_SIZE); size = add_size(size, hashSize); return size; @@ -556,12 +547,9 @@ TaskTrackerShmemInit(void) { bool alreadyInitialized = false; HASHCTL info; - int hashFlags = 0; - long maxTableSize = 0; - long initTableSize = 0; - maxTableSize = (long) MaxTrackedTasksPerNode; - initTableSize = maxTableSize / 8; + long maxTableSize = (long) MaxTrackedTasksPerNode; + long initTableSize = maxTableSize / 8; /* * Allocate the control structure for the hash table that maps unique task @@ -572,7 +560,7 @@ TaskTrackerShmemInit(void) info.keysize = sizeof(uint64) + sizeof(uint32); info.entrysize = WORKER_TASK_SIZE; info.hash = tag_hash; - hashFlags = (HASH_ELEM | HASH_FUNCTION); + int hashFlags = (HASH_ELEM | HASH_FUNCTION); /* * Currently the lock isn't required because allocation only happens at @@ -632,34 +620,30 @@ static List * SchedulableTaskList(HTAB *WorkerTasksHash) { List *schedulableTaskList = NIL; - WorkerTask *schedulableTaskQueue = NULL; - uint32 runningTaskCount = 0; - uint32 schedulableTaskCount = 0; - uint32 tasksToScheduleCount = 0; - uint32 queueIndex = 0; - runningTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, &RunningTask); + uint32 runningTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, &RunningTask); if (runningTaskCount >= MaxRunningTasksPerNode) { return NIL; /* we already have enough tasks running */ } - schedulableTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, &SchedulableTask); + uint32 schedulableTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, + &SchedulableTask); if (schedulableTaskCount == 0) { return NIL; /* we do not have any new tasks to schedule */ } - tasksToScheduleCount = MaxRunningTasksPerNode - runningTaskCount; + uint32 tasksToScheduleCount = MaxRunningTasksPerNode - runningTaskCount; if (tasksToScheduleCount > 
schedulableTaskCount) { tasksToScheduleCount = schedulableTaskCount; } /* get all schedulable tasks ordered according to a priority criteria */ - schedulableTaskQueue = SchedulableTaskPriorityQueue(WorkerTasksHash); + WorkerTask *schedulableTaskQueue = SchedulableTaskPriorityQueue(WorkerTasksHash); - for (queueIndex = 0; queueIndex < tasksToScheduleCount; queueIndex++) + for (uint32 queueIndex = 0; queueIndex < tasksToScheduleCount; queueIndex++) { WorkerTask *schedulableTask = (WorkerTask *) palloc0(WORKER_TASK_SIZE); WorkerTask *queuedTask = WORKER_TASK_AT(schedulableTaskQueue, queueIndex); @@ -685,25 +669,22 @@ static WorkerTask * SchedulableTaskPriorityQueue(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; - WorkerTask *currentTask = NULL; - WorkerTask *priorityQueue = NULL; - uint32 queueSize = 0; uint32 queueIndex = 0; /* our priority queue size equals to the number of schedulable tasks */ - queueSize = CountTasksMatchingCriteria(WorkerTasksHash, &SchedulableTask); + uint32 queueSize = CountTasksMatchingCriteria(WorkerTasksHash, &SchedulableTask); if (queueSize == 0) { return NULL; } /* allocate an array of tasks for our priority queue */ - priorityQueue = (WorkerTask *) palloc0(WORKER_TASK_SIZE * queueSize); + WorkerTask *priorityQueue = (WorkerTask *) palloc0(WORKER_TASK_SIZE * queueSize); /* copy tasks in the shared hash to the priority queue */ hash_seq_init(&status, WorkerTasksHash); - currentTask = (WorkerTask *) hash_seq_search(&status); + WorkerTask *currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (SchedulableTask(currentTask)) @@ -734,12 +715,11 @@ CountTasksMatchingCriteria(HTAB *WorkerTasksHash, bool (*CriteriaFunction)(WorkerTask *)) { HASH_SEQ_STATUS status; - WorkerTask *currentTask = NULL; uint32 taskCount = 0; hash_seq_init(&status, WorkerTasksHash); - currentTask = (WorkerTask *) hash_seq_search(&status); + WorkerTask *currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { 
bool matchesCriteria = (*CriteriaFunction)(currentTask); @@ -809,11 +789,10 @@ ScheduleWorkerTasks(HTAB *WorkerTasksHash, List *schedulableTaskList) foreach(schedulableTaskCell, schedulableTaskList) { WorkerTask *schedulableTask = (WorkerTask *) lfirst(schedulableTaskCell); - WorkerTask *taskToSchedule = NULL; void *hashKey = (void *) schedulableTask; - taskToSchedule = (WorkerTask *) hash_search(WorkerTasksHash, hashKey, - HASH_FIND, NULL); + WorkerTask *taskToSchedule = (WorkerTask *) hash_search(WorkerTasksHash, hashKey, + HASH_FIND, NULL); /* if task is null, the shared hash is in an incosistent state */ if (taskToSchedule == NULL) @@ -850,12 +829,10 @@ static void ManageWorkerTasksHash(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; - List *schedulableTaskList = NIL; - WorkerTask *currentTask = NULL; /* ask the scheduler if we have new tasks to schedule */ LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED); - schedulableTaskList = SchedulableTaskList(WorkerTasksHash); + List *schedulableTaskList = SchedulableTaskList(WorkerTasksHash); LWLockRelease(&WorkerTasksSharedState->taskHashLock); LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); @@ -875,7 +852,7 @@ ManageWorkerTasksHash(HTAB *WorkerTasksHash) /* now iterate over all tasks, and manage them */ hash_seq_init(&status, WorkerTasksHash); - currentTask = (WorkerTask *) hash_seq_search(&status); + WorkerTask *currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { ManageWorkerTask(currentTask, WorkerTasksHash); diff --git a/src/backend/distributed/worker/task_tracker_protocol.c b/src/backend/distributed/worker/task_tracker_protocol.c index d42c25286..1d445ccc3 100644 --- a/src/backend/distributed/worker/task_tracker_protocol.c +++ b/src/backend/distributed/worker/task_tracker_protocol.c @@ -68,18 +68,15 @@ task_tracker_assign_task(PG_FUNCTION_ARGS) text *taskCallStringText = PG_GETARG_TEXT_P(2); StringInfo jobSchemaName = JobSchemaName(jobId); - bool 
schemaExists = false; - WorkerTask *workerTask = NULL; char *taskCallString = text_to_cstring(taskCallStringText); uint32 taskCallStringLength = strlen(taskCallString); - bool taskTrackerRunning = false; CheckCitusVersion(ERROR); /* check that we have a running task tracker on this host */ - taskTrackerRunning = TaskTrackerRunning(); + bool taskTrackerRunning = TaskTrackerRunning(); if (!taskTrackerRunning) { ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW), @@ -102,7 +99,7 @@ task_tracker_assign_task(PG_FUNCTION_ARGS) * schema is already visible, and we immediately release the resource lock. */ LockJobResource(jobId, AccessExclusiveLock); - schemaExists = JobSchemaExists(jobSchemaName); + bool schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { /* lock gets automatically released upon return from this function */ @@ -120,7 +117,7 @@ task_tracker_assign_task(PG_FUNCTION_ARGS) LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); /* check if we already have the task in our shared hash */ - workerTask = WorkerTasksHashFind(jobId, taskId); + WorkerTask *workerTask = WorkerTasksHashFind(jobId, taskId); if (workerTask == NULL) { CreateTask(jobId, taskId, taskCallString); @@ -146,11 +143,10 @@ task_tracker_task_status(PG_FUNCTION_ARGS) WorkerTask *workerTask = NULL; uint32 taskStatus = 0; char *userName = CurrentUserName(); - bool taskTrackerRunning = false; CheckCitusVersion(ERROR); - taskTrackerRunning = TaskTrackerRunning(); + bool taskTrackerRunning = TaskTrackerRunning(); if (taskTrackerRunning) { @@ -188,15 +184,11 @@ task_tracker_cleanup_job(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); - bool schemaExists = false; HASH_SEQ_STATUS status; - WorkerTask *currentTask = NULL; - StringInfo jobDirectoryName = NULL; - StringInfo jobSchemaName = NULL; CheckCitusVersion(ERROR); - jobSchemaName = JobSchemaName(jobId); + StringInfo jobSchemaName = JobSchemaName(jobId); /* * We'll keep this lock for a while, but that's ok because 
nothing @@ -204,7 +196,7 @@ task_tracker_cleanup_job(PG_FUNCTION_ARGS) */ LockJobResource(jobId, AccessExclusiveLock); - schemaExists = JobSchemaExists(jobSchemaName); + bool schemaExists = JobSchemaExists(jobSchemaName); if (schemaExists) { Oid schemaId = get_namespace_oid(jobSchemaName->data, false); @@ -220,7 +212,7 @@ task_tracker_cleanup_job(PG_FUNCTION_ARGS) hash_seq_init(&status, TaskTrackerTaskHash); - currentTask = (WorkerTask *) hash_seq_search(&status); + WorkerTask *currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (currentTask->jobId == jobId) @@ -239,7 +231,7 @@ task_tracker_cleanup_job(PG_FUNCTION_ARGS) * schema drop call can block if another process is creating the schema or * writing to a table within the schema. */ - jobDirectoryName = JobDirectoryName(jobId); + StringInfo jobDirectoryName = JobDirectoryName(jobId); CitusRemoveDirectory(jobDirectoryName); RemoveJobSchema(jobSchemaName); @@ -281,12 +273,10 @@ task_tracker_conninfo_cache_invalidate(PG_FUNCTION_ARGS) static bool TaskTrackerRunning(void) { - WorkerTask *workerTask = NULL; - bool postmasterAlive = true; bool taskTrackerRunning = true; /* if postmaster shut down, infer task tracker shut down from it */ - postmasterAlive = PostmasterIsAlive(); + bool postmasterAlive = PostmasterIsAlive(); if (!postmasterAlive) { return false; @@ -299,7 +289,8 @@ TaskTrackerRunning(void) */ LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED); - workerTask = WorkerTasksHashFind(RESERVED_JOB_ID, SHUTDOWN_MARKER_TASK_ID); + WorkerTask *workerTask = WorkerTasksHashFind(RESERVED_JOB_ID, + SHUTDOWN_MARKER_TASK_ID); if (workerTask != NULL) { taskTrackerRunning = false; @@ -321,15 +312,13 @@ static void CreateJobSchema(StringInfo schemaName) { const char *queryString = NULL; - bool oldAllowSystemTableMods = false; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; - CreateSchemaStmt *createSchemaStmt = NULL; RoleSpec currentUserRole = { 0 }; /* allow 
schema names that start with pg_ */ - oldAllowSystemTableMods = allowSystemTableMods; + bool oldAllowSystemTableMods = allowSystemTableMods; allowSystemTableMods = true; /* ensure we're allowed to create this schema */ @@ -342,7 +331,7 @@ CreateJobSchema(StringInfo schemaName) currentUserRole.rolename = GetUserNameFromId(savedUserId, false); currentUserRole.location = -1; - createSchemaStmt = makeNode(CreateSchemaStmt); + CreateSchemaStmt *createSchemaStmt = makeNode(CreateSchemaStmt); createSchemaStmt->schemaname = schemaName->data; createSchemaStmt->schemaElts = NIL; @@ -366,20 +355,18 @@ CreateJobSchema(StringInfo schemaName) static void CreateTask(uint64 jobId, uint32 taskId, char *taskCallString) { - WorkerTask *workerTask = NULL; - uint32 assignmentTime = 0; char *databaseName = CurrentDatabaseName(); char *userName = CurrentUserName(); /* increase task priority for cleanup tasks */ - assignmentTime = (uint32) time(NULL); + uint32 assignmentTime = (uint32) time(NULL); if (taskId == JOB_CLEANUP_TASK_ID) { assignmentTime = HIGH_PRIORITY_TASK_TIME; } /* enter the worker task into shared hash and initialize the task */ - workerTask = WorkerTasksHashEnter(jobId, taskId); + WorkerTask *workerTask = WorkerTasksHashEnter(jobId, taskId); workerTask->assignedAt = assignmentTime; strlcpy(workerTask->taskCallString, taskCallString, MaxTaskStringSize); @@ -399,9 +386,7 @@ CreateTask(uint64 jobId, uint32 taskId, char *taskCallString) static void UpdateTask(WorkerTask *workerTask, char *taskCallString) { - TaskStatus taskStatus = TASK_STATUS_INVALID_FIRST; - - taskStatus = workerTask->taskStatus; + TaskStatus taskStatus = workerTask->taskStatus; Assert(taskStatus != TASK_STATUS_INVALID_FIRST); /* @@ -434,7 +419,6 @@ UpdateTask(WorkerTask *workerTask, char *taskCallString) static void CleanupTask(WorkerTask *workerTask) { - WorkerTask *taskRemoved = NULL; void *hashKey = (void *) workerTask; /* @@ -461,7 +445,8 @@ CleanupTask(WorkerTask *workerTask) } /* remove the task from 
the shared hash */ - taskRemoved = hash_search(TaskTrackerTaskHash, hashKey, HASH_REMOVE, NULL); + WorkerTask *taskRemoved = hash_search(TaskTrackerTaskHash, hashKey, HASH_REMOVE, + NULL); if (taskRemoved == NULL) { ereport(FATAL, (errmsg("worker task hash corrupted"))); diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 43beafa22..ed27300c2 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -73,7 +73,6 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) { text *sqlStatementText = PG_GETARG_TEXT_P(0); const char *sqlStatement = text_to_cstring(sqlStatementText); - const ObjectAddress *address = NULL; Node *parseTree = ParseTreeNode(sqlStatement); /* @@ -81,12 +80,9 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) * if the type actually exists instead of adding the IF EXISTS keyword to the * statement. */ - address = GetObjectAddressFromParseTree(parseTree, true); + const ObjectAddress *address = GetObjectAddressFromParseTree(parseTree, true); if (ObjectExists(address)) { - char *newName = NULL; - const char *sqlRenameStmt = NULL; - RenameStmt *renameStmt = NULL; const char *localSqlStatement = CreateStmtByObjectAddress(address); if (strcmp(sqlStatement, localSqlStatement) == 0) @@ -107,10 +103,10 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) PG_RETURN_BOOL(false); } - newName = GenerateBackupNameForCollision(address); + char *newName = GenerateBackupNameForCollision(address); - renameStmt = CreateRenameStatement(address, newName); - sqlRenameStmt = DeparseTreeNode((Node *) renameStmt); + RenameStmt *renameStmt = CreateRenameStatement(address, newName); + const char *sqlRenameStmt = DeparseTreeNode((Node *) renameStmt); CitusProcessUtility((Node *) renameStmt, sqlRenameStmt, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); diff --git 
a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c index a1434ee4b..e0372a728 100644 --- a/src/backend/distributed/worker/worker_data_fetch_protocol.c +++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c @@ -100,7 +100,6 @@ worker_fetch_partition_file(PG_FUNCTION_ARGS) uint32 upstreamTaskId = PG_GETARG_UINT32(3); text *nodeNameText = PG_GETARG_TEXT_P(4); uint32 nodePort = PG_GETARG_UINT32(5); - char *nodeName = NULL; /* remote filename is // */ StringInfo remoteDirectoryName = TaskDirectoryName(jobId, partitionTaskId); @@ -123,7 +122,7 @@ worker_fetch_partition_file(PG_FUNCTION_ARGS) InitTaskDirectory(jobId, upstreamTaskId); } - nodeName = text_to_cstring(nodeNameText); + char *nodeName = text_to_cstring(nodeNameText); /* we've made sure the file names are sanitized, safe to fetch as superuser */ FetchRegularFileAsSuperUser(nodeName, nodePort, remoteFilename, taskFilename); @@ -169,32 +168,27 @@ static void FetchRegularFileAsSuperUser(const char *nodeName, uint32 nodePort, StringInfo remoteFilename, StringInfo localFilename) { - char *nodeUser = NULL; - StringInfo attemptFilename = NULL; - StringInfo transmitCommand = NULL; char *userName = CurrentUserName(); uint32 randomId = (uint32) random(); - bool received = false; - int renamed = 0; /* * We create an attempt file to signal that the file is still in transit. We * further append a random id to the filename to handle the unexpected case * of another process concurrently fetching the same file. 
*/ - attemptFilename = makeStringInfo(); + StringInfo attemptFilename = makeStringInfo(); appendStringInfo(attemptFilename, "%s_%0*u%s", localFilename->data, MIN_TASK_FILENAME_WIDTH, randomId, ATTEMPT_FILE_SUFFIX); - transmitCommand = makeStringInfo(); + StringInfo transmitCommand = makeStringInfo(); appendStringInfo(transmitCommand, TRANSMIT_WITH_USER_COMMAND, remoteFilename->data, quote_literal_cstr(userName)); /* connect as superuser to give file access */ - nodeUser = CitusExtensionOwnerName(); + char *nodeUser = CitusExtensionOwnerName(); - received = ReceiveRegularFile(nodeName, nodePort, nodeUser, transmitCommand, - attemptFilename); + bool received = ReceiveRegularFile(nodeName, nodePort, nodeUser, transmitCommand, + attemptFilename); if (!received) { ereport(ERROR, (errmsg("could not receive file \"%s\" from %s:%u", @@ -202,7 +196,7 @@ FetchRegularFileAsSuperUser(const char *nodeName, uint32 nodePort, } /* atomically rename the attempt file */ - renamed = rename(attemptFilename->data, localFilename->data); + int renamed = rename(attemptFilename->data, localFilename->data); if (renamed != 0) { ereport(ERROR, (errcode_for_file_access(), @@ -224,23 +218,17 @@ static bool ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, StringInfo transmitCommand, StringInfo filePath) { - int32 fileDescriptor = -1; char filename[MAXPGPATH]; - int closed = -1; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); - QueryStatus queryStatus = CLIENT_INVALID_QUERY; - int32 connectionId = INVALID_CONNECTION_ID; - char *nodeDatabase = NULL; - bool querySent = false; bool queryReady = false; bool copyDone = false; /* create local file to append remote data to */ snprintf(filename, MAXPGPATH, "%s", filePath->data); - fileDescriptor = BasicOpenFilePerm(filename, fileFlags, fileMode); + int32 fileDescriptor = BasicOpenFilePerm(filename, fileFlags, fileMode); if (fileDescriptor < 0) { 
ereport(WARNING, (errcode_for_file_access(), @@ -250,10 +238,10 @@ ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, } /* we use the same database name on the master and worker nodes */ - nodeDatabase = CurrentDatabaseName(); + char *nodeDatabase = CurrentDatabaseName(); /* connect to remote node */ - connectionId = MultiClientConnect(nodeName, nodePort, nodeDatabase, nodeUser); + int32 connectionId = MultiClientConnect(nodeName, nodePort, nodeDatabase, nodeUser); if (connectionId == INVALID_CONNECTION_ID) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); @@ -262,7 +250,7 @@ ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, } /* send request to remote node to start transmitting data */ - querySent = MultiClientSendQuery(connectionId, transmitCommand->data); + bool querySent = MultiClientSendQuery(connectionId, transmitCommand->data); if (!querySent) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); @@ -293,7 +281,7 @@ ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, } /* check query response is as expected */ - queryStatus = MultiClientQueryStatus(connectionId); + QueryStatus queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus != CLIENT_QUERY_COPY) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); @@ -324,7 +312,7 @@ ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, /* we are done executing; release the connection and the file handle */ MultiClientDisconnect(connectionId); - closed = close(fileDescriptor); + int closed = close(fileDescriptor); if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), @@ -357,17 +345,14 @@ ReceiveResourceCleanup(int32 connectionId, const char *filename, int32 fileDescr if (fileDescriptor != -1) { - int closed = -1; - int deleted = -1; - - closed = close(fileDescriptor); + int closed = close(fileDescriptor); if (closed < 0) { ereport(WARNING, 
(errcode_for_file_access(), errmsg("could not close file \"%s\": %m", filename))); } - deleted = unlink(filename); + int deleted = unlink(filename); if (deleted != 0) { ereport(WARNING, (errcode_for_file_access(), @@ -462,10 +447,6 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS) Oid sequenceTypeId = PG_GETARG_OID(1); const char *commandString = text_to_cstring(commandText); Node *commandNode = ParseTreeNode(commandString); - CreateSeqStmt *createSequenceStatement = NULL; - char *sequenceName = NULL; - char *sequenceSchema = NULL; - Oid sequenceRelationId = InvalidOid; NodeTag nodeType = nodeTag(commandNode); @@ -483,14 +464,14 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS) None_Receiver, NULL); CommandCounterIncrement(); + CreateSeqStmt *createSequenceStatement = (CreateSeqStmt *) commandNode; + + char *sequenceName = createSequenceStatement->sequence->relname; + char *sequenceSchema = createSequenceStatement->sequence->schemaname; createSequenceStatement = (CreateSeqStmt *) commandNode; - sequenceName = createSequenceStatement->sequence->relname; - sequenceSchema = createSequenceStatement->sequence->schemaname; - createSequenceStatement = (CreateSeqStmt *) commandNode; - - sequenceRelationId = RangeVarGetRelid(createSequenceStatement->sequence, - AccessShareLock, false); + Oid sequenceRelationId = RangeVarGetRelid(createSequenceStatement->sequence, + AccessShareLock, false); Assert(sequenceRelationId != InvalidOid); AlterSequenceMinMax(sequenceRelationId, sequenceSchema, sequenceName, sequenceTypeId); @@ -507,12 +488,10 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS) uint64 ExtractShardIdFromTableName(const char *tableName, bool missingOk) { - uint64 shardId = 0; - char *shardIdString = NULL; char *shardIdStringEnd = NULL; /* find the last underscore and increment for shardId string */ - shardIdString = strrchr(tableName, SHARD_NAME_SEPARATOR); + char *shardIdString = strrchr(tableName, SHARD_NAME_SEPARATOR); if (shardIdString == NULL && !missingOk) { 
ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"", @@ -526,7 +505,7 @@ ExtractShardIdFromTableName(const char *tableName, bool missingOk) shardIdString++; errno = 0; - shardId = pg_strtouint64(shardIdString, &shardIdStringEnd, 0); + uint64 shardId = pg_strtouint64(shardIdString, &shardIdStringEnd, 0); if (errno != 0 || (*shardIdStringEnd != '\0')) { @@ -553,18 +532,15 @@ ExtractShardIdFromTableName(const char *tableName, bool missingOk) List * TableDDLCommandList(const char *nodeName, uint32 nodePort, const char *tableName) { - List *ddlCommandList = NIL; - StringInfo queryString = NULL; - MultiConnection *connection = NULL; PGresult *result = NULL; uint32 connectionFlag = FORCE_NEW_CONNECTION; - queryString = makeStringInfo(); + StringInfo queryString = makeStringInfo(); appendStringInfo(queryString, GET_TABLE_DDL_EVENTS, tableName); - connection = GetNodeConnection(connectionFlag, nodeName, nodePort); + MultiConnection *connection = GetNodeConnection(connectionFlag, nodeName, nodePort); ExecuteOptionalRemoteCommand(connection, queryString->data, &result); - ddlCommandList = ReadFirstColumnAsText(result); + List *ddlCommandList = ReadFirstColumnAsText(result); PQclear(result); ForgetResults(connection); @@ -594,11 +570,7 @@ ParseTreeNode(const char *ddlCommand) Node * ParseTreeRawStmt(const char *ddlCommand) { - Node *parseTreeNode = NULL; - List *parseTreeList = NULL; - uint32 parseTreeCount = 0; - - parseTreeList = pg_parse_query(ddlCommand); + List *parseTreeList = pg_parse_query(ddlCommand); /* log immediately if dictated by log statement */ if (check_log_statement(parseTreeList)) @@ -607,7 +579,7 @@ ParseTreeRawStmt(const char *ddlCommand) errhidestmt(true))); } - parseTreeCount = list_length(parseTreeList); + uint32 parseTreeCount = list_length(parseTreeList); if (parseTreeCount != 1) { ereport(ERROR, (errmsg("cannot execute multiple utility events"))); @@ -619,7 +591,7 @@ ParseTreeRawStmt(const char *ddlCommand) * those commands are 
safe, we can safely set the ProcessUtilityContext to * PROCESS_UTILITY_TOPLEVEL. */ - parseTreeNode = (Node *) linitial(parseTreeList); + Node *parseTreeNode = (Node *) linitial(parseTreeList); return parseTreeNode; } @@ -644,20 +616,9 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS) char *shardTableName = NULL; char *shardSchemaName = NULL; - char *shardQualifiedName = NULL; char *sourceSchemaName = NULL; char *sourceTableName = NULL; - char *sourceQualifiedName = NULL; - StringInfo localFilePath = NULL; - StringInfo sourceCopyCommand = NULL; - CopyStmt *localCopyCommand = NULL; - RangeVar *localTable = NULL; - uint64 shardId = INVALID_SHARD_ID; - bool received = false; - StringInfo queryString = NULL; - Oid sourceShardRelationId = InvalidOid; - Oid sourceSchemaId = InvalidOid; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; @@ -674,16 +635,17 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS) * the transaction for this function commits, this lock will automatically * be released. This ensures appends to a shard happen in a serial manner. */ - shardId = ExtractShardIdFromTableName(shardTableName, false); + uint64 shardId = ExtractShardIdFromTableName(shardTableName, false); LockShardResource(shardId, AccessExclusiveLock); /* copy remote table's data to this node */ - localFilePath = makeStringInfo(); + StringInfo localFilePath = makeStringInfo(); appendStringInfo(localFilePath, "base/%s/%s" UINT64_FORMAT, PG_JOB_CACHE_DIR, TABLE_FILE_PREFIX, shardId); - sourceQualifiedName = quote_qualified_identifier(sourceSchemaName, sourceTableName); - sourceCopyCommand = makeStringInfo(); + char *sourceQualifiedName = quote_qualified_identifier(sourceSchemaName, + sourceTableName); + StringInfo sourceCopyCommand = makeStringInfo(); /* * Partitioned tables do not support "COPY table TO STDOUT". Thus, we use @@ -692,8 +654,8 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS) * If the schema name is not explicitly set, we use the public schema. 
*/ sourceSchemaName = sourceSchemaName ? sourceSchemaName : "public"; - sourceSchemaId = get_namespace_oid(sourceSchemaName, false); - sourceShardRelationId = get_relname_relid(sourceTableName, sourceSchemaId); + Oid sourceSchemaId = get_namespace_oid(sourceSchemaName, false); + Oid sourceShardRelationId = get_relname_relid(sourceTableName, sourceSchemaId); if (PartitionedTableNoLock(sourceShardRelationId)) { appendStringInfo(sourceCopyCommand, COPY_SELECT_ALL_OUT_COMMAND, @@ -704,8 +666,9 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS) appendStringInfo(sourceCopyCommand, COPY_OUT_COMMAND, sourceQualifiedName); } - received = ReceiveRegularFile(sourceNodeName, sourceNodePort, NULL, sourceCopyCommand, - localFilePath); + bool received = ReceiveRegularFile(sourceNodeName, sourceNodePort, NULL, + sourceCopyCommand, + localFilePath); if (!received) { ereport(ERROR, (errmsg("could not copy table \"%s\" from \"%s:%u\"", @@ -713,13 +676,13 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS) } /* copy local file into the given shard */ - localTable = makeRangeVar(shardSchemaName, shardTableName, -1); - localCopyCommand = CopyStatement(localTable, localFilePath->data); + RangeVar *localTable = makeRangeVar(shardSchemaName, shardTableName, -1); + CopyStmt *localCopyCommand = CopyStatement(localTable, localFilePath->data); - shardQualifiedName = quote_qualified_identifier(shardSchemaName, - shardTableName); + char *shardQualifiedName = quote_qualified_identifier(shardSchemaName, + shardTableName); - queryString = makeStringInfo(); + StringInfo queryString = makeStringInfo(); appendStringInfo(queryString, COPY_IN_COMMAND, shardQualifiedName, localFilePath->data); @@ -796,8 +759,6 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, Oid sequenceTypeId) { Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceId); - int64 startValue = 0; - int64 maxValue = 0; int64 sequenceMaxValue = sequenceData->seqmax; int64 sequenceMinValue = sequenceData->seqmin; 
int valueBitLength = 48; @@ -815,8 +776,8 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, } /* calculate min/max values that the sequence can generate in this worker */ - startValue = (((int64) GetLocalGroupId()) << valueBitLength) + 1; - maxValue = startValue + ((int64) 1 << valueBitLength); + int64 startValue = (((int64) GetLocalGroupId()) << valueBitLength) + 1; + int64 maxValue = startValue + ((int64) 1 << valueBitLength); /* * We alter the sequence if the previously set min and max values are not equal to @@ -826,8 +787,6 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, { StringInfo startNumericString = makeStringInfo(); StringInfo maxNumericString = makeStringInfo(); - Node *startFloatArg = NULL; - Node *maxFloatArg = NULL; AlterSeqStmt *alterSequenceStatement = makeNode(AlterSeqStmt); const char *dummyString = "-"; @@ -838,10 +797,10 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, * larger numbers we need to construct a float represented as a string. 
*/ appendStringInfo(startNumericString, INT64_FORMAT, startValue); - startFloatArg = (Node *) makeFloat(startNumericString->data); + Node *startFloatArg = (Node *) makeFloat(startNumericString->data); appendStringInfo(maxNumericString, INT64_FORMAT, maxValue); - maxFloatArg = (Node *) makeFloat(maxNumericString->data); + Node *maxFloatArg = (Node *) makeFloat(maxNumericString->data); SetDefElemArg(alterSequenceStatement, "start", startFloatArg); SetDefElemArg(alterSequenceStatement, "minvalue", startFloatArg); diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 1a5f9b944..bf5aa27c5 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -51,8 +51,6 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS) Oid relationId = ResolveRelationId(relationName, true); ObjectAddress distributedTableObject = { InvalidOid, InvalidOid, 0 }; - Relation distributedRelation = NULL; - List *shardList = NULL; ListCell *shardCell = NULL; char relationKind = '\0'; @@ -66,10 +64,10 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - shardList = LoadShardList(relationId); + List *shardList = LoadShardList(relationId); /* first check the relation type */ - distributedRelation = relation_open(relationId, AccessShareLock); + Relation distributedRelation = relation_open(relationId, AccessShareLock); relationKind = distributedRelation->rd_rel->relkind; EnsureRelationKindSupported(relationId); @@ -112,12 +110,11 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS) /* iterate over shardList to delete the corresponding rows */ foreach(shardCell, shardList) { - List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; uint64 *shardIdPointer = (uint64 *) lfirst(shardCell); uint64 shardId = (*shardIdPointer); - shardPlacementList = ShardPlacementList(shardId); + List *shardPlacementList = ShardPlacementList(shardId); 
foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); diff --git a/src/backend/distributed/worker/worker_merge_protocol.c b/src/backend/distributed/worker/worker_merge_protocol.c index 27347bd2d..44291fbaa 100644 --- a/src/backend/distributed/worker/worker_merge_protocol.c +++ b/src/backend/distributed/worker/worker_merge_protocol.c @@ -77,9 +77,6 @@ worker_merge_files_into_table(PG_FUNCTION_ARGS) StringInfo jobSchemaName = JobSchemaName(jobId); StringInfo taskTableName = TaskTableName(taskId); StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId); - bool schemaExists = false; - List *columnNameList = NIL; - List *columnTypeList = NIL; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Oid userId = GetUserId(); @@ -100,7 +97,7 @@ worker_merge_files_into_table(PG_FUNCTION_ARGS) * If the schema for the job isn't already created by the task tracker * protocol, we fall to using the default 'public' schema. 
*/ - schemaExists = JobSchemaExists(jobSchemaName); + bool schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { /* @@ -125,8 +122,8 @@ worker_merge_files_into_table(PG_FUNCTION_ARGS) } /* create the task table and copy files into the table */ - columnNameList = ArrayObjectToCStringList(columnNameObject); - columnTypeList = ArrayObjectToCStringList(columnTypeObject); + List *columnNameList = ArrayObjectToCStringList(columnNameObject); + List *columnTypeList = ArrayObjectToCStringList(columnTypeObject); CreateTaskTable(jobSchemaName, taskTableName, columnNameList, columnTypeList); @@ -172,12 +169,6 @@ worker_merge_files_and_run_query(PG_FUNCTION_ARGS) StringInfo intermediateTableName = TaskTableName(taskId); StringInfo mergeTableName = makeStringInfo(); StringInfo setSearchPathString = makeStringInfo(); - bool schemaExists = false; - int connected = 0; - int setSearchPathResult = 0; - int createMergeTableResult = 0; - int createIntermediateTableResult = 0; - int finished = 0; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Oid userId = GetUserId(); @@ -188,7 +179,7 @@ worker_merge_files_and_run_query(PG_FUNCTION_ARGS) * If the schema for the job isn't already created by the task tracker * protocol, we fall to using the default 'public' schema. 
*/ - schemaExists = JobSchemaExists(jobSchemaName); + bool schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { resetStringInfo(jobSchemaName); @@ -206,20 +197,20 @@ worker_merge_files_and_run_query(PG_FUNCTION_ARGS) /* Add "public" to search path to access UDFs in public schema */ appendStringInfo(setSearchPathString, ",public"); - connected = SPI_connect(); + int connected = SPI_connect(); if (connected != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); } - setSearchPathResult = SPI_exec(setSearchPathString->data, 0); + int setSearchPathResult = SPI_exec(setSearchPathString->data, 0); if (setSearchPathResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", setSearchPathString->data))); } - createMergeTableResult = SPI_exec(createMergeTableQuery, 0); + int createMergeTableResult = SPI_exec(createMergeTableQuery, 0); if (createMergeTableResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", @@ -237,14 +228,14 @@ worker_merge_files_and_run_query(PG_FUNCTION_ARGS) SetUserIdAndSecContext(savedUserId, savedSecurityContext); - createIntermediateTableResult = SPI_exec(createIntermediateTableQuery, 0); + int createIntermediateTableResult = SPI_exec(createIntermediateTableQuery, 0); if (createIntermediateTableResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", createIntermediateTableQuery))); } - finished = SPI_finish(); + int finished = SPI_finish(); if (finished != SPI_OK_FINISH) { ereport(ERROR, (errmsg("could not disconnect from SPI manager"))); @@ -341,8 +332,7 @@ ArrayObjectToCStringList(ArrayType *arrayObject) Datum *datumArray = DeconstructArrayObject(arrayObject); int32 arraySize = ArrayObjectCount(arrayObject); - int32 arrayIndex = 0; - for (arrayIndex = 0; arrayIndex < arraySize; arrayIndex++) + for (int32 arrayIndex = 0; arrayIndex < arraySize; arrayIndex++) { Datum datum = datumArray[arrayIndex]; char *cstring = TextDatumGetCString(datum); @@ -371,10 
+361,9 @@ void RemoveJobSchema(StringInfo schemaName) { Datum schemaNameDatum = CStringGetDatum(schemaName->data); - Oid schemaId = InvalidOid; - schemaId = GetSysCacheOid1Compat(NAMESPACENAME, Anum_pg_namespace_oid, - schemaNameDatum); + Oid schemaId = GetSysCacheOid1Compat(NAMESPACENAME, Anum_pg_namespace_oid, + schemaNameDatum); if (OidIsValid(schemaId)) { ObjectAddress schemaObject = { 0, 0, 0 }; @@ -420,11 +409,7 @@ static void CreateTaskTable(StringInfo schemaName, StringInfo relationName, List *columnNameList, List *columnTypeList) { - CreateStmt *createStatement = NULL; - RangeVar *relation = NULL; - List *columnDefinitionList = NIL; Oid relationId PG_USED_FOR_ASSERTS_ONLY = InvalidOid; - ObjectAddress relationObject; Assert(schemaName != NULL); Assert(relationName != NULL); @@ -434,13 +419,14 @@ CreateTaskTable(StringInfo schemaName, StringInfo relationName, * statements occur in the same transaction. Still, we want to make the * relation unlogged once we upgrade to PostgreSQL 9.1. 
*/ - relation = makeRangeVar(schemaName->data, relationName->data, -1); - columnDefinitionList = ColumnDefinitionList(columnNameList, columnTypeList); + RangeVar *relation = makeRangeVar(schemaName->data, relationName->data, -1); + List *columnDefinitionList = ColumnDefinitionList(columnNameList, columnTypeList); - createStatement = CreateStatement(relation, columnDefinitionList); + CreateStmt *createStatement = CreateStatement(relation, columnDefinitionList); - relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL, - NULL); + ObjectAddress relationObject = DefineRelation(createStatement, RELKIND_RELATION, + InvalidOid, NULL, + NULL); relationId = relationObject.objectId; Assert(relationId != InvalidOid); @@ -474,14 +460,12 @@ ColumnDefinitionList(List *columnNameList, List *columnTypeList) Oid columnTypeId = InvalidOid; int32 columnTypeMod = -1; bool missingOK = false; - TypeName *typeName = NULL; - ColumnDef *columnDefinition = NULL; parseTypeString(columnType, &columnTypeId, &columnTypeMod, missingOK); - typeName = makeTypeNameFromOid(columnTypeId, columnTypeMod); + TypeName *typeName = makeTypeNameFromOid(columnTypeId, columnTypeMod); /* we then create the column definition */ - columnDefinition = makeNode(ColumnDef); + ColumnDef *columnDefinition = makeNode(ColumnDef); columnDefinition->colname = (char *) columnName; columnDefinition->typeName = typeName; columnDefinition->is_local = true; @@ -533,7 +517,6 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, StringInfo sourceDirectoryName, Oid userId) { const char *directoryName = sourceDirectoryName->data; - struct dirent *directoryEntry = NULL; uint64 copiedRowTotal = 0; StringInfo expectedFileSuffix = makeStringInfo(); @@ -546,14 +529,11 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, appendStringInfo(expectedFileSuffix, ".%u", userId); - directoryEntry = ReadDir(directory, directoryName); + struct dirent *directoryEntry = 
ReadDir(directory, directoryName); for (; directoryEntry != NULL; directoryEntry = ReadDir(directory, directoryName)) { const char *baseFilename = directoryEntry->d_name; const char *queryString = NULL; - StringInfo fullFilename = NULL; - RangeVar *relation = NULL; - CopyStmt *copyStatement = NULL; uint64 copiedRowCount = 0; /* if system file or lingering task file, skip it */ @@ -576,12 +556,12 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, continue; } - fullFilename = makeStringInfo(); + StringInfo fullFilename = makeStringInfo(); appendStringInfo(fullFilename, "%s/%s", directoryName, baseFilename); /* build relation object and copy statement */ - relation = makeRangeVar(schemaName->data, relationName->data, -1); - copyStatement = CopyStatement(relation, fullFilename->data); + RangeVar *relation = makeRangeVar(schemaName->data, relationName->data, -1); + CopyStmt *copyStatement = CopyStatement(relation, fullFilename->data); if (BinaryWorkerCopyFormat) { DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"), diff --git a/src/backend/distributed/worker/worker_partition_protocol.c b/src/backend/distributed/worker/worker_partition_protocol.c index ad3d94c76..da452ed42 100644 --- a/src/backend/distributed/worker/worker_partition_protocol.c +++ b/src/backend/distributed/worker/worker_partition_protocol.c @@ -112,14 +112,6 @@ worker_range_partition_table(PG_FUNCTION_ARGS) const char *filterQuery = text_to_cstring(filterQueryText); const char *partitionColumn = text_to_cstring(partitionColumnText); - RangePartitionContext *partitionContext = NULL; - FmgrInfo *comparisonFunction = NULL; - Datum *splitPointArray = NULL; - int32 splitPointCount = 0; - uint32 fileCount = 0; - StringInfo taskDirectory = NULL; - StringInfo taskAttemptDirectory = NULL; - FileOutputStream *partitionFileArray = NULL; /* first check that array element's and partition column's types match */ Oid splitPointType = ARR_ELEMTYPE(splitPointObject); @@ 
-133,25 +125,26 @@ worker_range_partition_table(PG_FUNCTION_ARGS) } /* use column's type information to get the comparison function */ - comparisonFunction = GetFunctionInfo(partitionColumnType, - BTREE_AM_OID, BTORDER_PROC); + FmgrInfo *comparisonFunction = GetFunctionInfo(partitionColumnType, + BTREE_AM_OID, BTORDER_PROC); /* deserialize split points into their array representation */ - splitPointArray = DeconstructArrayObject(splitPointObject); - splitPointCount = ArrayObjectCount(splitPointObject); - fileCount = splitPointCount + 1; /* range partitioning needs an extra bucket */ + Datum *splitPointArray = DeconstructArrayObject(splitPointObject); + int32 splitPointCount = ArrayObjectCount(splitPointObject); + uint32 fileCount = splitPointCount + 1; /* range partitioning needs an extra bucket */ /* create range partition context object */ - partitionContext = palloc0(sizeof(RangePartitionContext)); + RangePartitionContext *partitionContext = palloc0(sizeof(RangePartitionContext)); partitionContext->comparisonFunction = comparisonFunction; partitionContext->splitPointArray = splitPointArray; partitionContext->splitPointCount = splitPointCount; /* init directories and files to write the partitioned data to */ - taskDirectory = InitTaskDirectory(jobId, taskId); - taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); + StringInfo taskDirectory = InitTaskDirectory(jobId, taskId); + StringInfo taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); - partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, fileCount); + FileOutputStream *partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, + fileCount); FileBufferSizeInBytes = FileBufferSize(PartitionBufferSize, fileCount); /* call the partitioning function that does the actual work */ @@ -191,20 +184,14 @@ worker_hash_partition_table(PG_FUNCTION_ARGS) const char *filterQuery = text_to_cstring(filterQueryText); const char *partitionColumn = text_to_cstring(partitionColumnText); - 
HashPartitionContext *partitionContext = NULL; - FmgrInfo *hashFunction = NULL; Datum *hashRangeArray = DeconstructArrayObject(hashRangeObject); int32 partitionCount = ArrayObjectCount(hashRangeObject); - StringInfo taskDirectory = NULL; - StringInfo taskAttemptDirectory = NULL; - FileOutputStream *partitionFileArray = NULL; - uint32 fileCount = 0; uint32 (*hashPartitionIdFunction)(Datum, const void *); CheckCitusVersion(ERROR); - partitionContext = palloc0(sizeof(HashPartitionContext)); + HashPartitionContext *partitionContext = palloc0(sizeof(HashPartitionContext)); partitionContext->syntheticShardIntervalArray = SyntheticShardIntervalArrayForShardMinValues(hashRangeArray, partitionCount); partitionContext->hasUniformHashDistribution = @@ -214,10 +201,11 @@ worker_hash_partition_table(PG_FUNCTION_ARGS) hashPartitionIdFunction = &HashPartitionId; /* use column's type information to get the hashing function */ - hashFunction = GetFunctionInfo(partitionColumnType, HASH_AM_OID, HASHSTANDARD_PROC); + FmgrInfo *hashFunction = GetFunctionInfo(partitionColumnType, HASH_AM_OID, + HASHSTANDARD_PROC); /* we create as many files as the number of split points */ - fileCount = partitionCount; + uint32 fileCount = partitionCount; partitionContext->hashFunction = hashFunction; partitionContext->partitionCount = partitionCount; @@ -231,10 +219,11 @@ worker_hash_partition_table(PG_FUNCTION_ARGS) } /* init directories and files to write the partitioned data to */ - taskDirectory = InitTaskDirectory(jobId, taskId); - taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); + StringInfo taskDirectory = InitTaskDirectory(jobId, taskId); + StringInfo taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); - partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, fileCount); + FileOutputStream *partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, + fileCount); FileBufferSizeInBytes = FileBufferSize(PartitionBufferSize, fileCount); /* call the partitioning 
function that does the actual work */ @@ -262,12 +251,11 @@ worker_hash_partition_table(PG_FUNCTION_ARGS) static ShardInterval ** SyntheticShardIntervalArrayForShardMinValues(Datum *shardMinValues, int shardCount) { - int shardIndex = 0; Datum nextShardMaxValue = Int32GetDatum(INT32_MAX); ShardInterval **syntheticShardIntervalArray = palloc(sizeof(ShardInterval *) * shardCount); - for (shardIndex = shardCount - 1; shardIndex >= 0; --shardIndex) + for (int shardIndex = shardCount - 1; shardIndex >= 0; --shardIndex) { Datum currentShardMinValue = shardMinValues[shardIndex]; ShardInterval *shardInterval = CitusMakeNode(ShardInterval); @@ -327,7 +315,6 @@ DeconstructArrayObject(ArrayType *arrayObject) bool *datumArrayNulls = NULL; int datumArrayLength = 0; - Oid typeId = InvalidOid; bool typeByVal = false; char typeAlign = 0; int16 typeLength = 0; @@ -339,7 +326,7 @@ DeconstructArrayObject(ArrayType *arrayObject) errmsg("worker array object cannot contain null values"))); } - typeId = ARR_ELEMTYPE(arrayObject); + Oid typeId = ARR_ELEMTYPE(arrayObject); get_typlenbyvalalign(typeId, &typeLength, &typeByVal, &typeAlign); deconstruct_array(arrayObject, typeId, typeLength, typeByVal, typeAlign, @@ -358,7 +345,6 @@ ArrayObjectCount(ArrayType *arrayObject) { int32 dimensionCount = ARR_NDIM(arrayObject); int32 *dimensionLengthArray = ARR_DIMS(arrayObject); - int32 arrayLength = 0; if (dimensionCount == 0) { @@ -368,7 +354,7 @@ ArrayObjectCount(ArrayType *arrayObject) /* we currently allow split point arrays to have only one subarray */ Assert(dimensionCount == 1); - arrayLength = ArrayGetNItems(dimensionCount, dimensionLengthArray); + int32 arrayLength = ArrayGetNItems(dimensionCount, dimensionLengthArray); if (arrayLength <= 0) { ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), @@ -387,9 +373,6 @@ ArrayObjectCount(ArrayType *arrayObject) StringInfo InitTaskDirectory(uint64 jobId, uint32 taskId) { - bool jobDirectoryExists = false; - bool taskDirectoryExists = false; - 
/* * If the task tracker assigned this task (regular case), the tracker should * have already created the job directory. @@ -399,13 +382,13 @@ InitTaskDirectory(uint64 jobId, uint32 taskId) LockJobResource(jobId, AccessExclusiveLock); - jobDirectoryExists = DirectoryExists(jobDirectoryName); + bool jobDirectoryExists = DirectoryExists(jobDirectoryName); if (!jobDirectoryExists) { CitusCreateDirectory(jobDirectoryName); } - taskDirectoryExists = DirectoryExists(taskDirectoryName); + bool taskDirectoryExists = DirectoryExists(taskDirectoryName); if (!taskDirectoryExists) { CitusCreateDirectory(taskDirectoryName); @@ -467,15 +450,13 @@ FileBufferSize(int partitionBufferSizeInKB, uint32 fileCount) static FileOutputStream * OpenPartitionFiles(StringInfo directoryName, uint32 fileCount) { - FileOutputStream *partitionFileArray = NULL; File fileDescriptor = 0; - uint32 fileIndex = 0; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); - partitionFileArray = palloc0(fileCount * sizeof(FileOutputStream)); + FileOutputStream *partitionFileArray = palloc0(fileCount * sizeof(FileOutputStream)); - for (fileIndex = 0; fileIndex < fileCount; fileIndex++) + for (uint32 fileIndex = 0; fileIndex < fileCount; fileIndex++) { StringInfo filePath = UserPartitionFilename(directoryName, fileIndex); @@ -504,8 +485,7 @@ OpenPartitionFiles(StringInfo directoryName, uint32 fileCount) static void ClosePartitionFiles(FileOutputStream *partitionFileArray, uint32 fileCount) { - uint32 fileIndex = 0; - for (fileIndex = 0; fileIndex < fileCount; fileIndex++) + for (uint32 fileIndex = 0; fileIndex < fileCount; fileIndex++) { FileOutputStream *partitionFile = &partitionFileArray[fileIndex]; @@ -619,12 +599,11 @@ bool JobDirectoryElement(const char *filename) { bool directoryElement = false; - char *directoryPathFound = NULL; StringInfo directoryPath = makeStringInfo(); appendStringInfo(directoryPath, "base/%s/%s", PG_JOB_CACHE_DIR, 
JOB_DIRECTORY_PREFIX); - directoryPathFound = strstr(filename, directoryPath->data); + char *directoryPathFound = strstr(filename, directoryPath->data); if (directoryPathFound != NULL) { directoryElement = true; @@ -644,12 +623,11 @@ bool CacheDirectoryElement(const char *filename) { bool directoryElement = false; - char *directoryPathFound = NULL; StringInfo directoryPath = makeStringInfo(); appendStringInfo(directoryPath, "base/%s/", PG_JOB_CACHE_DIR); - directoryPathFound = strstr(filename, directoryPath->data); + char *directoryPathFound = strstr(filename, directoryPath->data); /* * If directoryPath occurs at the beginning of the filename, then the @@ -764,7 +742,6 @@ CitusRemoveDirectory(StringInfo filename) if (S_ISDIR(fileStat.st_mode) && !FileIsLink(filename->data, fileStat)) { const char *directoryName = filename->data; - struct dirent *directoryEntry = NULL; DIR *directory = AllocateDir(directoryName); if (directory == NULL) @@ -774,11 +751,10 @@ CitusRemoveDirectory(StringInfo filename) directoryName))); } - directoryEntry = ReadDir(directory, directoryName); + struct dirent *directoryEntry = ReadDir(directory, directoryName); for (; directoryEntry != NULL; directoryEntry = ReadDir(directory, directoryName)) { const char *baseFilename = directoryEntry->d_name; - StringInfo fullFilename = NULL; /* if system file, skip it */ if (strncmp(baseFilename, ".", MAXPGPATH) == 0 || @@ -787,7 +763,7 @@ CitusRemoveDirectory(StringInfo filename) continue; } - fullFilename = makeStringInfo(); + StringInfo fullFilename = makeStringInfo(); appendStringInfo(fullFilename, "%s/%s", directoryName, baseFilename); CitusRemoveDirectory(fullFilename); @@ -857,11 +833,10 @@ static void FileOutputStreamFlush(FileOutputStream *file) { StringInfo fileBuffer = file->fileBuffer; - int written = 0; errno = 0; - written = FileWriteCompat(&file->fileCompat, fileBuffer->data, fileBuffer->len, - PG_WAIT_IO); + int written = FileWriteCompat(&file->fileCompat, fileBuffer->data, 
fileBuffer->len, + PG_WAIT_IO); if (written != fileBuffer->len) { ereport(ERROR, (errcode_for_file_access(), @@ -886,16 +861,9 @@ FilterAndPartitionTable(const char *filterQuery, FileOutputStream *partitionFileArray, uint32 fileCount) { - CopyOutState rowOutputState = NULL; FmgrInfo *columnOutputFunctions = NULL; int partitionColumnIndex = 0; Oid partitionColumnTypeId = InvalidOid; - Portal queryPortal = NULL; - int connected = 0; - int finished = 0; - uint32 columnCount = 0; - Datum *valueArray = NULL; - bool *isNullArray = NULL; const char *noPortalName = NULL; const bool readOnly = true; @@ -903,22 +871,22 @@ FilterAndPartitionTable(const char *filterQuery, const int noCursorOptions = 0; const int prefetchCount = ROW_PREFETCH_COUNT; - connected = SPI_connect(); + int connected = SPI_connect(); if (connected != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); } - queryPortal = SPI_cursor_open_with_args(noPortalName, filterQuery, - 0, NULL, NULL, NULL, /* no arguments */ - readOnly, noCursorOptions); + Portal queryPortal = SPI_cursor_open_with_args(noPortalName, filterQuery, + 0, NULL, NULL, NULL, /* no arguments */ + readOnly, noCursorOptions); if (queryPortal == NULL) { ereport(ERROR, (errmsg("could not open implicit cursor for query \"%s\"", ApplyLogRedaction(filterQuery)))); } - rowOutputState = InitRowOutputState(); + CopyOutState rowOutputState = InitRowOutputState(); SPI_cursor_fetch(queryPortal, fetchForward, prefetchCount); if (SPI_processed > 0) @@ -947,26 +915,21 @@ FilterAndPartitionTable(const char *filterQuery, OutputBinaryHeaders(partitionFileArray, fileCount); } - columnCount = (uint32) SPI_tuptable->tupdesc->natts; - valueArray = (Datum *) palloc0(columnCount * sizeof(Datum)); - isNullArray = (bool *) palloc0(columnCount * sizeof(bool)); + uint32 columnCount = (uint32) SPI_tuptable->tupdesc->natts; + Datum *valueArray = (Datum *) palloc0(columnCount * sizeof(Datum)); + bool *isNullArray = (bool *) palloc0(columnCount 
* sizeof(bool)); while (SPI_processed > 0) { - int rowIndex = 0; - - for (rowIndex = 0; rowIndex < SPI_processed; rowIndex++) + for (int rowIndex = 0; rowIndex < SPI_processed; rowIndex++) { HeapTuple row = SPI_tuptable->vals[rowIndex]; TupleDesc rowDescriptor = SPI_tuptable->tupdesc; - FileOutputStream *partitionFile = NULL; - StringInfo rowText = NULL; - Datum partitionKey = 0; bool partitionKeyNull = false; uint32 partitionId = 0; - partitionKey = SPI_getbinval(row, rowDescriptor, - partitionColumnIndex, &partitionKeyNull); + Datum partitionKey = SPI_getbinval(row, rowDescriptor, + partitionColumnIndex, &partitionKeyNull); /* * If we have a partition key, we compute its bucket. Else if we have @@ -993,9 +956,9 @@ FilterAndPartitionTable(const char *filterQuery, AppendCopyRowData(valueArray, isNullArray, rowDescriptor, rowOutputState, columnOutputFunctions, NULL); - rowText = rowOutputState->fe_msgbuf; + StringInfo rowText = rowOutputState->fe_msgbuf; - partitionFile = &partitionFileArray[partitionId]; + FileOutputStream *partitionFile = &partitionFileArray[partitionId]; FileOutputStreamWrite(partitionFile, rowText); resetStringInfo(rowText); @@ -1020,7 +983,7 @@ FilterAndPartitionTable(const char *filterQuery, /* delete row output memory context */ ClearRowOutputState(rowOutputState); - finished = SPI_finish(); + int finished = SPI_finish(); if (finished != SPI_OK_FINISH) { ereport(ERROR, (errmsg("could not disconnect from SPI manager"))); @@ -1139,8 +1102,7 @@ ClearRowOutputState(CopyOutState rowOutputState) static void OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount) { - uint32 fileIndex = 0; - for (fileIndex = 0; fileIndex < fileCount; fileIndex++) + for (uint32 fileIndex = 0; fileIndex < fileCount; fileIndex++) { /* Generate header for a binary copy */ FileOutputStream partitionFile = { }; @@ -1165,8 +1127,7 @@ OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount) static void 
OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount) { - uint32 fileIndex = 0; - for (fileIndex = 0; fileIndex < fileCount; fileIndex++) + for (uint32 fileIndex = 0; fileIndex < fileCount; fileIndex++) { /* Generate footer for a binary copy */ FileOutputStream partitionFile = { }; @@ -1224,19 +1185,15 @@ RangePartitionId(Datum partitionValue, const void *context) */ while (currentLength > 0) { - uint32 middleIndex = 0; - Datum middlePoint = 0; - Datum comparisonDatum = 0; - int comparisonResult = 0; - halfLength = currentLength >> 1; - middleIndex = firstIndex; + uint32 middleIndex = firstIndex; middleIndex += halfLength; - middlePoint = pointArray[middleIndex]; + Datum middlePoint = pointArray[middleIndex]; - comparisonDatum = CompareCall2(comparisonFunction, partitionValue, middlePoint); - comparisonResult = DatumGetInt32(comparisonDatum); + Datum comparisonDatum = CompareCall2(comparisonFunction, partitionValue, + middlePoint); + int comparisonResult = DatumGetInt32(comparisonDatum); /* if partition value is less than middle point */ if (comparisonResult < 0) diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index 5762a89f7..58f818267 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -107,14 +107,8 @@ citus_table_is_visible(PG_FUNCTION_ARGS) bool RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) { - int localGroupId = -1; - char *shardRelationName = NULL; - char *generatedRelationName = NULL; bool missingOk = true; - uint64 shardId = INVALID_SHARD_ID; - Oid relationId = InvalidOid; char relKind = '\0'; - Relation relation = NULL; if (!OidIsValid(shardRelationId)) { @@ -122,7 +116,7 @@ RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) return false; } - localGroupId = GetLocalGroupId(); + int localGroupId = GetLocalGroupId(); if (localGroupId == 0) { 
bool coordinatorIsKnown = false; @@ -139,7 +133,7 @@ RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) } } - relation = try_relation_open(shardRelationId, AccessShareLock); + Relation relation = try_relation_open(shardRelationId, AccessShareLock); if (relation == NULL) { return false; @@ -164,9 +158,9 @@ RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) } /* get the shard's relation name */ - shardRelationName = get_rel_name(shardRelationId); + char *shardRelationName = get_rel_name(shardRelationId); - shardId = ExtractShardIdFromTableName(shardRelationName, missingOk); + uint64 shardId = ExtractShardIdFromTableName(shardRelationName, missingOk); if (shardId == INVALID_SHARD_ID) { /* @@ -177,7 +171,7 @@ RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) } /* try to get the relation id */ - relationId = LookupShardRelation(shardId, true); + Oid relationId = LookupShardRelation(shardId, true); if (!OidIsValid(relationId)) { /* there is no such relation */ @@ -195,7 +189,7 @@ RelationIsAKnownShard(Oid shardRelationId, bool onlySearchPath) * to do that because otherwise a local table with a valid shardId * appended to its name could be misleading. 
*/ - generatedRelationName = get_rel_name(relationId); + char *generatedRelationName = get_rel_name(relationId); AppendShardIdToName(&generatedRelationName, shardId); if (strncmp(shardRelationName, generatedRelationName, NAMEDATALEN) == 0) { diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index f4fc60877..7d441b8fe 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -78,15 +78,13 @@ worker_execute_sql_task(PG_FUNCTION_ARGS) char *queryString = text_to_cstring(queryText); bool binaryCopyFormat = PG_GETARG_BOOL(3); - int64 tuplesSent = 0; - Query *query = NULL; /* job directory is created prior to scheduling the task */ StringInfo jobDirectoryName = JobDirectoryName(jobId); StringInfo taskFilename = UserTaskFilename(jobDirectoryName, taskId); - query = ParseQueryString(queryString, NULL, 0); - tuplesSent = WorkerExecuteSqlTask(query, taskFilename->data, binaryCopyFormat); + Query *query = ParseQueryString(queryString, NULL, 0); + int64 tuplesSent = WorkerExecuteSqlTask(query, taskFilename->data, binaryCopyFormat); PG_RETURN_INT64(tuplesSent); } @@ -99,19 +97,16 @@ worker_execute_sql_task(PG_FUNCTION_ARGS) int64 WorkerExecuteSqlTask(Query *query, char *taskFilename, bool binaryCopyFormat) { - EState *estate = NULL; - TaskFileDestReceiver *taskFileDest = NULL; ParamListInfo paramListInfo = NULL; - int64 tuplesSent = 0L; - estate = CreateExecutorState(); - taskFileDest = + EState *estate = CreateExecutorState(); + TaskFileDestReceiver *taskFileDest = (TaskFileDestReceiver *) CreateTaskFileDestReceiver(taskFilename, estate, binaryCopyFormat); ExecuteQueryIntoDestReceiver(query, paramListInfo, (DestReceiver *) taskFileDest); - tuplesSent = taskFileDest->tuplesSent; + int64 tuplesSent = taskFileDest->tuplesSent; taskFileDest->pub.rDestroy((DestReceiver *) taskFileDest); FreeExecutorState(estate); @@ -127,9 
+122,8 @@ WorkerExecuteSqlTask(Query *query, char *taskFilename, bool binaryCopyFormat) static DestReceiver * CreateTaskFileDestReceiver(char *filePath, EState *executorState, bool binaryCopyFormat) { - TaskFileDestReceiver *taskFileDest = NULL; - - taskFileDest = (TaskFileDestReceiver *) palloc0(sizeof(TaskFileDestReceiver)); + TaskFileDestReceiver *taskFileDest = (TaskFileDestReceiver *) palloc0( + sizeof(TaskFileDestReceiver)); /* set up the DestReceiver function pointers */ taskFileDest->pub.receiveSlot = TaskFileDestReceiverReceive; @@ -159,7 +153,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, { TaskFileDestReceiver *taskFileDest = (TaskFileDestReceiver *) dest; - CopyOutState copyOutState = NULL; const char *delimiterCharacter = "\t"; const char *nullPrintCharacter = "\\N"; @@ -172,7 +165,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, taskFileDest->tupleDescriptor = inputTupleDescriptor; /* define how tuples will be serialised */ - copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); + CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; copyOutState->null_print = (char *) nullPrintCharacter; copyOutState->null_print_client = (char *) nullPrintCharacter; @@ -217,8 +210,6 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) CopyOutState copyOutState = taskFileDest->copyOutState; FmgrInfo *columnOutputFunctions = taskFileDest->columnOutputFunctions; - Datum *columnValues = NULL; - bool *columnNulls = NULL; StringInfo copyData = copyOutState->fe_msgbuf; EState *executorState = taskFileDest->executorState; @@ -227,8 +218,8 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) slot_getallattrs(slot); - columnValues = slot->tts_values; - columnNulls = slot->tts_isnull; + Datum *columnValues = slot->tts_values; + bool *columnNulls = slot->tts_isnull; resetStringInfo(copyData);