diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 2375946e6..120aa8ede 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -140,7 +140,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand) Relation relation = heap_openrv(createIndexStatement->relation, lockmode); Oid relationId = RelationGetRelid(relation); - bool isDistributedRelation = IsCitusTable(relationId); + bool isCitusRelation = IsCitusTable(relationId); if (createIndexStatement->relation->schemaname == NULL) { @@ -161,7 +161,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand) heap_close(relation, NoLock); - if (isDistributedRelation) + if (isCitusRelation) { char *indexName = createIndexStatement->idxname; char *namespaceName = createIndexStatement->relation->schemaname; @@ -212,7 +212,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand) { Relation relation = NULL; Oid relationId = InvalidOid; - bool isDistributedRelation = false; + bool isCitusRelation = false; #if PG_VERSION_NUM >= 120000 LOCKMODE lockmode = reindexStatement->concurrent ? 
ShareUpdateExclusiveLock : AccessExclusiveLock; @@ -249,7 +249,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand) relationId = RelationGetRelid(relation); } - isDistributedRelation = IsCitusTable(relationId); + isCitusRelation = IsCitusTable(relationId); if (reindexStatement->relation->schemaname == NULL) { @@ -277,7 +277,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand) heap_close(relation, NoLock); } - if (isDistributedRelation) + if (isCitusRelation) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relationId; @@ -359,8 +359,8 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand) } Oid relationId = IndexGetRelation(indexId, false); - bool isDistributedRelation = IsCitusTable(relationId); - if (isDistributedRelation) + bool isCitusRelation = IsCitusTable(relationId); + if (isCitusRelation) { distributedIndexId = indexId; distributedRelationId = relationId; diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index b8d724491..47ba48a0f 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2587,7 +2587,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS isFrom ? RowExclusiveLock : AccessShareLock); - bool isDistributedRelation = IsCitusTable(RelationGetRelid(copiedRelation)); + bool isCitusRelation = IsCitusTable(RelationGetRelid(copiedRelation)); /* ensure future lookups hit the same relation */ char *schemaName = get_namespace_name(RelationGetNamespace(copiedRelation)); @@ -2600,7 +2600,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS heap_close(copiedRelation, NoLock); - if (isDistributedRelation) + if (isCitusRelation) { if (copyStatement->is_from) { @@ -2710,7 +2710,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS /* - * CitusCopyTo runs a COPY .. 
TO STDOUT command on each shard to to a full + * CitusCopyTo runs a COPY .. TO STDOUT command on each shard to do a full * table dump. */ static void diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c index ff62df62d..01acc9d82 100644 --- a/src/backend/distributed/commands/rename.c +++ b/src/backend/distributed/commands/rename.c @@ -96,16 +96,15 @@ PreprocessRenameStmt(Node *node, const char *renameCommand) return NIL; } - bool isDistributedRelation = IsCitusTable(tableRelationId); - if (!isDistributedRelation) + bool isCitusRelation = IsCitusTable(tableRelationId); + if (!isCitusRelation) { return NIL; } /* - * We might ERROR out on some commands, but only for Citus tables where - * isDistributedRelation is true. That's why this test comes this late in - * the function. + * We might ERROR out on some commands, but only for Citus tables. + * That's why this test comes this late in the function. */ ErrorIfUnsupportedRenameStmt(renameStmt); diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 91799c3e6..0c9e3d552 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -459,7 +459,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand) SetLocalMultiShardModifyModeToSequential(); } - /* fill them here as it is possible to use them in some condtional blocks below */ + /* fill them here as it is possible to use them in some conditional blocks below */ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = leftRelationId; ddlJob->concurrentIndexCmd = false; @@ -567,8 +567,8 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, return (Node *) alterTableStatement; } - bool isDistributedRelation = IsCitusTable(leftRelationId); - if (!isDistributedRelation) + bool isCitusRelation = IsCitusTable(leftRelationId); + if (!isCitusRelation) { return (Node *) alterTableStatement; } @@ -654,8 
+654,8 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement) return; } - bool isDistributedRelation = IsCitusTable(leftRelationId); - if (!isDistributedRelation) + bool isCitusRelation = IsCitusTable(leftRelationId); + if (!isCitusRelation) { return; } diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index 6590f7f29..155795f8b 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -472,7 +472,7 @@ CacheLocalPlanForShardQuery(Task *task, DistributedPlan *originalDistributedPlan /* * We prefer to use jobQuery (over task->query) because we don't want any - * functions/params have been evaluated in the cached plan. + * functions/params to have been evaluated in the cached plan. */ Query *shardQuery = copyObject(originalDistributedPlan->workerJob->jobQuery); diff --git a/src/backend/distributed/master/master_repair_shards.c b/src/backend/distributed/master/master_repair_shards.c index 62704f6d2..329a7b51f 100644 --- a/src/backend/distributed/master/master_repair_shards.c +++ b/src/backend/distributed/master/master_repair_shards.c @@ -237,7 +237,7 @@ RepairShardPlacement(int64 shardId, const char *sourceNodeName, int32 sourceNode /* * Let's not allow repairing partitions to prevent any edge cases. * We're already not allowing any kind of modifications on the partitions - * so their placements are not likely to to be marked as INVALID. The only + * so their placements are not likely to be marked as INVALID. The only * possible case to mark placement of a partition as invalid is * "ALTER TABLE parent_table DETACH PARTITION partition_table". 
But, * given that the table would become a regular distributed table if the diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index af4650e19..2feaebe17 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -1559,7 +1559,7 @@ BuildSubqueryJobQuery(MultiNode *multiNode) hasAggregates = true; } - /* distinct is not send to worker query if there are top level aggregates */ + /* distinct is not sent to worker query if there are top level aggregates */ if (hasAggregates) { hasDistinctOn = false; } diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index 4eb7092cb..4653c6e37 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -336,7 +336,7 @@ IsFunctionRTE(Node *node) * IsNodeSubquery returns true if the given node is a Query or SubPlan or a * Param node with paramkind PARAM_EXEC. * - * The check for SubPlan is needed whev this is used on a already rewritten + * The check for SubPlan is needed when this is used on an already rewritten * query. Such a query has SubPlan nodes instead of SubLink nodes (which * contain a Query node). * The check for PARAM_EXEC is needed because some very simple subqueries like @@ -544,7 +544,7 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree, RaiseDeferredErrorInternal(repartitionQueryError, ERROR); } - /* all checks has passed, safe to create the multi plan */ + /* all checks have passed, safe to create the multi plan */ multiQueryNode = MultiNodeTree(queryTree); } @@ -1502,7 +1502,7 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType) * from other parts of code although it causes some code duplication. 
* * Current subquery pushdown support in MultiTree logic requires a single range - * table entry in the top most from clause. Therefore we inject an synthetic + * table entry in the top most from clause. Therefore we inject a synthetic * query derived from the top level query and make it the only range table * entry for the top level query. This way we can push down any subquery joins * down to workers without invoking join order planner. diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index dfd332481..dffa4357e 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -442,7 +442,7 @@ RecursivelyPlanNonColocatedSubqueries(Query *subquery, RecursivePlanningContext * RecursivelyPlanNonColocatedJoinWalker gets a join node and walks over it to find * subqueries that live under the node. * - * When a subquery found, its checked whether the subquery is colocated with the + * When a subquery is found, it's checked whether the subquery is colocated with the * anchor subquery specified in the nonColocatedJoinContext. If not, * the subquery is recursively planned. */ @@ -518,7 +518,7 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode, * RecursivelyPlanNonColocatedJoinWalker gets a query and walks over its sublinks * to find subqueries that live in WHERE clause. * - * When a subquery found, its checked whether the subquery is colocated with the + * When a subquery is found, it's checked whether the subquery is colocated with the * anchor subquery specified in the nonColocatedJoinContext. If not, * the subquery is recursively planned. 
*/ diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h index a0b2bdd6e..6bb13e681 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -82,7 +82,7 @@ typedef struct /* * The following two lists consists of relationIds that this distributed * relation has a foreign key to (e.g., referencedRelationsViaForeignKey) or - * other relations has a foreign key to to this relation (e.g., + * other relations have a foreign key to this relation (e.g., * referencingRelationsViaForeignKey). * * Note that we're keeping all transitive foreign key references as well