Merge pull request #5285 from citusdata/typos_in_comment_functions

Fixes function names in comments
pull/5338/head^2
Halil Ozan Akgül 2021-10-06 10:52:45 +03:00 committed by GitHub
commit 52879fdc96
31 changed files with 60 additions and 60 deletions

View File

@@ -265,7 +265,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
 /*
- * CreateReferenceTable creates a distributed table with the given relationId. The
+ * create_reference_table creates a distributed table with the given relationId. The
  * created table has one shard and replication factor is set to the active worker
  * count. In fact, the above is the definition of a reference table in Citus.
  */
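As a rough illustration of the behavior this comment describes (the table name below is hypothetical, not part of this commit), creating a reference table from SQL looks like:

-- a small lookup table kept in full on every node
CREATE TABLE currencies (code text PRIMARY KEY, name text);
SELECT create_reference_table('currencies');
-- the table gets a single shard, replicated to the active worker nodes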

View File

@@ -540,10 +540,10 @@ PostprocessAlterExtensionCitusUpdateStmt(Node *node)
 /*
- * MarkAllExistingObjectsDistributed marks all objects that could be distributed by
- * resolving dependencies of "existing distributed tables" and "already distributed
- * objects" to introduce the objects created in older versions of Citus to distributed
- * object infrastructure as well.
+ * MarkExistingObjectDependenciesDistributedIfSupported marks all objects that could
+ * be distributed by resolving dependencies of "existing distributed tables" and
+ * "already distributed objects" to introduce the objects created in older versions
+ * of Citus to distributed object infrastructure as well.
  *
  * Note that this function is not responsible for ensuring if dependencies exist on
  * nodes and satisfying these dependendencies if not exists, which is already done by

View File

@@ -722,7 +722,7 @@ get_relation_constraint_oid_compat(HeapTuple heapTuple)
 /*
- * HasForeignKeyToLocalTable returns true if relation has foreign key
+ * HasForeignKeyWithLocalTable returns true if relation has foreign key
  * relationship with a local table.
  */
 bool
@@ -857,8 +857,8 @@ TableReferencing(Oid relationId)
 /*
- * ConstraintWithNameIsOfType is a wrapper around ConstraintWithNameIsOfType that returns true
- * if given constraint name identifies a uniqueness constraint, i.e:
+ * ConstraintIsAUniquenessConstraint is a wrapper around ConstraintWithNameIsOfType
+ * that returns true if given constraint name identifies a uniqueness constraint, i.e:
  * - primary key constraint, or
  * - unique constraint
  */
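For reference, the two forms that the comment counts as uniqueness constraints look like this in SQL (the table and constraint names are made up for illustration):

CREATE TABLE items (
    id bigint PRIMARY KEY,                         -- primary key constraint
    sku text CONSTRAINT items_sku_unique UNIQUE    -- unique constraint
);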

View File

@@ -331,7 +331,7 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso
 /*
- * SwitchToSequentialOrLocalExecutionIfIndexNameTooLong generates the longest index name
+ * SwitchToSequentialAndLocalExecutionIfIndexNameTooLong generates the longest index name
  * on the shards of the partitions, and if exceeds the limit switches to sequential and
  * local execution to prevent self-deadlocks.
  */

View File

@@ -137,7 +137,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
 /*
- * ErrorIfDistributedRenameStmt errors out if the corresponding rename statement
+ * ErrorIfUnsupportedRenameStmt errors out if the corresponding rename statement
  * operates on any part of a distributed table other than a column.
  *
  * Note: This function handles RenameStmt applied to relations handed by Citus.

View File

@@ -1576,7 +1576,7 @@ IsAlterTableRenameStmt(RenameStmt *renameStmt)
 /*
- * ErrorIfDropPartitionColumn checks if any subcommands of the given alter table
+ * ErrorIfAlterDropsPartitionColumn checks if any subcommands of the given alter table
  * command is a DROP COLUMN command which drops the partition column of a distributed
  * table. If there is such a subcommand, this function errors out.
  */
@@ -2967,7 +2967,7 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement,
 /*
- * ErrorIfUnsopprtedAlterAddConstraintStmt runs the constraint checks on distributed
+ * ErrorIfUnsupportedAlterAddConstraintStmt runs the constraint checks on distributed
  * table using the same logic with create_distributed_table.
  */
 static void
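To illustrate the first check above with a hedged example (the object names are hypothetical and the exact error message may differ), dropping the distribution column of a distributed table is rejected:

CREATE TABLE events (tenant_id bigint, payload jsonb);
SELECT create_distributed_table('events', 'tenant_id');
ALTER TABLE events DROP COLUMN tenant_id;  -- errors out: the partition column cannot be dropped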

View File

@@ -98,7 +98,7 @@ static bool ShouldUndistributeCitusLocalTables(void);
 /*
- * ProcessUtilityForParseTree is a convenience method to create a PlannedStmt out of
+ * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
  * pieces of a utility statement before invoking ProcessUtility.
  */
 void

View File

@@ -198,7 +198,7 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
 /*
- * IsSupportedDistributedVacuumStmt returns whether distributed execution of a
+ * IsDistributedVacuumStmt returns whether distributed execution of a
  * given VacuumStmt is supported. The provided relationId list represents
  * the list of tables targeted by the provided statement.
  *
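As a hedged example of a statement this check covers (the table name is hypothetical), a VACUUM issued on a distributed table at the coordinator is also run against its shards when distributed execution is supported:

VACUUM (ANALYZE, VERBOSE) orders;  -- assuming 'orders' is a distributed table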

View File

@@ -43,7 +43,7 @@ static Size CalculateMaxSize(void);
 static int uri_prefix_length(const char *connstr);
 /*
- * InitConnParms initializes the ConnParams field to point to enough memory to
+ * InitConnParams initializes the ConnParams field to point to enough memory to
  * store settings for every valid libpq value, though these regions are set to
  * zeros from the outset and the size appropriately also set to zero.
  *

View File

@@ -80,7 +80,7 @@ ForgetResults(MultiConnection *connection)
 /*
- * ClearResultsInternal clears a connection from pending activity,
+ * ClearResults clears a connection from pending activity,
  * returns true if all pending commands return success. It raises
  * error if raiseErrors flag is set, any command fails and transaction
  * is marked critical.

View File

@@ -174,8 +174,8 @@ AppendRoleOption(StringInfo buf, ListCell *optionCell)
 /*
- * AppendAlterRoleStmt generates the string representation of the
- * AlterRoleStmt and appends it to the buffer.
+ * AppendAlterRoleSetStmt generates the string representation of the
+ * AlterRoleSetStmt and appends it to the buffer.
  */
 static void
 AppendAlterRoleSetStmt(StringInfo buf, AlterRoleSetStmt *stmt)

View File

@@ -146,8 +146,8 @@ QualifyAlterStatisticsStmt(Node *node)
 #endif
 /*
- * QualifyAlterStatisticsStmt qualifies AlterOwnerStmt's with schema name for
- * ALTER STATISTICS .. OWNER TO statements.
+ * QualifyAlterStatisticsOwnerStmt qualifies AlterOwnerStmt's with schema
+ * name for ALTER STATISTICS .. OWNER TO statements.
  */
 void
 QualifyAlterStatisticsOwnerStmt(Node *node)
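For context, the schema qualification the comment refers to turns the first statement below into the second; the statistics object and role names are illustrative only:

ALTER STATISTICS my_stats OWNER TO analytics_owner;
ALTER STATISTICS public.my_stats OWNER TO analytics_owner;  -- schema-qualified form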

View File

@@ -974,7 +974,7 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList,
 /*
- * ExecuteTaskListIntoTupleStore is a proxy to ExecuteTaskListExtended() with defaults
+ * ExecuteTaskListIntoTupleDest is a proxy to ExecuteTaskListExtended() with defaults
  * for some of the arguments.
  */
 uint64
@@ -2418,8 +2418,8 @@ RunDistributedExecution(DistributedExecution *execution)
 /*
- * ProcessSessionsWithFailedEventSetOperations goes over the session list and
- * processes sessions with failed wait event set operations.
+ * ProcessSessionsWithFailedWaitEventSetOperations goes over the session list
+ * and processes sessions with failed wait event set operations.
  *
  * Failed sessions are not going to generate any further events, so it is our
  * only chance to process the failure by calling into `ConnectionStateMachine`.

View File

@@ -297,7 +297,7 @@ CreatePartitioningTupleDest(CitusTableCacheEntry *targetRelation)
 /*
- * PartitioningTupleDestTupleDescForQuery implements TupleDestination->putTuple for
+ * PartitioningTupleDestPutTuple implements TupleDestination->putTuple for
  * PartitioningTupleDest.
  */
 static void

View File

@@ -144,7 +144,7 @@ GetCurrentLocalExecutionStatus(void)
 /*
- * ExecuteLocalTasks executes the given tasks locally.
+ * ExecuteLocalTaskList executes the given tasks locally.
  *
  * The function goes over the task list and executes them locally.
  * The returning tuples (if any) is stored in the tupleStoreState.

View File

@@ -205,7 +205,7 @@ CreateTupleDestNone(void)
 /*
- * TupleStoreTupleDestPutTuple implements TupleDestination->putTuple for
+ * TupleDestNonePutTuple implements TupleDestination->putTuple for
  * no-op tuple destination.
  */
 static void
@@ -218,7 +218,7 @@ TupleDestNonePutTuple(TupleDestination *self, Task *task,
 /*
- * TupleStoreTupleDestTupleDescForQuery implements TupleDestination->TupleDescForQuery
+ * TupleDestNoneTupleDescForQuery implements TupleDestination->TupleDescForQuery
  * for no-op tuple destination.
  */
 static TupleDesc
@@ -262,7 +262,7 @@ TupleDestDestReceiverStartup(DestReceiver *destReceiver, int operation,
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->receiveSlot for
+ * TupleDestDestReceiverReceive implements DestReceiver->receiveSlot for
  * TupleDestDestReceiver.
  */
 static bool
@@ -292,7 +292,7 @@ TupleDestDestReceiverReceive(TupleTableSlot *slot,
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->rShutdown for
+ * TupleDestDestReceiverShutdown implements DestReceiver->rShutdown for
  * TupleDestDestReceiver.
  */
 static void
@@ -303,7 +303,7 @@ TupleDestDestReceiverShutdown(DestReceiver *destReceiver)
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->rDestroy for
+ * TupleDestDestReceiverDestroy implements DestReceiver->rDestroy for
  * TupleDestDestReceiver.
  */
 static void

View File

@@ -942,7 +942,7 @@ InitializeTableCacheEntry(int64 shardId)
 /*
- * RefreshInvalidTableCacheEntry checks if the cache entry is still valid and
+ * RefreshTableCacheEntryIfInvalid checks if the cache entry is still valid and
  * refreshes it in cache when it's not. It returns true if it refreshed the
  * entry in the cache and false if it didn't.
  */
@@ -3618,7 +3618,7 @@ ResetCitusTableCacheEntry(CitusTableCacheEntry *cacheEntry)
 /*
- * RemoveShardIdCacheEntries removes all shard ID cache entries belonging to the
+ * RemoveStaleShardIdCacheEntries removes all shard ID cache entries belonging to the
  * given table entry. If the shard ID belongs to a different (newer) table entry,
  * we leave it in place.
  */

View File

@@ -701,7 +701,7 @@ GroupForNode(char *nodeName, int nodePort)
 /*
- * NodeIsPrimaryAndLocal returns whether the argument represents the local
+ * NodeIsPrimaryAndRemote returns whether the argument represents the remote
  * primary node.
  */
 bool

View File

@@ -206,7 +206,7 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray,
 /*
- * ExecuteCommandsInParellelAndStoreResults connects to each node specified in
+ * ExecuteCommandsInParallelAndStoreResults connects to each node specified in
  * nodeNameArray and nodePortArray, and executes command in commandStringArray
  * in parallel fashion. Execution success status and result is reported for
  * each command in statusArray and resultStringArray. Each array contains

View File

@@ -525,7 +525,7 @@ ErrorIfMoveUnsupportedTableType(Oid relationId)
 /*
- * BlockWritesToColocatedShardList blocks writes to all shards in the given shard
+ * BlockWritesToShardList blocks writes to all shards in the given shard
  * list. The function assumes that all the shards in the list are colocated.
  */
 void

View File

@@ -217,7 +217,7 @@ DropOrphanedShards(bool waitForLocks)
 /*
- * TryLockRelationAndCleanup tries to lock the given relation
+ * TryLockRelationAndPlacementCleanup tries to lock the given relation
  * and the placement cleanup. If it cannot, it returns false.
  *
  */

View File

@@ -2048,7 +2048,7 @@ CompareShardCostAsc(const void *void1, const void *void2)
 /*
- * CompareShardCostAsc can be used to sort shard costs from high cost to low
+ * CompareShardCostDesc can be used to sort shard costs from high cost to low
  * cost.
  */
 static int
@@ -2109,8 +2109,8 @@ CompareDisallowedPlacementAsc(const void *void1, const void *void2)
 /*
- * CompareDisallowedPlacementAsc can be used to sort disallowed placements from
- * low cost to high cost.
+ * CompareDisallowedPlacementDesc can be used to sort disallowed placements from
+ * high cost to low cost.
  */
 static int
 CompareDisallowedPlacementDesc(const void *a, const void *b)
@@ -2619,7 +2619,7 @@ ActivePlacementsHash(List *shardPlacementList)
 /*
- * PlacementsHashFinds returns true if there exists a shard placement with the
+ * PlacementsHashFind returns true if there exists a shard placement with the
  * given workerNode and shard id in the given placements hash, otherwise it
  * returns false.
  */
@@ -2681,7 +2681,7 @@ PlacementsHashRemove(HTAB *placementsHash, uint64 shardId, WorkerNode *workerNod
 /*
- * ShardPlacementCompare compares two shard placements using shard id, node name,
+ * PlacementsHashCompare compares two shard placements using shard id, node name,
  * and node port number.
  */
 static int
@@ -2722,7 +2722,7 @@ PlacementsHashCompare(const void *lhsKey, const void *rhsKey, Size keySize)
 /*
- * ShardPlacementHashCode computes the hash code for a shard placement from the
+ * PlacementsHashHashCode computes the hash code for a shard placement from the
  * placement's shard id, node name, and node port number.
  */
 static uint32
@@ -2924,7 +2924,7 @@ EnsureNodeCapacityUDF(Oid functionOid)
 /*
- * EnsureNodeCapacityUDF checks that the UDF matching the oid has the correct
+ * EnsureShardAllowedOnNodeUDF checks that the UDF matching the oid has the correct
  * signature to be used as a NodeCapacity function. The expected signature is:
  *
  * shard_allowed_on_node(shardid bigint, nodeid int) returns boolean
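A minimal sketch of a UDF matching the expected signature quoted above; the function body is an illustrative assumption, not part of this commit. Such a function would typically be registered in a rebalance strategy (for example via citus_add_rebalance_strategy, where available) so the rebalancer consults it before placing shards:

CREATE FUNCTION shard_allowed_on_node(shardid bigint, nodeid int)
RETURNS boolean AS $$
    SELECT true;  -- toy policy: allow every shard on every node
$$ LANGUAGE sql;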

View File

@@ -1635,7 +1635,7 @@ ExplainOneQuery(Query *query, int cursorOptions,
 /*
- * ExplainAnalyzeWorkerPlan produces explain output into es. If es->analyze, it also executes
+ * ExplainWorkerPlan produces explain output into es. If es->analyze, it also executes
  * the given plannedStmt and sends the results to dest. It puts total time to execute in
  * executionDurationMillisec.
  *

View File

@@ -2626,7 +2626,7 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual,
 /*
- * PrcoessDistinctClauseForWorkerQuery gets the inputs and modifies the outputs
+ * ProcessDistinctClauseForWorkerQuery gets the inputs and modifies the outputs
  * such that worker query's DISTINCT and DISTINCT ON clauses are set accordingly.
  * Note the function may or may not decide to pushdown the DISTINCT and DISTINCT
  * on clauses based on the inputs.
@@ -2789,7 +2789,7 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference,
 /*
- * BuildLimitOrderByReference is a helper function that simply builds
+ * BuildOrderByLimitReference is a helper function that simply builds
  * the necessary information for processing the limit and order by.
  * The return value should be used in a read-only manner.
  */

View File

@@ -264,7 +264,7 @@ CreateModifyPlan(Query *originalQuery, Query *query,
 /*
- * CreateSingleTaskRouterPlan creates a physical plan for given SELECT query.
+ * CreateSingleTaskRouterSelectPlan creates a physical plan for given SELECT query.
  * The returned plan is a router task that returns query results from a single worker.
  * If not router plannable, the returned plan's planningError describes the problem.
  */
@@ -1878,7 +1878,7 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
 /*
- * SingleShardRouterTaskList is a wrapper around other corresponding task
+ * GenerateSingleShardRouterTaskList is a wrapper around other corresponding task
  * list generation functions specific to single shard selects and modifications.
  *
  * The function updates the input job's taskList in-place.

View File

@@ -77,7 +77,7 @@ static DeferredErrorMessage * DeferredErrorIfUnsupportedRecurringTuplesJoin(
 	PlannerRestrictionContext *plannerRestrictionContext);
 static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree);
 static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree);
-static bool ExtractSetOperationStatmentWalker(Node *node, List **setOperationList);
+static bool ExtractSetOperationStatementWalker(Node *node, List **setOperationList);
 static RecurringTuplesType FetchFirstRecurType(PlannerInfo *plannerInfo,
 	Relids relids);
 static bool ContainsRecurringRTE(RangeTblEntry *rangeTableEntry,
@@ -1254,7 +1254,7 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
 	ListCell *setOperationStatmentCell = NULL;
 	RecurringTuplesType recurType = RECURRING_TUPLES_INVALID;
-	ExtractSetOperationStatmentWalker((Node *) subqueryTree->setOperations,
+	ExtractSetOperationStatementWalker((Node *) subqueryTree->setOperations,
 		&setOperationStatementList);
 	foreach(setOperationStatmentCell, setOperationStatementList)
 	{
@@ -1343,7 +1343,7 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
  * and finds all set operations in the tree.
  */
 static bool
-ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
+ExtractSetOperationStatementWalker(Node *node, List **setOperationList)
 {
 	if (node == NULL)
 	{
@@ -1358,7 +1358,7 @@ ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
 	}
 	bool walkerResult = expression_tree_walker(node,
-		ExtractSetOperationStatmentWalker,
+		ExtractSetOperationStatementWalker,
 		setOperationList);
 	return walkerResult;

View File

@@ -564,8 +564,8 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode,
 /*
- * RecursivelyPlanNonColocatedJoinWalker gets a query and walks over its sublinks
- * to find subqueries that live in WHERE clause.
+ * RecursivelyPlanNonColocatedSubqueriesInWhere gets a query and walks over its
+ * sublinks to find subqueries that live in WHERE clause.
  *
  * When a subquery found, it's checked whether the subquery is colocated with the
  * anchor subquery specified in the nonColocatedJoinContext. If not,
@@ -1122,7 +1122,7 @@ IsRelationLocalTableOrMatView(Oid relationId)
 /*
- * RecursivelyPlanQuery recursively plans a query, replaces it with a
+ * RecursivelyPlanSubquery recursively plans a query, replaces it with a
  * result query and returns the subplan.
  *
  * Before we recursively plan the given subquery, we should ensure

View File

@@ -220,7 +220,7 @@ CheckForDistributedDeadlocks(void)
 /*
- * CheckDeadlockForDistributedTransaction does a DFS starting with the given
+ * CheckDeadlockForTransactionNode does a DFS starting with the given
  * transaction node and checks for a cycle (i.e., the node can be reached again
  * while traversing the graph).
  *

View File

@@ -51,7 +51,7 @@ static void Assign2PCIdentifier(MultiConnection *connection);
 /*
- * StartRemoteTransactionBeging initiates beginning the remote transaction in
+ * StartRemoteTransactionBegin initiates beginning the remote transaction in
  * a non-blocking manner. The function sends "BEGIN" followed by
  * assign_distributed_transaction_id() to assign the distributed transaction
  * id on the remote node.

View File

@@ -693,7 +693,7 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId)
 /*
- * GetColumnTypeEquality checks if distribution column types and collations
+ * EnsureColumnTypeEquality checks if distribution column types and collations
  * of the given columns are same. The function sets the boolean pointers.
  */
 void

View File

@@ -649,7 +649,7 @@ LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode)
 /*
- * LockPlacementListMetadata takes locks on the metadata of all shards in
+ * LockShardsInPlacementListMetadata takes locks on the metadata of all shards in
  * shardPlacementList to prevent concurrent placement changes.
  */
 void