mirror of https://github.com/citusdata/citus.git
Merge pull request #5285 from citusdata/typos_in_comment_functions
Fixes function names in comments
commit 52879fdc96
@@ -265,7 +265,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
 /*
- * CreateReferenceTable creates a distributed table with the given relationId. The
+ * create_reference_table creates a distributed table with the given relationId. The
  * created table has one shard and replication factor is set to the active worker
  * count. In fact, the above is the definition of a reference table in Citus.
  */
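The corrected comment documents the C entry point behind the create_reference_table() UDF. As a rough usage sketch (the table name below is illustrative and not part of this change):

    -- illustrative example; 'countries' is a hypothetical table
    CREATE TABLE countries (code text PRIMARY KEY, name text);
    SELECT create_reference_table('countries');
    -- the table now has one shard replicated to every active worker node,
    -- which is the behavior the comment above describes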
@@ -540,10 +540,10 @@ PostprocessAlterExtensionCitusUpdateStmt(Node *node)
 /*
- * MarkAllExistingObjectsDistributed marks all objects that could be distributed by
- * resolving dependencies of "existing distributed tables" and "already distributed
- * objects" to introduce the objects created in older versions of Citus to distributed
- * object infrastructure as well.
+ * MarkExistingObjectDependenciesDistributedIfSupported marks all objects that could
+ * be distributed by resolving dependencies of "existing distributed tables" and
+ * "already distributed objects" to introduce the objects created in older versions
+ * of Citus to distributed object infrastructure as well.
  *
  * Note that this function is not responsible for ensuring if dependencies exist on
  * nodes and satisfying these dependendencies if not exists, which is already done by

@@ -722,7 +722,7 @@ get_relation_constraint_oid_compat(HeapTuple heapTuple)
 /*
- * HasForeignKeyToLocalTable returns true if relation has foreign key
+ * HasForeignKeyWithLocalTable returns true if relation has foreign key
  * relationship with a local table.
  */
 bool

@@ -857,8 +857,8 @@ TableReferencing(Oid relationId)
 /*
- * ConstraintWithNameIsOfType is a wrapper around ConstraintWithNameIsOfType that returns true
- * if given constraint name identifies a uniqueness constraint, i.e:
+ * ConstraintIsAUniquenessConstraint is a wrapper around ConstraintWithNameIsOfType
+ * that returns true if given constraint name identifies a uniqueness constraint, i.e:
  * - primary key constraint, or
  * - unique constraint
  */
@@ -331,7 +331,7 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso
 /*
- * SwitchToSequentialOrLocalExecutionIfIndexNameTooLong generates the longest index name
+ * SwitchToSequentialAndLocalExecutionIfIndexNameTooLong generates the longest index name
  * on the shards of the partitions, and if exceeds the limit switches to sequential and
  * local execution to prevent self-deadlocks.
  */

@@ -137,7 +137,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
 /*
- * ErrorIfDistributedRenameStmt errors out if the corresponding rename statement
+ * ErrorIfUnsupportedRenameStmt errors out if the corresponding rename statement
  * operates on any part of a distributed table other than a column.
  *
  * Note: This function handles RenameStmt applied to relations handed by Citus.

@@ -1576,7 +1576,7 @@ IsAlterTableRenameStmt(RenameStmt *renameStmt)
 /*
- * ErrorIfDropPartitionColumn checks if any subcommands of the given alter table
+ * ErrorIfAlterDropsPartitionColumn checks if any subcommands of the given alter table
  * command is a DROP COLUMN command which drops the partition column of a distributed
  * table. If there is such a subcommand, this function errors out.
  */

@@ -2967,7 +2967,7 @@ AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement,
 /*
- * ErrorIfUnsopprtedAlterAddConstraintStmt runs the constraint checks on distributed
+ * ErrorIfUnsupportedAlterAddConstraintStmt runs the constraint checks on distributed
  * table using the same logic with create_distributed_table.
  */
 static void
@@ -98,7 +98,7 @@ static bool ShouldUndistributeCitusLocalTables(void);
 /*
- * ProcessUtilityForParseTree is a convenience method to create a PlannedStmt out of
+ * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
  * pieces of a utility statement before invoking ProcessUtility.
  */
 void

@@ -198,7 +198,7 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
 /*
- * IsSupportedDistributedVacuumStmt returns whether distributed execution of a
+ * IsDistributedVacuumStmt returns whether distributed execution of a
  * given VacuumStmt is supported. The provided relationId list represents
  * the list of tables targeted by the provided statement.
  *

@@ -43,7 +43,7 @@ static Size CalculateMaxSize(void);
 static int uri_prefix_length(const char *connstr);
 
 /*
- * InitConnParms initializes the ConnParams field to point to enough memory to
+ * InitConnParams initializes the ConnParams field to point to enough memory to
  * store settings for every valid libpq value, though these regions are set to
  * zeros from the outset and the size appropriately also set to zero.
  *
@@ -80,7 +80,7 @@ ForgetResults(MultiConnection *connection)
 /*
- * ClearResultsInternal clears a connection from pending activity,
+ * ClearResults clears a connection from pending activity,
  * returns true if all pending commands return success. It raises
  * error if raiseErrors flag is set, any command fails and transaction
  * is marked critical.

@@ -174,8 +174,8 @@ AppendRoleOption(StringInfo buf, ListCell *optionCell)
 /*
- * AppendAlterRoleStmt generates the string representation of the
- * AlterRoleStmt and appends it to the buffer.
+ * AppendAlterRoleSetStmt generates the string representation of the
+ * AlterRoleSetStmt and appends it to the buffer.
  */
 static void
 AppendAlterRoleSetStmt(StringInfo buf, AlterRoleSetStmt *stmt)

@@ -146,8 +146,8 @@ QualifyAlterStatisticsStmt(Node *node)
 #endif
 
 /*
- * QualifyAlterStatisticsStmt qualifies AlterOwnerStmt's with schema name for
- * ALTER STATISTICS .. OWNER TO statements.
+ * QualifyAlterStatisticsOwnerStmt qualifies AlterOwnerStmt's with schema
+ * name for ALTER STATISTICS .. OWNER TO statements.
  */
 void
 QualifyAlterStatisticsOwnerStmt(Node *node)
@@ -974,7 +974,7 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList,
 /*
- * ExecuteTaskListIntoTupleStore is a proxy to ExecuteTaskListExtended() with defaults
+ * ExecuteTaskListIntoTupleDest is a proxy to ExecuteTaskListExtended() with defaults
  * for some of the arguments.
  */
 uint64

@@ -2418,8 +2418,8 @@ RunDistributedExecution(DistributedExecution *execution)
 /*
- * ProcessSessionsWithFailedEventSetOperations goes over the session list and
- * processes sessions with failed wait event set operations.
+ * ProcessSessionsWithFailedWaitEventSetOperations goes over the session list
+ * and processes sessions with failed wait event set operations.
  *
  * Failed sessions are not going to generate any further events, so it is our
  * only chance to process the failure by calling into `ConnectionStateMachine`.

@@ -297,7 +297,7 @@ CreatePartitioningTupleDest(CitusTableCacheEntry *targetRelation)
 /*
- * PartitioningTupleDestTupleDescForQuery implements TupleDestination->putTuple for
+ * PartitioningTupleDestPutTuple implements TupleDestination->putTuple for
  * PartitioningTupleDest.
  */
 static void
@@ -144,7 +144,7 @@ GetCurrentLocalExecutionStatus(void)
 /*
- * ExecuteLocalTasks executes the given tasks locally.
+ * ExecuteLocalTaskList executes the given tasks locally.
  *
  * The function goes over the task list and executes them locally.
  * The returning tuples (if any) is stored in the tupleStoreState.

@@ -205,7 +205,7 @@ CreateTupleDestNone(void)
 /*
- * TupleStoreTupleDestPutTuple implements TupleDestination->putTuple for
+ * TupleDestNonePutTuple implements TupleDestination->putTuple for
  * no-op tuple destination.
  */
 static void

@@ -218,7 +218,7 @@ TupleDestNonePutTuple(TupleDestination *self, Task *task,
 /*
- * TupleStoreTupleDestTupleDescForQuery implements TupleDestination->TupleDescForQuery
+ * TupleDestNoneTupleDescForQuery implements TupleDestination->TupleDescForQuery
  * for no-op tuple destination.
  */
 static TupleDesc
@@ -262,7 +262,7 @@ TupleDestDestReceiverStartup(DestReceiver *destReceiver, int operation,
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->receiveSlot for
+ * TupleDestDestReceiverReceive implements DestReceiver->receiveSlot for
  * TupleDestDestReceiver.
  */
 static bool

@@ -292,7 +292,7 @@ TupleDestDestReceiverReceive(TupleTableSlot *slot,
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->rShutdown for
+ * TupleDestDestReceiverShutdown implements DestReceiver->rShutdown for
  * TupleDestDestReceiver.
  */
 static void

@@ -303,7 +303,7 @@ TupleDestDestReceiverShutdown(DestReceiver *destReceiver)
 /*
- * TupleDestDestReceiverStartup implements DestReceiver->rDestroy for
+ * TupleDestDestReceiverDestroy implements DestReceiver->rDestroy for
  * TupleDestDestReceiver.
  */
 static void
@@ -942,7 +942,7 @@ InitializeTableCacheEntry(int64 shardId)
 /*
- * RefreshInvalidTableCacheEntry checks if the cache entry is still valid and
+ * RefreshTableCacheEntryIfInvalid checks if the cache entry is still valid and
  * refreshes it in cache when it's not. It returns true if it refreshed the
  * entry in the cache and false if it didn't.
  */

@@ -3618,7 +3618,7 @@ ResetCitusTableCacheEntry(CitusTableCacheEntry *cacheEntry)
 /*
- * RemoveShardIdCacheEntries removes all shard ID cache entries belonging to the
+ * RemoveStaleShardIdCacheEntries removes all shard ID cache entries belonging to the
  * given table entry. If the shard ID belongs to a different (newer) table entry,
  * we leave it in place.
  */

@@ -701,7 +701,7 @@ GroupForNode(char *nodeName, int nodePort)
 /*
- * NodeIsPrimaryAndLocal returns whether the argument represents the local
+ * NodeIsPrimaryAndRemote returns whether the argument represents the remote
  * primary node.
  */
 bool
@@ -206,7 +206,7 @@ ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray,
 /*
- * ExecuteCommandsInParellelAndStoreResults connects to each node specified in
+ * ExecuteCommandsInParallelAndStoreResults connects to each node specified in
  * nodeNameArray and nodePortArray, and executes command in commandStringArray
  * in parallel fashion. Execution success status and result is reported for
  * each command in statusArray and resultStringArray. Each array contains

@@ -525,7 +525,7 @@ ErrorIfMoveUnsupportedTableType(Oid relationId)
 /*
- * BlockWritesToColocatedShardList blocks writes to all shards in the given shard
+ * BlockWritesToShardList blocks writes to all shards in the given shard
  * list. The function assumes that all the shards in the list are colocated.
  */
 void

@@ -217,7 +217,7 @@ DropOrphanedShards(bool waitForLocks)
 /*
- * TryLockRelationAndCleanup tries to lock the given relation
+ * TryLockRelationAndPlacementCleanup tries to lock the given relation
  * and the placement cleanup. If it cannot, it returns false.
  *
  */
@@ -2048,7 +2048,7 @@ CompareShardCostAsc(const void *void1, const void *void2)
 /*
- * CompareShardCostAsc can be used to sort shard costs from high cost to low
+ * CompareShardCostDesc can be used to sort shard costs from high cost to low
  * cost.
  */
 static int

@@ -2109,8 +2109,8 @@ CompareDisallowedPlacementAsc(const void *void1, const void *void2)
 /*
- * CompareDisallowedPlacementAsc can be used to sort disallowed placements from
- * low cost to high cost.
+ * CompareDisallowedPlacementDesc can be used to sort disallowed placements from
+ * high cost to low cost.
  */
 static int
 CompareDisallowedPlacementDesc(const void *a, const void *b)

@@ -2619,7 +2619,7 @@ ActivePlacementsHash(List *shardPlacementList)
 /*
- * PlacementsHashFinds returns true if there exists a shard placement with the
+ * PlacementsHashFind returns true if there exists a shard placement with the
  * given workerNode and shard id in the given placements hash, otherwise it
  * returns false.
  */

@@ -2681,7 +2681,7 @@ PlacementsHashRemove(HTAB *placementsHash, uint64 shardId, WorkerNode *workerNod
 /*
- * ShardPlacementCompare compares two shard placements using shard id, node name,
+ * PlacementsHashCompare compares two shard placements using shard id, node name,
  * and node port number.
  */
 static int

@@ -2722,7 +2722,7 @@ PlacementsHashCompare(const void *lhsKey, const void *rhsKey, Size keySize)
 /*
- * ShardPlacementHashCode computes the hash code for a shard placement from the
+ * PlacementsHashHashCode computes the hash code for a shard placement from the
  * placement's shard id, node name, and node port number.
  */
 static uint32
@@ -2924,7 +2924,7 @@ EnsureNodeCapacityUDF(Oid functionOid)
 /*
- * EnsureNodeCapacityUDF checks that the UDF matching the oid has the correct
+ * EnsureShardAllowedOnNodeUDF checks that the UDF matching the oid has the correct
  * signature to be used as a NodeCapacity function. The expected signature is:
  *
  * shard_allowed_on_node(shardid bigint, nodeid int) returns boolean
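The signature quoted in this comment is the SQL-level hook the shard rebalancer calls to decide whether a shard may be placed on a node. A minimal sketch of a function matching that signature (the function name and the always-allow policy are illustrative assumptions, not part of this change):

    -- illustrative sketch matching the expected signature:
    -- shard_allowed_on_node(shardid bigint, nodeid int) returns boolean
    CREATE FUNCTION allow_shard_everywhere(shardid bigint, nodeid int)
    RETURNS boolean
    LANGUAGE sql
    AS $$ SELECT true; $$;
    -- a real policy would consult pg_dist_shard / pg_dist_node to restrict
    -- which shards may be placed on which nodes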
@@ -1635,7 +1635,7 @@ ExplainOneQuery(Query *query, int cursorOptions,
 /*
- * ExplainAnalyzeWorkerPlan produces explain output into es. If es->analyze, it also executes
+ * ExplainWorkerPlan produces explain output into es. If es->analyze, it also executes
  * the given plannedStmt and sends the results to dest. It puts total time to execute in
  * executionDurationMillisec.
  *

@@ -2626,7 +2626,7 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual,
 /*
- * PrcoessDistinctClauseForWorkerQuery gets the inputs and modifies the outputs
+ * ProcessDistinctClauseForWorkerQuery gets the inputs and modifies the outputs
  * such that worker query's DISTINCT and DISTINCT ON clauses are set accordingly.
  * Note the function may or may not decide to pushdown the DISTINCT and DISTINCT
  * on clauses based on the inputs.

@@ -2789,7 +2789,7 @@ ProcessLimitOrderByForWorkerQuery(OrderByLimitReference orderByLimitReference,
 /*
- * BuildLimitOrderByReference is a helper function that simply builds
+ * BuildOrderByLimitReference is a helper function that simply builds
  * the necessary information for processing the limit and order by.
  * The return value should be used in a read-only manner.
  */

@@ -264,7 +264,7 @@ CreateModifyPlan(Query *originalQuery, Query *query,
 /*
- * CreateSingleTaskRouterPlan creates a physical plan for given SELECT query.
+ * CreateSingleTaskRouterSelectPlan creates a physical plan for given SELECT query.
  * The returned plan is a router task that returns query results from a single worker.
  * If not router plannable, the returned plan's planningError describes the problem.
  */

@@ -1878,7 +1878,7 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
 /*
- * SingleShardRouterTaskList is a wrapper around other corresponding task
+ * GenerateSingleShardRouterTaskList is a wrapper around other corresponding task
  * list generation functions specific to single shard selects and modifications.
  *
  * The function updates the input job's taskList in-place.
@@ -77,7 +77,7 @@ static DeferredErrorMessage * DeferredErrorIfUnsupportedRecurringTuplesJoin(
     PlannerRestrictionContext *plannerRestrictionContext);
 static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree);
 static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree);
-static bool ExtractSetOperationStatmentWalker(Node *node, List **setOperationList);
+static bool ExtractSetOperationStatementWalker(Node *node, List **setOperationList);
 static RecurringTuplesType FetchFirstRecurType(PlannerInfo *plannerInfo,
     Relids relids);
 static bool ContainsRecurringRTE(RangeTblEntry *rangeTableEntry,

@@ -1254,8 +1254,8 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
     ListCell *setOperationStatmentCell = NULL;
     RecurringTuplesType recurType = RECURRING_TUPLES_INVALID;
 
-    ExtractSetOperationStatmentWalker((Node *) subqueryTree->setOperations,
-        &setOperationStatementList);
+    ExtractSetOperationStatementWalker((Node *) subqueryTree->setOperations,
+        &setOperationStatementList);
     foreach(setOperationStatmentCell, setOperationStatementList)
     {
         SetOperationStmt *setOperation =

@@ -1343,7 +1343,7 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
  * and finds all set operations in the tree.
  */
 static bool
-ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
+ExtractSetOperationStatementWalker(Node *node, List **setOperationList)
 {
     if (node == NULL)
     {

@@ -1358,7 +1358,7 @@ ExtractSetOperationStatmentWalker(Node *node, List **setOperationList)
     }
 
     bool walkerResult = expression_tree_walker(node,
-        ExtractSetOperationStatmentWalker,
+        ExtractSetOperationStatementWalker,
         setOperationList);
 
     return walkerResult;
@@ -564,8 +564,8 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode,
 /*
- * RecursivelyPlanNonColocatedJoinWalker gets a query and walks over its sublinks
- * to find subqueries that live in WHERE clause.
+ * RecursivelyPlanNonColocatedSubqueriesInWhere gets a query and walks over its
+ * sublinks to find subqueries that live in WHERE clause.
  *
  * When a subquery found, it's checked whether the subquery is colocated with the
  * anchor subquery specified in the nonColocatedJoinContext. If not,

@@ -1122,7 +1122,7 @@ IsRelationLocalTableOrMatView(Oid relationId)
 /*
- * RecursivelyPlanQuery recursively plans a query, replaces it with a
+ * RecursivelyPlanSubquery recursively plans a query, replaces it with a
  * result query and returns the subplan.
  *
  * Before we recursively plan the given subquery, we should ensure

@@ -220,7 +220,7 @@ CheckForDistributedDeadlocks(void)
 /*
- * CheckDeadlockForDistributedTransaction does a DFS starting with the given
+ * CheckDeadlockForTransactionNode does a DFS starting with the given
  * transaction node and checks for a cycle (i.e., the node can be reached again
  * while traversing the graph).
  *
@@ -51,7 +51,7 @@ static void Assign2PCIdentifier(MultiConnection *connection);
 /*
- * StartRemoteTransactionBeging initiates beginning the remote transaction in
+ * StartRemoteTransactionBegin initiates beginning the remote transaction in
  * a non-blocking manner. The function sends "BEGIN" followed by
  * assign_distributed_transaction_id() to assign the distributed transaction
  * id on the remote node.

@@ -693,7 +693,7 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId)
 /*
- * GetColumnTypeEquality checks if distribution column types and collations
+ * EnsureColumnTypeEquality checks if distribution column types and collations
  * of the given columns are same. The function sets the boolean pointers.
  */
 void

@@ -649,7 +649,7 @@ LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode)
 /*
- * LockPlacementListMetadata takes locks on the metadata of all shards in
+ * LockShardsInPlacementListMetadata takes locks on the metadata of all shards in
  * shardPlacementList to prevent concurrent placement changes.
  */
 void