Fix indentation suggestions

pull/7983/head
Muhammad Usama 2025-05-15 14:43:05 +05:00
parent a8a5f34dc9
commit e8361bcab8
2 changed files with 69 additions and 51 deletions
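This commit applies reviewer indentation suggestions: the diff below (reconstructed from the side-by-side view) adds the second blank line between top-level definitions, removes a stray blank line after an opening brace, and re-wraps calls whose argument lists run past the line-width limit. A minimal compilable sketch of those conventions; the citus_indent/uncrustify tooling and the 90-column limit are assumptions from the Citus style config, not stated in this commit:

#include <stdio.h>

static void
PrintBanner(void)
{
	/* no blank line directly after the opening brace */
	printf("banner\n");
}


/* exactly two empty lines separate top-level definitions */
int
main(void)
{
	PrintBanner();

	/* once a call would exceed the width limit, the remaining arguments
	 * wrap onto continuation lines aligned under the first argument */
	printf("%s %s %s\n", "hypothetical", "illustration",
	       "only");
	return 0;
}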

View File

@@ -768,6 +768,7 @@ AcquireRebalanceColocationLock(Oid relationId, const char *operationName)
 	}
 }
 
+
 /*
  * AcquireRebalanceOperationLock does not allow concurrent rebalance
  * operations.
@@ -794,6 +795,7 @@ AcquireRebalanceOperationLock(const char *operationName)
 	}
 }
 
+
 /*
  * AcquirePlacementColocationLock tries to acquire a lock for
  * rebalance/replication while moving/copying the placement. If this
@@ -2040,6 +2042,7 @@ GenerateTaskMoveDependencyList(PlacementUpdateEvent *move, int64 *refTablesDepTa
 	                               "shardMoveDependencyList", 0);
 	bool found;
+
 	/*
 	 * Check if there exists moves scheduled earlier whose source node
 	 * overlaps with the current move's target node.
@@ -2104,7 +2107,6 @@ static void
 UpdateShardMoveDependencies(PlacementUpdateEvent *move, uint64 colocationId, int64 taskId,
 							ShardMoveDependencies shardMoveDependencies)
 {
-
 	bool found;
 	ShardMoveSourceNodeHashEntry *shardMoveSourceNodeHashEntry = hash_search(
 		shardMoveDependencies.nodeDependencies, &move->sourceNode->nodeId, HASH_ENTER,
@@ -2204,7 +2206,8 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo
 	if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
 	{
-		refTablesDepTaskIds = ScheduleTasksToParallelCopyReferenceTablesOnAllMissingNodes(jobId, TRANSFER_MODE_BLOCK_WRITES, &refTablesDepTaskIdsCount);
+		refTablesDepTaskIds = ScheduleTasksToParallelCopyReferenceTablesOnAllMissingNodes(
+			jobId, TRANSFER_MODE_BLOCK_WRITES, &refTablesDepTaskIdsCount);
 
 		ereport(DEBUG2,
 				(errmsg("%d dependent copy reference table tasks for job %ld",
 						refTablesDepTaskIdsCount, jobId),
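The ScheduleTasksToParallelCopyReferenceTablesOnAllMissingNodes hunk above shows the other wrap style: when the function name alone nearly fills the line, the break goes immediately after the opening parenthesis and the whole argument list drops to an indented continuation line. A compilable sketch with hypothetical names:

static long
ScheduleExampleTasksWithAVeryLongDescriptiveName(long jobId, int transferMode,
                                                 int *taskCount)
{
	*taskCount = 3;                  /* pretend three tasks were scheduled */
	return jobId + *taskCount;
}


int
main(void)
{
	int taskCount = 0;

	/* break right after '(' because no argument fits on the first line */
	long lastTaskId = ScheduleExampleTasksWithAVeryLongDescriptiveName(
		42, 0, &taskCount);
	return (lastTaskId == 45 && taskCount == 3) ? 0 : 1;
}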

View File

@@ -305,6 +305,7 @@ citus_copy_one_shard_placement(PG_FUNCTION_ARGS)
 	PG_RETURN_VOID();
 }
 
+
 /*
  * citus_move_shard_placement moves given shard (and its co-located shards) from one
  * node to the other node. To accomplish this it entirely recreates the table structure
@@ -378,6 +379,8 @@ citus_move_shard_placement_with_nodeid(PG_FUNCTION_ARGS)
 	PG_RETURN_VOID();
 }
+
+
 /*
  * TransferShards is the function for shard transfers.
 */
@@ -464,8 +467,10 @@ TransferShards(int64 shardId, char *sourceNodeName,
 	}
 
 	bool transferAlreadyCompleted = TransferAlreadyCompleted(colocatedShardList,
-															 sourceNodeName, sourceNodePort,
-															 targetNodeName, targetNodePort,
+															 sourceNodeName,
+															 sourceNodePort,
+															 targetNodeName,
+															 targetNodePort,
															 transferType);
 
 	/*
@@ -488,9 +493,12 @@ TransferShards(int64 shardId, char *sourceNodeName,
 		return;
 	}
 
-	CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName,
-					targetNodePort, (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL),
+	CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName
+					,
+					targetNodePort, (shardReplicationMode ==
+									 TRANSFER_MODE_FORCE_LOGICAL),
 					operationFunctionName, optionFlags);
 
 	/* We don't need to do anything else, just return */
 	return;
 }
@@ -591,7 +599,8 @@ TransferShards(int64 shardId, char *sourceNodeName,
 	}
 
 	CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName,
-					targetNodePort, useLogicalReplication, operationFunctionName, optionFlags);
+					targetNodePort, useLogicalReplication, operationFunctionName,
+					optionFlags);
 
 	if (transferType == SHARD_TRANSFER_MOVE)
 	{
@@ -649,6 +658,7 @@ TransferShards(int64 shardId, char *sourceNodeName,
 	FinalizeCurrentProgressMonitor();
 }
 
+
 /*
  * Insert deferred cleanup records.
  * The shards will be dropped by background cleaner later.
@@ -1543,13 +1553,15 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
 	 * wouldn't be visible in the session that get_rebalance_progress uses.
 	 * So get_rebalance_progress would always report its size as 0.
 	 */
-	List *ddlCommandList = RecreateShardDDLCommandList(shardInterval, sourceNodeName,
+	List *ddlCommandList = RecreateShardDDLCommandList(shardInterval,
+													   sourceNodeName,
 													   sourceNodePort);
 	char *tableOwner = TableOwner(shardInterval->relationId);
 
 	/* drop the shard we created on the target, in case of failure */
 	InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-										  ConstructQualifiedShardName(shardInterval),
+										  ConstructQualifiedShardName(
+											  shardInterval),
 										  GroupForNode(targetNodeName,
 													   targetNodePort),
 										  CLEANUP_ON_FAILURE);
@@ -1629,8 +1641,10 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
 	List *referenceTableForeignConstraintList = NIL;
 	CopyShardForeignConstraintCommandListGrouped(shardInterval,
-												 &shardForeignConstraintCommandList,
-												 &referenceTableForeignConstraintList);
+												 &
+												 shardForeignConstraintCommandList,
+												 &
+												 referenceTableForeignConstraintList);
 
 	ShardCommandList *shardCommandList = CreateShardCommandList(
 		shardInterval,
@@ -1736,7 +1750,8 @@ CopyShardsToNode(WorkerNode *sourceNode, WorkerNode *targetNode, List *shardInte
 	ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, copyTaskList,
 									  MaxAdaptiveExecutorPoolSize,
-									  NULL /* jobIdList (ignored by API implementation) */);
+									  NULL /* jobIdList (ignored by API implementation) */
+									  );
 }
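Odd as they look, the lone "," and "&" continuation lines in the TransferShards and CopyShardForeignConstraintCommandListGrouped hunks are valid C; newlines between tokens are insignificant, as this hypothetical sketch shows. The formatter falls back to them when a single argument token is itself too long for the width limit:

#include <stdio.h>

int
main(void)
{
	int answerToEverything = 42;
	int *answerPointer =
		&
		answerToEverything;          /* a lone '&' line still parses fine */
	printf("%d\n", *answerPointer);
	return 0;
}

Renaming overlong identifiers or hoisting sub-expressions into locals is the usual way to avoid such splits; rerunning the repository's formatter (citus_indent, presumably; the exact command is not shown in this commit) keeps the result stable.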