niupre/TestDeferredDropAndCleanup
Nitish Upreti 2022-08-25 18:45:41 -07:00
parent 919e44eab6
commit 3d46860fbb
4 changed files with 49 additions and 40 deletions

View File

@@ -69,7 +69,8 @@ PG_FUNCTION_INFO_V1(citus_cleanup_orphaned_shards);
PG_FUNCTION_INFO_V1(isolation_cleanup_orphaned_shards);
static int DropOrphanedShardsForMove(bool waitForLocks);
static bool TryDropShardOutsideTransaction(char *qualifiedTableName, char *nodeName, int nodePort);
static bool TryDropShardOutsideTransaction(char *qualifiedTableName, char *nodeName, int
nodePort);
static bool TryLockRelationAndPlacementCleanup(Oid relationId, LOCKMODE lockmode);
/* Functions for cleanup infrastructure */
@@ -246,7 +247,8 @@ DropOrphanedShardsForCleanup()
if (failedShardCountForCleanup > 0)
{
ereport(WARNING, (errmsg("Failed to drop %d cleanup shards out of %d",
failedShardCountForCleanup, list_length(cleanupRecordList))));
failedShardCountForCleanup, list_length(
cleanupRecordList))));
}
return removedShardCountForCleanup;
@@ -417,12 +419,14 @@ CompleteNewOperationNeedingCleanup(bool isSuccess)
if (list_length(currentOperationRecordList) > 0)
{
ereport(LOG, (errmsg("Removed %d orphaned shards out of %d",
removedShardCountOnComplete, list_length(currentOperationRecordList))));
removedShardCountOnComplete, list_length(
currentOperationRecordList))));
if (failedShardCountOnComplete > 0)
{
ereport(WARNING, (errmsg("Failed to drop %d cleanup shards out of %d",
failedShardCountOnComplete, list_length(currentOperationRecordList))));
failedShardCountOnComplete, list_length(
currentOperationRecordList))));
}
}
}
@@ -680,7 +684,8 @@ ListCleanupRecordsForCurrentOperation(void)
int scanKeyCount = 1;
Oid scanIndexId = InvalidOid;
bool useIndex = false;
SysScanDesc scanDescriptor = systable_beginscan(pgDistCleanup, scanIndexId, useIndex, NULL,
SysScanDesc scanDescriptor = systable_beginscan(pgDistCleanup, scanIndexId, useIndex,
NULL,
scanKeyCount, scanKey);
HeapTuple heapTuple = NULL;
@@ -697,6 +702,7 @@ ListCleanupRecordsForCurrentOperation(void)
return recordList;
}
/*
* TupleToCleanupRecord converts a pg_dist_cleanup record tuple into a CleanupRecord struct.
*/
@@ -808,6 +814,7 @@ static uint64
GetNextCleanupRecordId(void)
{
uint64 recordId = INVALID_CLEANUP_RECORD_ID;
/*
* In regression tests, we would like to generate record IDs consistently
* even if the tests run in parallel. Instead of the sequence, we can use
@@ -844,6 +851,7 @@ LockOperationId(OperationId operationId)
(void) LockAcquire(&tag, ExclusiveLock, sessionLock, dontWait);
}
static bool
TryLockOperationId(OperationId operationId)
{
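
Aside: the counters touched in the hunks above (removedShardCountForCleanup, failedShardCountForCleanup and their LOG/WARNING messages) follow a simple tally-then-report pattern. The following is a minimal standalone C sketch of that pattern only; CleanupRecordStub and the hard-coded records are hypothetical stand-ins for pg_dist_cleanup rows and for the outcome of TryDropShardOutsideTransaction(), not actual Citus code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one pg_dist_cleanup row. */
typedef struct CleanupRecordStub
{
    const char *qualifiedShardName;
    bool dropSucceeded;    /* simulates the result of TryDropShardOutsideTransaction() */
} CleanupRecordStub;

int
main(void)
{
    CleanupRecordStub cleanupRecordList[] = {
        { "public.sensors_102008", true },
        { "public.sensors_102009", false },
        { "public.sensors_102010", true },
    };
    int recordCount = (int) (sizeof(cleanupRecordList) / sizeof(cleanupRecordList[0]));

    int removedShardCountForCleanup = 0;
    int failedShardCountForCleanup = 0;

    /* Walk the cleanup records and count successful vs. failed drops. */
    for (int i = 0; i < recordCount; i++)
    {
        if (cleanupRecordList[i].dropSucceeded)
        {
            removedShardCountForCleanup++;
        }
        else
        {
            failedShardCountForCleanup++;
        }
    }

    /* Mirrors the LOG and WARNING messages seen in the diff above. */
    printf("LOG: Removed %d orphaned shards out of %d\n",
           removedShardCountForCleanup, recordCount);
    if (failedShardCountForCleanup > 0)
    {
        fprintf(stderr, "WARNING: Failed to drop %d cleanup shards out of %d\n",
                failedShardCountForCleanup, recordCount);
    }

    return 0;
}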

View File

@@ -75,8 +75,7 @@ static void ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
ShardInterval *shardIntervalToSplit,
List *shardSplitPointsList,
List *nodeIdsForPlacementList);
static void CreateAndCopySplitShardsForShardGroup(
WorkerNode *sourceShardNode,
static void CreateAndCopySplitShardsForShardGroup(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
@@ -133,7 +132,8 @@ static List * ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
List *destinationWorkerNodesList);
static void AddDummyShardEntryInMap(HTAB *mapOfPlacementToDummyShardList, uint32 targetNodeId,
static void AddDummyShardEntryInMap(HTAB *mapOfPlacementToDummyShardList, uint32
targetNodeId,
ShardInterval *shardInterval);
static uint64 GetNextShardIdForSplitChild(void);
@@ -581,7 +581,8 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList,
/* Log resource for cleanup in case of failure only. */
CleanupPolicy policy = CLEANUP_ON_FAILURE;
InsertCleanupRecordInSubtransaction(CLEANUP_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
policy);
@@ -1132,7 +1133,6 @@ DropShardList(List *shardIntervalList)
if (DeferShardDeleteOnSplit)
{
/* Log shard in pg_dist_cleanup.
* Parent shards are to be dropped only on success after the split workflow is complete,
* so mark the policy as 'CLEANUP_DEFERRED_ON_SUCCESS'.
@@ -1465,7 +1465,8 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
*/
CleanupPolicy policy = CLEANUP_ALWAYS;
InsertCleanupRecordInSubtransaction(CLEANUP_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
policy);
@@ -1509,7 +1510,8 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
*/
CleanupPolicy policy = CLEANUP_ALWAYS;
InsertCleanupRecordInSubtransaction(CLEANUP_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
policy);
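
Aside: the InsertCleanupRecordInSubtransaction() calls in this file register shard placements under three different policies: CLEANUP_ON_FAILURE for the newly created split children, CLEANUP_ALWAYS for the dummy shards, and CLEANUP_DEFERRED_ON_SUCCESS for the parent shards when DeferShardDeleteOnSplit is enabled. The self-contained C sketch below only illustrates that mapping; the *_SKETCH enum values, the shard-kind enum and PolicyForSplitShard() are made-up names for illustration, not the actual Citus definitions.

#include <stdio.h>

/* Illustrative stand-ins for the cleanup policies referenced in this file. */
typedef enum CleanupPolicySketch
{
    CLEANUP_ON_FAILURE_SKETCH,          /* drop the resource only if the operation fails */
    CLEANUP_ALWAYS_SKETCH,              /* drop the resource regardless of the outcome */
    CLEANUP_DEFERRED_ON_SUCCESS_SKETCH  /* drop the resource later, once the operation succeeded */
} CleanupPolicySketch;

/* Kinds of shard placements that a shard split touches. */
typedef enum SplitShardKind
{
    SPLIT_CHILD_SHARD,    /* new placement created by the split */
    DUMMY_SHARD,          /* placeholder placement (see CreateDummyShardsForShardGroup above) */
    SOURCE_PARENT_SHARD   /* the shard being split */
} SplitShardKind;

/* Maps each shard kind to the policy the split code records for it. */
static CleanupPolicySketch
PolicyForSplitShard(SplitShardKind kind)
{
    switch (kind)
    {
        case SPLIT_CHILD_SHARD:
            return CLEANUP_ON_FAILURE_SKETCH;

        case DUMMY_SHARD:
            return CLEANUP_ALWAYS_SKETCH;

        case SOURCE_PARENT_SHARD:
        default:
            return CLEANUP_DEFERRED_ON_SUCCESS_SKETCH;
    }
}

int
main(void)
{
    static const char *policyNames[] = {
        "CLEANUP_ON_FAILURE", "CLEANUP_ALWAYS", "CLEANUP_DEFERRED_ON_SUCCESS"
    };
    static const char *kindNames[] = {
        "split child", "dummy shard", "source parent"
    };

    for (int kind = SPLIT_CHILD_SHARD; kind <= SOURCE_PARENT_SHARD; kind++)
    {
        printf("%-13s -> %s\n", kindNames[kind],
               policyNames[PolicyForSplitShard((SplitShardKind) kind)]);
    }

    return 0;
}

Deferring the parent-shard drop this way keeps the source shard intact until the whole split workflow has completed, which is what the CLEANUP_DEFERRED_ON_SUCCESS comment in DropShardList() describes.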

View File

@@ -31,4 +31,3 @@
#define CLEANUPRECORDID_SEQUENCE_NAME "pg_dist_cleanup_recordid_seq"
#endif /* PG_DIST_CLEANUP_H */