Fix concurrent shard move/copy and colocated table creation

It turns out that create_distributed_table
and citus_move/copy_shard_placement do not
work well when run concurrently.

To fix that, we need to acquire a lock, which
sounds like a good use for a colocation lock.

However, the current usage of the colocation lock is
limited to higher-level UDFs like rebalance_table_shards.
That usage is still useful, but we cannot acquire the
same lock in citus_move_shard_placement etc., because
those higher-level UDFs run the move/copy via a connection
from the coordinator to itself; if the inner UDF tried to
take the same lock, the higher-level UDF would block itself.

To fix that, we add one more colocation lock, where the
placements are the main objects to consider.
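
To illustrate the intended interaction, here is a minimal
sketch (not the exact call sites, which are in the diff below):

    /* colocated shard creation: allow concurrent creates,
     * but conflict with moves/copies */
    AcquirePlacementColocationLock(colocatedTableId, ShareLock,
                                   "colocate distributed table");

    /* shard move/copy/repair: conflict with creates and with
     * other moves/copies on the same colocation group */
    AcquirePlacementColocationLock(relationId, ExclusiveLock, "move");

Since ShareLock conflicts with ExclusiveLock but not with itself,
concurrent colocated table creations can proceed in parallel,
while a concurrent move/copy on the same colocation group fails
fast (the lock is taken with dontWait) instead of racing with
shard creation.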
improve_locking_try_3
Onder Kalaci 2022-07-20 11:28:30 +02:00
parent c085ac026a
commit 360fd790c6
9 changed files with 177 additions and 129 deletions


@ -60,6 +60,7 @@
#include "distributed/relation_access_tracking.h"
#include "distributed/remote_commands.h"
#include "distributed/shared_library_init.h"
#include "distributed/shard_rebalancer.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"
@ -850,6 +851,12 @@ CreateHashDistributedTableShards(Oid relationId, int shardCount,
if (colocatedTableId != InvalidOid)
{
/*
* Acquire the placement colocation lock so that a concurrent shard
* move/copy (which takes this lock in ExclusiveLock mode) cannot run
* while we create shards colocated with colocatedTableId. ShareLock
* still allows other colocated table creations to proceed in parallel.
*/
AcquirePlacementColocationLock(colocatedTableId, ShareLock,
"colocate distributed table");
CreateColocatedShards(relationId, colocatedTableId, useExclusiveConnection);
}
else


@ -37,6 +37,7 @@
#include "distributed/reference_table_utils.h"
#include "distributed/remote_commands.h"
#include "distributed/resource_lock.h"
#include "distributed/shard_rebalancer.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
@ -165,6 +166,9 @@ citus_copy_shard_placement(PG_FUNCTION_ARGS)
ShardInterval *shardInterval = LoadShardInterval(shardId);
ErrorIfTableCannotBeReplicated(shardInterval->relationId);
AcquirePlacementColocationLock(shardInterval->relationId, ExclusiveLock,
doRepair ? "repair" : "copy");
if (doRepair)
{
RepairShardPlacement(shardId, sourceNodeName, sourceNodePort, targetNodeName,
@ -318,6 +322,8 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
ErrorIfMoveUnsupportedTableType(relationId);
ErrorIfTargetNodeIsNotSafeToMove(targetNodeName, targetNodePort);
AcquirePlacementColocationLock(relationId, ExclusiveLock, "move");
ShardInterval *shardInterval = LoadShardInterval(shardId);
Oid distributedTableId = shardInterval->relationId;


@ -230,7 +230,7 @@ static float4 NodeCapacity(WorkerNode *workerNode, void *context);
static ShardCost GetShardCost(uint64 shardId, void *context);
static List * NonColocatedDistRelationIdList(void);
static void RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid);
static void AcquireColocationLock(Oid relationId, const char *operationName);
static void AcquireRelationColocationLock(Oid relationId, const char *operationName);
static void ExecutePlacementUpdates(List *placementUpdateList, Oid
shardReplicationModeOid, char *noticeOperation);
static float4 CalculateUtilization(float4 totalCost, float4 capacity);
@ -619,13 +619,13 @@ GetColocatedRebalanceSteps(List *placementUpdateList)
/*
* AcquireColocationLock tries to acquire a lock for rebalance/replication. If
* this is it not possible it fails instantly because this means another
* rebalance/replication is currently happening. This would really mess up
* planning.
* AcquireRelationColocationLock tries to acquire a lock for
* rebalance/replication. If this is not possible, it fails
* instantly because that means another rebalance/replication
* is currently happening. This would really mess up planning.
*/
static void
AcquireColocationLock(Oid relationId, const char *operationName)
AcquireRelationColocationLock(Oid relationId, const char *operationName)
{
uint32 lockId = relationId;
LOCKTAG tag;
@ -636,7 +636,7 @@ AcquireColocationLock(Oid relationId, const char *operationName)
lockId = citusTableCacheEntry->colocationId;
}
SET_LOCKTAG_REBALANCE_COLOCATION(tag, (int64) lockId);
SET_LOCKTAG_REBALANCE_TABLE_COLOCATION(tag, (int64) lockId);
LockAcquireResult lockAcquired = LockAcquire(&tag, ExclusiveLock, false, true);
if (!lockAcquired)
@ -648,6 +648,37 @@ AcquireColocationLock(Oid relationId, const char *operationName)
}
/*
* AcquirePlacementColocationLock tries to acquire a lock for
* rebalance/replication while moving/copying the placement. If this
* is not possible, it fails instantly because that means
* another move/copy is currently happening. This would really mess up planning.
*/
void
AcquirePlacementColocationLock(Oid relationId, int lockMode,
const char *operationName)
{
uint32 lockId = relationId;
LOCKTAG tag;
CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId);
if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID)
{
lockId = citusTableCacheEntry->colocationId;
}
SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, (int64) lockId);
LockAcquireResult lockAcquired = LockAcquire(&tag, lockMode, false, true);
if (!lockAcquired)
{
ereport(ERROR, (errmsg("could not acquire the lock required to %s %s",
operationName, generate_qualified_relation_name(
relationId))));
}
}
/*
* GetResponsiveWorkerList returns a List of workers that respond to new
* connection requests.
@ -945,7 +976,7 @@ replicate_table_shards(PG_FUNCTION_ARGS)
char transferMode = LookupShardTransferMode(shardReplicationModeOid);
EnsureReferenceTablesExistOnAllNodesExtended(transferMode);
AcquireColocationLock(relationId, "replicate");
AcquireRelationColocationLock(relationId, "replicate");
List *activeWorkerList = SortedActiveWorkers();
List *shardPlacementList = FullShardPlacementList(relationId, excludedShardArray);
@ -1558,7 +1589,7 @@ RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid)
foreach_oid(relationId, options->relationIdList)
{
AcquireColocationLock(relationId, operationName);
AcquireRelationColocationLock(relationId, operationName);
}
List *placementUpdateList = GetRebalanceSteps(options);


@ -441,7 +441,7 @@ LockColocationId(int colocationId, LOCKMODE lockMode)
const bool sessionLock = false;
const bool dontWait = false;
SET_LOCKTAG_REBALANCE_COLOCATION(tag, (int64) colocationId);
SET_LOCKTAG_REBALANCE_TABLE_COLOCATION(tag, (int64) colocationId);
(void) LockAcquire(&tag, lockMode, sessionLock, dontWait);
}
@ -455,7 +455,7 @@ UnlockColocationId(int colocationId, LOCKMODE lockMode)
LOCKTAG tag;
const bool sessionLock = false;
SET_LOCKTAG_REBALANCE_COLOCATION(tag, (int64) colocationId);
SET_LOCKTAG_REBALANCE_TABLE_COLOCATION(tag, (int64) colocationId);
LockRelease(&tag, lockMode, sessionLock);
}


@ -37,11 +37,12 @@ typedef enum AdvisoryLocktagClass
ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA = 4,
ADV_LOCKTAG_CLASS_CITUS_SHARD = 5,
ADV_LOCKTAG_CLASS_CITUS_JOB = 6,
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION = 7,
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_RELATION_COLOCATION = 7,
ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8,
ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9,
ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10,
ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12
ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12,
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13
} AdvisoryLocktagClass;
/* CitusOperations has constants for citus operations */
@ -77,12 +78,23 @@ typedef enum CitusOperations
/* reuse advisory lock, but with different, unused field 4 (7)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_REBALANCE_COLOCATION(tag, colocationOrTableId) \
#define SET_LOCKTAG_REBALANCE_TABLE_COLOCATION(tag, colocationOrTableId) \
SET_LOCKTAG_ADVISORY(tag, \
MyDatabaseId, \
(uint32) ((colocationOrTableId) >> 32), \
(uint32) (colocationOrTableId), \
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_RELATION_COLOCATION)
/* reuse advisory lock, but with different, unused field 4 (13)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, colocationOrTableId) \
SET_LOCKTAG_ADVISORY(tag, \
MyDatabaseId, \
(uint32) ((colocationOrTableId) >> 32), \
(uint32) (colocationOrTableId), \
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION)
/* advisory lock for citus operations, also it has the database hardcoded to MyDatabaseId,
* to ensure the locks are local to each database */
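
For orientation, here is roughly what the new
SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION macro above produces for a
hypothetical colocation id of 92001, assuming PostgreSQL's standard
SET_LOCKTAG_ADVISORY field layout:

    tag.locktag_field1 = MyDatabaseId; /* locks stay local to each database */
    tag.locktag_field2 = 0;            /* (uint32) (92001 >> 32) */
    tag.locktag_field3 = 92001;        /* (uint32) 92001 */
    tag.locktag_field4 = 13;           /* ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION */

Because field 4 differs from the existing rebalance class (7), this lock
never conflicts with the table colocation lock held by higher-level UDFs
such as rebalance_table_shards, which lets the inner move/copy UDFs take
it without the coordinator blocking itself.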


@ -194,6 +194,7 @@ extern List * RebalancePlacementUpdates(List *workerNodeList,
extern List * ReplicationPlacementUpdates(List *workerNodeList, List *shardPlacementList,
int shardReplicationFactor);
extern void ExecuteRebalancerCommandInSeparateTransaction(char *command);
extern void AcquirePlacementColocationLock(Oid relationId, int lockMode,
const char *operationName);
#endif /* SHARD_REBALANCER_H */


@ -2,19 +2,19 @@ Parsed test spec with 2 sessions
starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s1-load-cache:
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
step s2-load-cache:
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
step s2-begin:
BEGIN;
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
master_copy_shard_placement
---------------------------------------------------------------------
@ -22,23 +22,22 @@ master_copy_shard_placement
(1 row)
step s1-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
ERROR: could not acquire the lock required to repair public.test_hash_table
step s2-commit:
COMMIT;
step s1-repair-placement: <... completed>
ERROR: target placement must be in inactive state
starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
step s2-begin:
BEGIN;
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
master_copy_shard_placement
---------------------------------------------------------------------
@ -46,10 +45,9 @@ master_copy_shard_placement
(1 row)
step s1-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
ERROR: could not acquire the lock required to repair public.test_hash_table
step s2-commit:
COMMIT;
step s1-repair-placement: <... completed>
ERROR: target placement must be in inactive state


@ -10,10 +10,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
@ -33,7 +33,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -62,10 +62,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-upsert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
@ -86,7 +86,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -118,10 +118,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-update:
UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15;
@ -141,7 +141,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -173,10 +173,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-delete:
DELETE FROM logical_replicate_placement WHERE x = 15;
@ -196,7 +196,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -227,10 +227,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-select:
SELECT * FROM logical_replicate_placement ORDER BY y;
@ -255,7 +255,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-get-shard-distribution:
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@ -279,10 +279,10 @@ pg_advisory_lock
(1 row)
step s1-begin:
BEGIN;
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-select-for-update:
SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE;
@ -307,7 +307,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-get-shard-distribution:
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@ -320,7 +320,7 @@ nodeport
starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -329,10 +329,10 @@ step s2-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-end:
COMMIT;
COMMIT;
step s1-move-placement: <... completed>
master_move_shard_placement
@ -341,7 +341,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -362,7 +362,7 @@ nodeport
starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -372,10 +372,10 @@ step s2-upsert:
INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-end:
COMMIT;
COMMIT;
step s1-move-placement: <... completed>
master_move_shard_placement
@ -384,7 +384,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -408,7 +408,7 @@ step s1-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -417,10 +417,10 @@ step s2-update:
UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-end:
COMMIT;
COMMIT;
step s1-move-placement: <... completed>
master_move_shard_placement
@ -429,7 +429,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -453,7 +453,7 @@ step s1-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -462,10 +462,10 @@ step s2-delete:
DELETE FROM logical_replicate_placement WHERE x = 15;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-end:
COMMIT;
COMMIT;
step s1-move-placement: <... completed>
master_move_shard_placement
@ -474,7 +474,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-select:
SELECT * FROM logical_replicate_placement order by y;
@ -497,7 +497,7 @@ step s1-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -511,7 +511,7 @@ step s2-select:
(1 row)
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
---------------------------------------------------------------------
@ -519,10 +519,10 @@ master_move_shard_placement
(1 row)
step s2-end:
COMMIT;
COMMIT;
step s1-end:
COMMIT;
COMMIT;
step s1-get-shard-distribution:
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@ -538,7 +538,7 @@ step s1-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
@ -552,10 +552,10 @@ step s2-select-for-update:
(1 row)
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s2-end:
COMMIT;
COMMIT;
step s1-move-placement: <... completed>
master_move_shard_placement
@ -564,7 +564,7 @@ master_move_shard_placement
(1 row)
step s1-end:
COMMIT;
COMMIT;
step s1-get-shard-distribution:
select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@ -577,13 +577,13 @@ nodeport
starting permutation: s1-begin s2-begin s1-move-placement s2-move-placement s1-end s2-end
step s1-begin:
BEGIN;
BEGIN;
step s2-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
---------------------------------------------------------------------
@ -591,19 +591,14 @@ master_move_shard_placement
(1 row)
step s2-move-placement:
SELECT master_move_shard_placement(
get_shard_id_for_distribution_column('logical_replicate_placement', 4),
'localhost', 57637, 'localhost', 57638);
<waiting ...>
step s1-end:
COMMIT;
SELECT master_move_shard_placement(
get_shard_id_for_distribution_column('logical_replicate_placement', 4),
'localhost', 57637, 'localhost', 57638);
step s2-move-placement: <... completed>
master_move_shard_placement
---------------------------------------------------------------------
(1 row)
ERROR: could not acquire the lock required to move public.logical_replicate_placement
step s1-end:
COMMIT;
step s2-end:
COMMIT;
COMMIT;


@ -2,13 +2,13 @@ Parsed test spec with 2 sessions
starting permutation: s1-load-cache s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements
step s1-load-cache:
COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
step s2-begin:
BEGIN;
BEGIN;
step s2-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
master_move_shard_placement
---------------------------------------------------------------------
@ -16,24 +16,23 @@ master_move_shard_placement
(1 row)
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
<waiting ...>
step s2-commit:
COMMIT;
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
ERROR: could not acquire the lock required to move public.test_move_table
step s2-commit:
COMMIT;
step s1-move-placement: <... completed>
ERROR: source placement must be in active state
step s2-print-placements:
SELECT
nodename, nodeport, count(*)
FROM
pg_dist_shard_placement
WHERE
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
AND
shardstate = 1
GROUP BY
nodename, nodeport;
SELECT
nodename, nodeport, count(*)
FROM
pg_dist_shard_placement
WHERE
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
AND
shardstate = 1
GROUP BY
nodename, nodeport;
nodename |nodeport|count
---------------------------------------------------------------------
@ -43,10 +42,10 @@ localhost| 57638| 2
starting permutation: s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements
step s2-begin:
BEGIN;
BEGIN;
step s2-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
master_move_shard_placement
---------------------------------------------------------------------
@ -54,24 +53,23 @@ master_move_shard_placement
(1 row)
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
<waiting ...>
step s2-commit:
COMMIT;
SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
ERROR: could not acquire the lock required to move public.test_move_table
step s2-commit:
COMMIT;
step s1-move-placement: <... completed>
ERROR: source placement must be in active state
step s2-print-placements:
SELECT
nodename, nodeport, count(*)
FROM
pg_dist_shard_placement
WHERE
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
AND
shardstate = 1
GROUP BY
nodename, nodeport;
SELECT
nodename, nodeport, count(*)
FROM
pg_dist_shard_placement
WHERE
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
AND
shardstate = 1
GROUP BY
nodename, nodeport;
nodename |nodeport|count
---------------------------------------------------------------------