Merge pull request #6066 from citusdata/fix_colocation_lock

Use colocation locks in lower level UDFs and create_distributed_table
Önder Kalacı 2022-07-27 10:10:25 +02:00 committed by GitHub
commit 14c56ce0dd
16 changed files with 868 additions and 448 deletions

@@ -60,6 +60,7 @@
#include "distributed/relation_access_tracking.h"
#include "distributed/remote_commands.h"
#include "distributed/shared_library_init.h"
+#include "distributed/shard_rebalancer.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"
@@ -850,6 +851,17 @@ CreateHashDistributedTableShards(Oid relationId, int shardCount,
	if (colocatedTableId != InvalidOid)
	{
+		/*
+		 * We currently allow concurrent distribution of colocated tables (which
+		 * we probably should not allow because of foreign keys, partitioning,
+		 * etc.).
+		 *
+		 * We also prevent concurrent shard moves / copies / splits while
+		 * creating a colocated table.
+		 */
+		AcquirePlacementColocationLock(colocatedTableId, ShareLock,
+									   "colocate distributed table");
		CreateColocatedShards(relationId, colocatedTableId, useExclusiveConnection);
	}
	else
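Taking the colocation lock in ShareLock mode here lets several create_distributed_table calls join the same colocation group concurrently, while still conflicting with the ExclusiveLock that shard moves, copies, and splits acquire. A minimal sketch of the user-visible behavior, assuming tables t1 and t2; the shard id and ports are illustrative:

-- Session 1: start a shard move on t1's colocation group inside a transaction.
BEGIN;
SELECT citus_move_shard_placement(102008, 'localhost', 57637, 'localhost', 57638);

-- Session 2: creating a table colocated with t1 now fails fast instead of
-- queueing behind the move.
SELECT create_distributed_table('t2', 'id', colocate_with := 't1');
-- ERROR:  could not acquire the lock required to colocate distributed table public.t1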

@@ -37,6 +37,7 @@
#include "distributed/reference_table_utils.h"
#include "distributed/remote_commands.h"
#include "distributed/resource_lock.h"
+#include "distributed/shard_rebalancer.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
@@ -179,6 +180,9 @@ citus_copy_shard_placement(PG_FUNCTION_ARGS)
	ShardInterval *shardInterval = LoadShardInterval(shardId);
	ErrorIfTableCannotBeReplicated(shardInterval->relationId);
+	AcquirePlacementColocationLock(shardInterval->relationId, ExclusiveLock,
+								   doRepair ? "repair" : "copy");
	if (doRepair)
	{
		RepairShardPlacement(shardId, sourceNodeName, sourceNodePort, targetNodeName,
@@ -332,6 +336,8 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
	ErrorIfMoveUnsupportedTableType(relationId);
	ErrorIfTargetNodeIsNotSafeToMove(targetNodeName, targetNodePort);
+	AcquirePlacementColocationLock(relationId, ExclusiveLock, "move");
	ShardInterval *shardInterval = LoadShardInterval(shardId);
	Oid distributedTableId = shardInterval->relationId;
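Both the copy and move paths take the lock in ExclusiveLock mode, so two placement operations on the same colocation group conflict directly and the second errors out immediately rather than waiting. A sketch, assuming a shard of test_hash_table is already being repaired by another open transaction; the shard id is illustrative:

-- Session 1 attempts the same repair while session 2 still holds the lock:
SELECT master_copy_shard_placement(102009, 'localhost', 57637, 'localhost', 57638);
-- ERROR:  could not acquire the lock required to repair public.test_hash_table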

@@ -227,7 +227,7 @@ static float4 NodeCapacity(WorkerNode *workerNode, void *context);
static ShardCost GetShardCost(uint64 shardId, void *context);
static List * NonColocatedDistRelationIdList(void);
static void RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid);
-static void AcquireColocationLock(Oid relationId, const char *operationName);
+static void AcquireRebalanceColocationLock(Oid relationId, const char *operationName);
static void ExecutePlacementUpdates(List *placementUpdateList, Oid
									shardReplicationModeOid, char *noticeOperation);
static float4 CalculateUtilization(float4 totalCost, float4 capacity);
@@ -616,13 +616,13 @@ GetColocatedRebalanceSteps(List *placementUpdateList)
/*
- * AcquireColocationLock tries to acquire a lock for rebalance/replication. If
- * this is it not possible it fails instantly because this means another
- * rebalance/replication is currently happening. This would really mess up
- * planning.
+ * AcquireRebalanceColocationLock tries to acquire a lock for
+ * rebalance/replication. If this is not possible, it fails
+ * instantly because this means another rebalance/replication
+ * is currently happening. This would really mess up planning.
 */
static void
-AcquireColocationLock(Oid relationId, const char *operationName)
+AcquireRebalanceColocationLock(Oid relationId, const char *operationName)
{
	uint32 lockId = relationId;
	LOCKTAG tag;
@@ -639,8 +639,48 @@ AcquireColocationLock(Oid relationId, const char *operationName)
	if (!lockAcquired)
	{
		ereport(ERROR, (errmsg("could not acquire the lock required to %s %s",
-						operationName, generate_qualified_relation_name(
-							relationId))));
+						operationName,
+						generate_qualified_relation_name(relationId)),
+				errdetail("It means that either a concurrent shard move "
+						  "or shard copy is happening."),
+				errhint("Make sure that the concurrent operation has "
+						"finished and re-run the command")));
+	}
+}
+
+
+/*
+ * AcquirePlacementColocationLock tries to acquire a lock for
+ * rebalance/replication while moving/copying the placement. If this
+ * is not possible, it fails instantly because this means
+ * another move/copy is currently happening. This would really mess up planning.
+ */
+void
+AcquirePlacementColocationLock(Oid relationId, int lockMode,
+							   const char *operationName)
+{
+	uint32 lockId = relationId;
+	LOCKTAG tag;
+
+	CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId);
+	if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID)
+	{
+		lockId = citusTableCacheEntry->colocationId;
+	}
+
+	SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, (int64) lockId);
+
+	LockAcquireResult lockAcquired = LockAcquire(&tag, lockMode, false, true);
+	if (!lockAcquired)
+	{
+		ereport(ERROR, (errmsg("could not acquire the lock required to %s %s",
+						operationName,
+						generate_qualified_relation_name(relationId)),
+				errdetail("It means that either a concurrent shard move "
+						  "or colocated distributed table creation is "
+						  "happening."),
+				errhint("Make sure that the concurrent operation has "
+						"finished and re-run the command")));
	}
}
@@ -942,7 +982,7 @@ replicate_table_shards(PG_FUNCTION_ARGS)
	char transferMode = LookupShardTransferMode(shardReplicationModeOid);
	EnsureReferenceTablesExistOnAllNodesExtended(transferMode);

-	AcquireColocationLock(relationId, "replicate");
+	AcquireRebalanceColocationLock(relationId, "replicate");

	List *activeWorkerList = SortedActiveWorkers();
	List *shardPlacementList = FullShardPlacementList(relationId, excludedShardArray);
@@ -1555,7 +1595,7 @@ RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid)
	foreach_oid(relationId, options->relationIdList)
	{
-		AcquireColocationLock(relationId, operationName);
+		AcquireRebalanceColocationLock(relationId, operationName);
	}

	List *placementUpdateList = GetRebalanceSteps(options);
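Both helpers pass dontWait = true as the last argument of LockAcquire, so a conflicting lock surfaces immediately as an error instead of blocking the backend. The full message a caller sees looks roughly like this (relation name and shard id are illustrative):

SELECT citus_move_shard_placement(102008, 'localhost', 57637, 'localhost', 57638);
-- ERROR:  could not acquire the lock required to move public.t1
-- DETAIL:  It means that either a concurrent shard move or colocated
--          distributed table creation is happening.
-- HINT:  Make sure that the concurrent operation has finished and re-run the command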

@@ -35,6 +35,7 @@
#include "distributed/metadata_sync.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/deparse_shard_query.h"
+#include "distributed/shard_rebalancer.h"

/*
 * Entry for map that tracks ShardInterval -> Placement Node
@@ -329,6 +330,9 @@ SplitShard(SplitMode splitMode,
	ShardInterval *shardIntervalToSplit = LoadShardInterval(shardIdToSplit);
	List *colocatedTableList = ColocatedTableList(shardIntervalToSplit->relationId);

+	Oid relationId = RelationIdForShard(shardIdToSplit);
+	AcquirePlacementColocationLock(relationId, ExclusiveLock, "split");
+
	/* sort the tables to avoid deadlocks */
	colocatedTableList = SortList(colocatedTableList, CompareOids);
	Oid colocatedTableId = InvalidOid;

@@ -41,7 +41,8 @@ typedef enum AdvisoryLocktagClass
	ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8,
	ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9,
	ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10,
-	ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12
+	ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12,
+	ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13
} AdvisoryLocktagClass;

/* CitusOperations has constants for citus operations */
@@ -84,6 +85,17 @@ typedef enum CitusOperations
		(uint32) (colocationOrTableId), \
		ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)

+/* reuse the advisory lock tag, but with the different, previously unused
+ * field-4 value (13). The database is also hardcoded to MyDatabaseId, to
+ * ensure the locks are local to each database. */
+#define SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, colocationOrTableId) \
+	SET_LOCKTAG_ADVISORY(tag, \
+						 MyDatabaseId, \
+						 (uint32) ((colocationOrTableId) >> 32), \
+						 (uint32) (colocationOrTableId), \
+						 ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION)
+
/* advisory lock for citus operations, also it has the database hardcoded to MyDatabaseId,
 * to ensure the locks are local to each database */
#define SET_LOCKTAG_CITUS_OPERATION(tag, operationId) \
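Because the new tag reuses the advisory-lock shape, a held or awaited colocation placement lock can be inspected from SQL. A sketch, assuming the usual pg_locks mapping of advisory lock fields (classid = high 32 bits of the id, objid = low 32 bits, objsubid = the locktag class, here 13):

-- List colocation placement locks in the current database.
SELECT classid, objid, objsubid, mode, granted
FROM pg_locks
WHERE locktype = 'advisory'
  AND objsubid = 13  -- ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION
  AND database = (SELECT oid FROM pg_database WHERE datname = current_database());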

@@ -194,6 +194,7 @@ extern List * RebalancePlacementUpdates(List *workerNodeList,
extern List * ReplicationPlacementUpdates(List *workerNodeList, List *shardPlacementList,
										  int shardReplicationFactor);
extern void ExecuteRebalancerCommandInSeparateTransaction(char *command);
+extern void AcquirePlacementColocationLock(Oid relationId, int lockMode,
+										   const char *operationName);

#endif /* SHARD_REBALANCER_H */

@@ -7,13 +7,13 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -27,7 +27,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -35,14 +35,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -50,26 +50,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-update:
    UPDATE to_split_table SET value = 111 WHERE id = 123456789;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -91,13 +91,13 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -111,7 +111,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -119,14 +119,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -134,26 +134,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-delete:
    DELETE FROM to_split_table WHERE id = 123456789;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -175,8 +175,8 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-begin:
    BEGIN;
@@ -185,7 +185,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -193,14 +193,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -208,12 +208,12 @@ citus_split_shard_by_split_points
(1 row)

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);
<waiting ...>
step s2-commit:
    COMMIT;

step s1-insert: <... completed>
get_shard_id_for_distribution_column
@@ -223,18 +223,18 @@ get_shard_id_for_distribution_column
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -255,8 +255,8 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-begin:
    BEGIN;
@@ -265,7 +265,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -273,14 +273,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -288,26 +288,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-copy:
    COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-copy: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -328,9 +328,9 @@ create_distributed_table
(1 row)

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -344,7 +344,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -352,14 +352,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -367,26 +367,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-update:
    UPDATE to_split_table SET value = 111 WHERE id = 123456789;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -408,9 +408,9 @@ create_distributed_table
(1 row)

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -424,7 +424,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -432,14 +432,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -447,26 +447,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-delete:
    DELETE FROM to_split_table WHERE id = 123456789;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -494,7 +494,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -502,14 +502,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -517,12 +517,12 @@ citus_split_shard_by_split_points
(1 row)

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);
<waiting ...>
step s2-commit:
    COMMIT;

step s1-insert: <... completed>
get_shard_id_for_distribution_column
@@ -532,18 +532,18 @@ get_shard_id_for_distribution_column
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -570,7 +570,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -578,14 +578,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -593,26 +593,26 @@ citus_split_shard_by_split_points
(1 row)

step s1-copy:
    COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-copy: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -633,13 +633,13 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -653,11 +653,11 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -665,39 +665,33 @@ citus_split_shard_by_split_points
(1 row)

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');
-<waiting ...>
-step s1-commit:
-    COMMIT;
-
-step s2-blocking-shard-split: <... completed>
-citus_split_shard_by_split_points
----------------------------------------------------------------------
-
-(1 row)
+ERROR: could not acquire the lock required to split public.to_split_table
+step s1-commit:
+    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
 57637|1500003|t | 0
-57637|1500005|t | 1
+57638|1500002|t | 1
 57638|1500004|t | 0
-57638|1500006|t | 0
-(4 rows)
+(3 rows)

id|value
---------------------------------------------------------------------
@@ -712,9 +706,9 @@ create_distributed_table
(1 row)

step s1-insert:
    -- Id '123456789' maps to shard xxxxx.
    SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
    INSERT INTO to_split_table VALUES (123456789, 1);

get_shard_id_for_distribution_column
---------------------------------------------------------------------
@@ -728,11 +722,11 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500001,
        ARRAY['-1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -740,39 +734,33 @@ citus_split_shard_by_split_points
(1 row)

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');
-<waiting ...>
-step s1-commit:
-    COMMIT;
-
-step s2-blocking-shard-split: <... completed>
-citus_split_shard_by_split_points
----------------------------------------------------------------------
-
-(1 row)
+ERROR: could not acquire the lock required to split public.to_split_table
+step s1-commit:
+    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
 57637|1500003|t | 0
-57637|1500005|t | 1
+57638|1500002|t | 1
 57638|1500004|t | 0
-57638|1500006|t | 0
-(4 rows)
+(3 rows)

id|value
---------------------------------------------------------------------
@@ -787,8 +775,8 @@ create_distributed_table
(1 row)

step s1-load-cache:
    -- Indirect way to load cache.
    TRUNCATE to_split_table;

step s1-begin:
    BEGIN;
@@ -797,7 +785,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -805,14 +793,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -820,25 +808,25 @@ citus_split_shard_by_split_points
(1 row)

step s1-ddl:
    CREATE INDEX test_table_index ON to_split_table(id);
<waiting ...>
step s2-commit:
    COMMIT;

step s1-ddl: <... completed>
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -852,12 +840,12 @@ id|value
(0 rows)

step s2-print-index-count:
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
        nodeport;

nodeport|success|result
---------------------------------------------------------------------
@@ -880,7 +868,7 @@ step s1-begin:
    SET citus.select_opens_transaction_block TO false;

step s1-select:
    SELECT count(*) FROM to_split_table WHERE id = 123456789;

count
---------------------------------------------------------------------
@@ -888,14 +876,14 @@ count
(1 row)

step s2-begin:
    BEGIN;

step s2-blocking-shard-split:
    SELECT pg_catalog.citus_split_shard_by_split_points(
        1500002,
        ARRAY['1073741824'],
        ARRAY[1, 2],
        'block_writes');

citus_split_shard_by_split_points
---------------------------------------------------------------------
@@ -903,25 +891,25 @@ citus_split_shard_by_split_points
(1 row)

step s1-ddl:
    CREATE INDEX test_table_index ON to_split_table(id);
<waiting ...>
step s2-commit:
    COMMIT;

step s1-ddl: <... completed>
step s1-commit:
    COMMIT;

step s2-print-cluster:
    -- row count per shard
    SELECT
        nodeport, shardid, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from %s')
    ORDER BY
        nodeport, shardid;
    -- rows
    SELECT id, value FROM to_split_table ORDER BY id, value;

nodeport|shardid|success|result
---------------------------------------------------------------------
@@ -935,12 +923,12 @@ id|value
(0 rows)

step s2-print-index-count:
    SELECT
        nodeport, success, result
    FROM
        run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
    ORDER BY
        nodeport;

nodeport|success|result
---------------------------------------------------------------------


@@ -0,0 +1,233 @@
Parsed test spec with 5 sessions
starting permutation: s2-begin s2-create_distributed_table s3-create_distributed_table s2-commit
step s2-begin:
BEGIN;
step s2-create_distributed_table:
SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s3-create_distributed_table:
SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
starting permutation: s2-begin s2-create_distributed_table s1-move-shard-logical s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
BEGIN;
step s2-create_distributed_table:
SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-move-shard-logical:
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
ERROR: could not acquire the lock required to move public.concurrent_table_1
step s2-commit:
COMMIT;
step s3-sanity-check:
SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
0
(1 row)
step s3-sanity-check-2:
SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s2-begin s2-create_distributed_table s1-move-shard-block s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
BEGIN;
step s2-create_distributed_table:
SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-move-shard-block:
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
ERROR: could not acquire the lock required to move public.concurrent_table_1
step s2-commit:
COMMIT;
step s3-sanity-check:
SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
0
(1 row)
step s3-sanity-check-2:
SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s2-begin s2-create_distributed_table s1-split-block s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
BEGIN;
step s2-create_distributed_table:
SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-split-block:
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
SELECT citus_split_shard_by_split_points(
shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid;
ERROR: could not acquire the lock required to split public.concurrent_table_1
step s2-commit:
COMMIT;
step s3-sanity-check:
SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
0
(1 row)
step s3-sanity-check-2:
SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s4-begin s4-move-shard-logical s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4
step s4-begin:
BEGIN;
step s4-move-shard-logical:
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
step s5-setup-rep-factor:
SET citus.shard_replication_factor TO 1;
step s5-create_implicit_colocated_distributed_table:
SELECT create_distributed_table('concurrent_table_5', 'id');
ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4
step s4-commit:
commit;
step s3-sanity-check:
SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
0
(1 row)
step s3-sanity-check-3:
SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
count
---------------------------------------------------------------------
1
(1 row)
step s3-sanity-check-4:
SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s4-begin s4-move-shard-block s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4
step s4-begin:
BEGIN;
step s4-move-shard-block:
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
step s5-setup-rep-factor:
SET citus.shard_replication_factor TO 1;
step s5-create_implicit_colocated_distributed_table:
SELECT create_distributed_table('concurrent_table_5', 'id');
ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4
step s4-commit:
commit;
step s3-sanity-check:
SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
0
(1 row)
step s3-sanity-check-3:
SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
count
---------------------------------------------------------------------
1
(1 row)
step s3-sanity-check-4:
SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
count
---------------------------------------------------------------------
0
(1 row)

@@ -2,19 +2,19 @@ Parsed test spec with 2 sessions

starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s1-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

step s2-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

step s2-begin:
    BEGIN;

step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
---------------------------------------------------------------------
@@ -22,23 +22,22 @@ master_copy_shard_placement
(1 row)

step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
-<waiting ...>
-step s2-commit:
-    COMMIT;
-
-step s1-repair-placement: <... completed>
-ERROR: target placement must be in inactive state
+ERROR: could not acquire the lock required to repair public.test_hash_table
+step s2-commit:
+    COMMIT;

starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

step s2-begin:
    BEGIN;

step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement
---------------------------------------------------------------------
@@ -46,10 +45,9 @@ master_copy_shard_placement
(1 row)

step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
-<waiting ...>
-step s2-commit:
-    COMMIT;
-
-step s1-repair-placement: <... completed>
-ERROR: target placement must be in inactive state
+ERROR: could not acquire the lock required to repair public.test_hash_table
+step s2-commit:
+    COMMIT;

View File

@@ -10,10 +10,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);
@@ -33,7 +33,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -62,10 +62,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-upsert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);
@@ -86,7 +86,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -118,10 +118,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-update:
    UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15;
@@ -141,7 +141,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -173,10 +173,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-delete:
    DELETE FROM logical_replicate_placement WHERE x = 15;
@@ -196,7 +196,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -227,10 +227,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-select:
    SELECT * FROM logical_replicate_placement ORDER BY y;
@@ -255,7 +255,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-get-shard-distribution:
    select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@@ -279,10 +279,10 @@ pg_advisory_lock
(1 row)

step s1-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-select-for-update:
    SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE;
@@ -307,7 +307,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-get-shard-distribution:
    select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@@ -320,7 +320,7 @@ nodeport
starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution
step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -329,10 +329,10 @@ step s2-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-end:
    COMMIT;

step s1-move-placement: <... completed>
master_move_shard_placement
@@ -341,7 +341,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -362,7 +362,7 @@ nodeport
starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution
step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -372,10 +372,10 @@ step s2-upsert:
    INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-end:
    COMMIT;

step s1-move-placement: <... completed>
master_move_shard_placement
@@ -384,7 +384,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -408,7 +408,7 @@ step s1-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);

step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -417,10 +417,10 @@ step s2-update:
    UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-end:
    COMMIT;

step s1-move-placement: <... completed>
master_move_shard_placement
@@ -429,7 +429,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -453,7 +453,7 @@ step s1-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);

step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -462,10 +462,10 @@ step s2-delete:
    DELETE FROM logical_replicate_placement WHERE x = 15;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-end:
    COMMIT;

step s1-move-placement: <... completed>
master_move_shard_placement
@@ -474,7 +474,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-select:
    SELECT * FROM logical_replicate_placement order by y;
@@ -497,7 +497,7 @@ step s1-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);

step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -511,7 +511,7 @@ step s2-select:
(1 row)

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_move_shard_placement
---------------------------------------------------------------------
@@ -519,10 +519,10 @@ master_move_shard_placement
(1 row)

step s2-end:
    COMMIT;

step s1-end:
    COMMIT;

step s1-get-shard-distribution:
    select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@@ -538,7 +538,7 @@ step s1-insert:
    INSERT INTO logical_replicate_placement VALUES (15, 15);

step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;
@@ -552,10 +552,10 @@ step s2-select-for-update:
(1 row)

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
<waiting ...>

step s2-end:
    COMMIT;

step s1-move-placement: <... completed>
master_move_shard_placement
@@ -564,7 +564,7 @@ master_move_shard_placement
(1 row)

step s1-end:
    COMMIT;

step s1-get-shard-distribution:
    select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport;
@@ -577,13 +577,13 @@ nodeport
starting permutation: s1-begin s2-begin s1-move-placement s2-move-placement s1-end s2-end
step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);

master_move_shard_placement
---------------------------------------------------------------------
@@ -591,19 +591,14 @@ master_move_shard_placement
(1 row)

step s2-move-placement:
    SELECT master_move_shard_placement(
        get_shard_id_for_distribution_column('logical_replicate_placement', 4),
        'localhost', 57637, 'localhost', 57638);
-<waiting ...>
+ERROR: could not acquire the lock required to move public.logical_replicate_placement
step s1-end:
    COMMIT;
-step s2-move-placement: <... completed>
-master_move_shard_placement
----------------------------------------------------------------------
-(1 row)

step s2-end:
    COMMIT;
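The last permutation captures the core of the new behavior for moves: once one session holds the lock for a colocation group, a second move targeting any shard of that group fails fast instead of queueing. A minimal two-session sketch under the same assumptions as the test (table, hosts, and ports as in the setup):

    -- session 1: move one shard of the colocation group inside a transaction
    BEGIN;
    SELECT master_move_shard_placement(
        (SELECT * FROM selected_shard),
        'localhost', 57637, 'localhost', 57638);

    -- session 2: a move of a different shard in the same group now errors
    -- instead of blocking until session 1 commits
    SELECT master_move_shard_placement(
        get_shard_id_for_distribution_column('logical_replicate_placement', 4),
        'localhost', 57637, 'localhost', 57638);
    -- ERROR: could not acquire the lock required to move public.logical_replicate_placement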
View File
@@ -8,7 +8,7 @@ run_command_on_workers
(2 rows)

step s1-grant:
    SELECT result FROM run_command_on_placements('my_table', 'GRANT SELECT ON TABLE %s TO my_user');

result
---------------------------------------------------------------------
@@ -19,7 +19,7 @@ GRANT
(4 rows)

step s1-connect:
    SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());

make_external_connection_to_node
---------------------------------------------------------------------
@@ -27,11 +27,11 @@ make_external_connection_to_node
(1 row)

step s2-connect:
    SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database());

ERROR: connection failed
step s2-connect-superuser:
    SELECT make_external_connection_to_node('localhost', 57637, 'postgres', current_database());

make_external_connection_to_node
---------------------------------------------------------------------
@@ -39,17 +39,11 @@ make_external_connection_to_node
(1 row)

step s3-select:
    SET ROLE my_user;
    SELECT count(*) FROM my_table;

count
---------------------------------------------------------------------
    0
(1 row)

-run_command_on_workers
----------------------------------------------------------------------
-(localhost,57637,t,t)
-(localhost,57638,t,t)
-(2 rows)
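The trailing run_command_on_workers rows drop out of this expected file, which appears to line up with the teardown change at the end of this diff, where the test now drops my_table explicitly. For reference, the grant step relies on the placement-level helper; a minimal usage sketch with the names from the test:

    -- run a command once per shard placement of a distributed table;
    -- %s is substituted with the shard-qualified relation name
    SELECT result
    FROM run_command_on_placements('my_table',
                                   'GRANT SELECT ON TABLE %s TO my_user');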
View File
@@ -2,13 +2,13 @@ Parsed test spec with 2 sessions
starting permutation: s1-load-cache s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements
step s1-load-cache:
    COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;

step s2-begin:
    BEGIN;

step s2-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');

master_move_shard_placement
---------------------------------------------------------------------
@@ -16,24 +16,23 @@ master_move_shard_placement
(1 row)

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
-<waiting ...>
+ERROR: could not acquire the lock required to move public.test_move_table
step s2-commit:
    COMMIT;
-step s1-move-placement: <... completed>
-ERROR: source placement must be in active state

step s2-print-placements:
    SELECT
        nodename, nodeport, count(*)
    FROM
        pg_dist_shard_placement
    WHERE
        shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
        AND
        shardstate = 1
    GROUP BY
        nodename, nodeport;

nodename |nodeport|count
---------------------------------------------------------------------
@@ -43,10 +42,10 @@ localhost| 57638| 2
starting permutation: s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements
step s2-begin:
    BEGIN;

step s2-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');

master_move_shard_placement
---------------------------------------------------------------------
@@ -54,24 +53,23 @@ master_move_shard_placement
(1 row)

step s1-move-placement:
    SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical');
-<waiting ...>
+ERROR: could not acquire the lock required to move public.test_move_table
step s2-commit:
    COMMIT;
-step s1-move-placement: <... completed>
-ERROR: source placement must be in active state

step s2-print-placements:
    SELECT
        nodename, nodeport, count(*)
    FROM
        pg_dist_shard_placement
    WHERE
        shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass)
        AND
        shardstate = 1
    GROUP BY
        nodename, nodeport;

nodename |nodeport|count
---------------------------------------------------------------------
View File
@@ -97,5 +97,6 @@ test: isolation_replicated_dist_on_mx
test: isolation_replicate_reference_tables_to_coordinator
test: isolation_multiuser_locking
test: isolation_acquire_distributed_locks
+test: isolation_concurrent_move_create_table
test: isolation_check_mx
View File
@@ -125,22 +125,22 @@ step "s2-print-index-count"
// Run a shard split while concurrently performing DML and index creation.
// We expect DML and COPY to fail because the shard they are waiting for is destroyed.
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"

// The same tests without loading the cache first
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"

// Concurrent shard splits block on different shards of the same table (or any colocated table)
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster"
// The same test as above without loading the cache first
permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster"

// Concurrent DDL blocks on different shards of the same table (or any colocated table)
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"
// The same test without loading the cache first
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"
View File
@@ -0,0 +1,137 @@
setup
{
    CREATE TABLE concurrent_table_1(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_2(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_3(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_4(id int PRIMARY KEY);
    CREATE TABLE concurrent_table_5(id int PRIMARY KEY);

    SET citus.shard_replication_factor TO 1;
    SELECT create_distributed_table('concurrent_table_1', 'id', colocate_with := 'none');
    SELECT create_distributed_table('concurrent_table_4', 'id');

    SELECT nodeid INTO first_node_id FROM pg_dist_node WHERE nodeport = 57637;
}

teardown
{
    DROP TABLE concurrent_table_1, concurrent_table_2, concurrent_table_3, concurrent_table_4, concurrent_table_5, first_node_id CASCADE;
}

session "s1"

step "s1-move-shard-logical"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
}

step "s1-move-shard-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
}

step "s1-split-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_split_shard_by_split_points(
        shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid;
}

session "s2"

step "s2-begin"
{
    BEGIN;
}

step "s2-create_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
}

step "s2-commit"
{
    COMMIT;
}

session "s3"

step "s3-create_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1');
}

step "s3-sanity-check"
{
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
}

step "s3-sanity-check-2"
{
    SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
}

step "s3-sanity-check-3"
{
    SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
}

step "s3-sanity-check-4"
{
    SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
}

session "s4"

step "s4-begin"
{
    BEGIN;
}

step "s4-commit"
{
    COMMIT;
}

step "s4-move-shard-logical"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
}

step "s4-move-shard-block"
{
    WITH shardid AS (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
}

session "s5"

step "s5-setup-rep-factor"
{
    SET citus.shard_replication_factor TO 1;
}

step "s5-create_implicit_colocated_distributed_table"
{
    SELECT create_distributed_table('concurrent_table_5', 'id');
}

// Concurrent create_distributed_table calls into the same colocation group should not block each other
permutation "s2-begin" "s2-create_distributed_table" "s3-create_distributed_table" "s2-commit"

// Concurrently creating a colocated table and moving or splitting a shard properly block each other, and the cluster stays healthy
permutation "s2-begin" "s2-create_distributed_table" "s1-move-shard-logical" "s2-commit" "s3-sanity-check" "s3-sanity-check-2"
permutation "s2-begin" "s2-create_distributed_table" "s1-move-shard-block" "s2-commit" "s3-sanity-check" "s3-sanity-check-2"
permutation "s2-begin" "s2-create_distributed_table" "s1-split-block" "s2-commit" "s3-sanity-check" "s3-sanity-check-2"

// The same tests as above, but with implicitly colocated tables
permutation "s4-begin" "s4-move-shard-logical" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4"
permutation "s4-begin" "s4-move-shard-block" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4"
View File
@@ -26,6 +26,7 @@ teardown
{
    SELECT run_command_on_workers('ALTER SYSTEM RESET citus.max_client_connections');
    SELECT run_command_on_workers('SELECT pg_reload_conf()');
+   DROP TABLE my_table;
}

session "s1"