mirror of https://github.com/citusdata/citus.git
Merge pull request #1488 from citusdata/fix_conflicting_vacuum_insert
Fix conflicting locks in VACUUM and INSERT
commit d6d88efc2d
@@ -1331,8 +1331,13 @@ VacuumTaskList(Oid relationId, VacuumStmt *vacuumStmt)
 	char *schemaName = get_namespace_name(schemaId);
 	char *tableName = get_rel_name(relationId);
 
-	/* lock relation metadata before getting shard list */
-	LockRelationDistributionMetadata(relationId, ShareLock);
+	/*
+	 * We obtain ShareUpdateExclusiveLock here to not conflict with INSERT's
+	 * RowExclusiveLock. However if VACUUM FULL is used, we already obtain
+	 * AccessExclusiveLock before reaching to that point and INSERT's will be
+	 * blocked anyway. This is inline with PostgreSQL's own behaviour.
+	 */
+	LockRelationOid(relationId, ShareUpdateExclusiveLock);
 
 	shardIntervalList = LoadShardIntervalList(relationId);
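For context, the relevant part of PostgreSQL's lock conflict matrix is easy to demonstrate against any plain table: ShareLock (the old mode) conflicts with the RowExclusiveLock an INSERT holds, while ShareUpdateExclusiveLock (the level plain VACUUM uses) does not. A minimal sketch, assuming a throwaway table t and two psql sessions:

-- once, in either session
CREATE TABLE t (a int);

-- session 1
BEGIN;
INSERT INTO t VALUES (1);                       -- holds RowExclusiveLock on t until commit

-- session 2
BEGIN;
LOCK TABLE t IN SHARE UPDATE EXCLUSIVE MODE;    -- acquired immediately: no conflict with RowExclusiveLock
-- LOCK TABLE t IN SHARE MODE;                  -- would block here: ShareLock conflicts with RowExclusiveLock
ROLLBACK;

-- session 1
COMMIT;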
@@ -113,8 +113,8 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 	 */
 	EnsureTableOwner(distributedTableId);
 
-	/* we plan to add shards: get an exclusive metadata lock */
-	LockRelationDistributionMetadata(distributedTableId, ExclusiveLock);
+	/* we plan to add shards: get an exclusive lock on relation oid */
+	LockRelationOid(distributedTableId, ExclusiveLock);
 
 	relationOwner = TableOwner(distributedTableId);
@@ -264,8 +264,8 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId)
 	 */
 	EnsureTableOwner(targetRelationId);
 
-	/* we plan to add shards: get an exclusive metadata lock on the target relation */
-	LockRelationDistributionMetadata(targetRelationId, ExclusiveLock);
+	/* we plan to add shards: get an exclusive lock on target relation oid */
+	LockRelationOid(targetRelationId, ExclusiveLock);
 
 	/* we don't want source table to get dropped before we colocate with it */
 	LockRelationOid(sourceRelationId, AccessShareLock);
@@ -369,8 +369,8 @@ CreateReferenceTableShard(Oid distributedTableId)
 	 */
 	EnsureTableOwner(distributedTableId);
 
-	/* we plan to add shards: get an exclusive metadata lock */
-	LockRelationDistributionMetadata(distributedTableId, ExclusiveLock);
+	/* we plan to add shards: get an exclusive lock on relation oid */
+	LockRelationOid(distributedTableId, ExclusiveLock);
 
 	relationOwner = TableOwner(distributedTableId);
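These three call sites keep the same ExclusiveLock level as before; the lock is simply taken directly on the relation OID instead of going through the removed wrapper. As a rough sketch of how to observe it (assuming a hypothetical distributed table dist_table whose shards are being created by another session, e.g. inside create_distributed_table), the held lock shows up in pg_locks:

-- session 2, while shard creation is still in progress in session 1
SELECT mode, granted
FROM pg_locks
WHERE locktype = 'relation'
  AND relation = 'dist_table'::regclass;
-- expect a row with mode = 'ExclusiveLock' held by the creating backend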
@@ -184,21 +184,6 @@ TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode)
 }
 
 
-/*
- * LockRelationDistributionMetadata returns after getting a the lock used for a
- * relation's distribution metadata, blocking if required. Only ExclusiveLock
- * and ShareLock modes are supported. Any locks acquired using this method are
- * released at transaction end.
- */
-void
-LockRelationDistributionMetadata(Oid relationId, LOCKMODE lockMode)
-{
-	Assert(lockMode == ExclusiveLock || lockMode == ShareLock);
-
-	(void) LockRelationOid(relationId, lockMode);
-}
-
-
 /*
  * LockShardResource acquires a lock needed to modify data on a remote shard.
  * This task may be assigned to multiple backends at the same time, so the lock
@@ -66,7 +66,6 @@ typedef enum AdvisoryLocktagClass
 /* Lock shard/relation metadata for safe modifications */
 extern void LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode);
 extern bool TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode);
-extern void LockRelationDistributionMetadata(Oid relationId, LOCKMODE lockMode);
 
 /* Lock shard data, for DML commands or remote fetches */
 extern void LockShardResource(uint64 shardId, LOCKMODE lockmode);
@@ -0,0 +1,36 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1-begin s1-insert s2-vacuum-analyze s1-commit
+create_distributed_table
+
+
+step s1-begin:
+	BEGIN;
+
+step s1-insert:
+	INSERT INTO test_insert_vacuum VALUES(1, 1);
+
+step s2-vacuum-analyze:
+	VACUUM ANALYZE test_insert_vacuum;
+
+step s1-commit:
+	COMMIT;
+
+
+starting permutation: s1-begin s1-insert s2-vacuum-full s1-commit
+create_distributed_table
+
+
+step s1-begin:
+	BEGIN;
+
+step s1-insert:
+	INSERT INTO test_insert_vacuum VALUES(1, 1);
+
+step s2-vacuum-full:
+	VACUUM FULL test_insert_vacuum;
+ <waiting ...>
+step s1-commit:
+	COMMIT;
+
+step s2-vacuum-full: <... completed>
@@ -8,3 +8,5 @@ test: isolation_cluster_management
 test: isolation_dml_vs_repair isolation_copy_placement_vs_copy_placement isolation_cancellation
 test: isolation_concurrent_dml isolation_data_migration
 test: isolation_drop_shards isolation_copy_placement_vs_modification
+
+test: isolation_insert_vs_vacuum
@@ -0,0 +1,47 @@
+setup
+{
+	SET citus.shard_replication_factor TO 1;
+	CREATE TABLE test_insert_vacuum(column1 int, column2 int);
+	SELECT create_distributed_table('test_insert_vacuum', 'column1');
+}
+
+teardown
+{
+	DROP TABLE test_insert_vacuum;
+}
+
+session "s1"
+
+step "s1-begin"
+{
+	BEGIN;
+}
+
+step "s1-insert"
+{
+	INSERT INTO test_insert_vacuum VALUES(1, 1);
+}
+
+step "s1-commit"
+{
+	COMMIT;
+}
+
+session "s2"
+
+step "s2-vacuum-analyze"
+{
+	VACUUM ANALYZE test_insert_vacuum;
+}
+
+step "s2-vacuum-full"
+{
+	VACUUM FULL test_insert_vacuum;
+}
+
+# INSERT and VACUUM ANALYZE should not block each other.
+permutation "s1-begin" "s1-insert" "s2-vacuum-analyze" "s1-commit"
+
+# INSERT and VACUUM FULL should block each other.
+permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit"
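The second permutation can also be walked through by hand; a rough sketch, assuming the setup block above has been run and two psql sessions are connected to the coordinator:

-- session 1
BEGIN;
INSERT INTO test_insert_vacuum VALUES (1, 1);   -- holds RowExclusiveLock on the distributed table

-- session 2
VACUUM FULL test_insert_vacuum;                 -- blocks: needs AccessExclusiveLock (the "<waiting ...>" line above)

-- session 1
COMMIT;                                         -- session 2's VACUUM FULL now completes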