verify shards exist for insert, delete, update (#6280)

Co-authored-by: Marco Slot <marco.slot@gmail.com>
pull/6295/head^2
aykut-bozkurt 2022-09-06 16:29:14 +03:00 committed by GitHub
parent b15cb146a3
commit 69726648ab
14 changed files with 655 additions and 219 deletions
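
The behavior change is easiest to see in the scenario exercised by the isolation tests below: one session modifies a row through a cached fast-path plan while another session isolates the tenant (splits the shard) that holds the row. A rough SQL sketch using the table and UDF names from the test specs (shard IDs and values are illustrative, not a verbatim test case):

-- session 2: move the tenant with id = 5 into a new shard
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');

-- session 1: a fast-path INSERT whose anchor shard was just split is now
-- rerouted to the new shard instead of failing with
-- "could not find valid entry for shard ..."
INSERT INTO isolation_table VALUES (5, 10);

-- a modify that is not fast path (here: with a subquery) cannot be rerouted
-- and instead fails with a retryable error
UPDATE isolation_table SET value = 5 WHERE id IN (SELECT max(id) FROM isolation_table);
-- ERROR:  shard for the given value does not exist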

View File

@ -31,6 +31,7 @@
#include "distributed/multi_server_executor.h"
#include "distributed/multi_router_planner.h"
#include "distributed/query_stats.h"
#include "distributed/shard_utils.h"
#include "distributed/subplan_execution.h"
#include "distributed/worker_log_messages.h"
#include "distributed/worker_protocol.h"
@ -67,6 +68,9 @@ static void CitusEndScan(CustomScanState *node);
static void CitusReScan(CustomScanState *node);
static void SetJobColocationId(Job *job);
static void EnsureForceDelegationDistributionKey(Job *job);
static void EnsureAnchorShardsInJobExist(Job *job);
static bool AnchorShardsInTaskListExist(List *taskList);
static void TryToRerouteFastPathModifyQuery(Job *job);
/* create custom scan methods for all executors */
@ -406,6 +410,19 @@ CitusBeginModifyScan(CustomScanState *node, EState *estate, int eflags)
/* prevent concurrent placement changes */
AcquireMetadataLocks(workerJob->taskList);
/*
* In case of a split, the shard might no longer be available. In that
* case try to reroute. We can only do this for fast path queries.
*/
if (currentPlan->fastPathRouterPlan &&
!AnchorShardsInTaskListExist(workerJob->taskList))
{
TryToRerouteFastPathModifyQuery(workerJob);
}
/* ensure there is no invalid shard */
EnsureAnchorShardsInJobExist(workerJob);
/* modify tasks are always assigned using first-replica policy */
workerJob->taskList = FirstReplicaAssignTaskList(workerJob->taskList);
}
@ -440,6 +457,65 @@ CitusBeginModifyScan(CustomScanState *node, EState *estate, int eflags)
}
/*
* TryToRerouteFastPathModifyQuery tries to reroute the given job onto existing
* shards by regenerating its task list when its anchor shard no longer exists.
*
* It should only be called if the job belongs to a fast-path modify query.
*/
static void
TryToRerouteFastPathModifyQuery(Job *job)
{
if (job->jobQuery->commandType == CMD_INSERT)
{
RegenerateTaskListForInsert(job);
}
else
{
RegenerateTaskForFasthPathQuery(job);
RebuildQueryStrings(job);
}
}
/*
* EnsureAnchorShardsInJobExist ensures that all anchor shards in the given job's
* task list still exist. If any no longer exists, it throws an error.
*/
static void
EnsureAnchorShardsInJobExist(Job *job)
{
if (!AnchorShardsInTaskListExist(job->taskList))
{
ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("shard for the given value does not exist"),
errdetail(
"A concurrent shard split may have moved the data into a new set of shards."),
errhint("Retry the query.")));
}
}
/*
* AnchorShardsInTaskListExist checks whether all the anchor shards in the task list
* still exist.
*/
static bool
AnchorShardsInTaskListExist(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
{
if (!ShardExists(task->anchorShardId))
{
return false;
}
}
return true;
}
/*
* ModifyJobNeedsEvaluation checks whether the functions and parameters in the job query
* need to be evaluated before we can build task query strings.
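
Since EnsureAnchorShardsInJobExist raises the error with ERRCODE_T_R_SERIALIZATION_FAILURE (SQLSTATE 40001), applications that already retry serialization failures should recover by re-running the statement. A sketch of the failing session, with the message text taken from the ereport above and the statement purely illustrative:

UPDATE isolation_table SET value = 5 WHERE id IN (SELECT max(id) FROM isolation_table);
-- ERROR:  shard for the given value does not exist
-- DETAIL:  A concurrent shard split may have moved the data into a new set of shards.
-- HINT:  Retry the query.

-- a retry plans against the newly created shards and is expected to succeed
UPDATE isolation_table SET value = 5 WHERE id IN (SELECT max(id) FROM isolation_table);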

View File

@ -231,7 +231,7 @@ static ScanKeyData DistObjectScanKey[3];
/* local function forward declarations */
static HeapTuple PgDistPartitionTupleViaCatalog(Oid relationId);
static ShardIdCacheEntry * LookupShardIdCacheEntry(int64 shardId);
static ShardIdCacheEntry * LookupShardIdCacheEntry(int64 shardId, bool missingOk);
static CitusTableCacheEntry * BuildCitusTableCacheEntry(Oid relationId);
static void BuildCachedShardList(CitusTableCacheEntry *cacheEntry);
static void PrepareWorkerNodeCache(void);
@ -280,10 +280,11 @@ static Oid LookupEnumValueId(Oid typeId, char *valueName);
static void InvalidateCitusTableCacheEntrySlot(CitusTableCacheEntrySlot *cacheSlot);
static void InvalidateDistTableCache(void);
static void InvalidateDistObjectCache(void);
static void InitializeTableCacheEntry(int64 shardId);
static bool InitializeTableCacheEntry(int64 shardId, bool missingOk);
static bool IsCitusTableTypeInternal(char partitionMethod, char replicationModel,
CitusTableType tableType);
static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry);
static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry, bool
missingOk);
static Oid DistAuthinfoRelationId(void);
static Oid DistAuthinfoIndexId(void);
@ -781,7 +782,8 @@ CitusTableList(void)
ShardInterval *
LoadShardInterval(uint64 shardId)
{
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
int shardIndex = shardIdEntry->shardIndex;
@ -798,13 +800,33 @@ LoadShardInterval(uint64 shardId)
}
/*
* ShardExists returns whether the given shard exists. It does not error out when
* the shard is missing; it simply returns false.
*/
bool
ShardExists(uint64 shardId)
{
bool missingOk = true;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
if (!shardIdEntry)
{
return false;
}
return true;
}
/*
* RelationIdForShard returns the relationId of the given shardId.
*/
Oid
RelationIdForShard(uint64 shardId)
{
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
return tableEntry->relationId;
}
@ -817,7 +839,8 @@ RelationIdForShard(uint64 shardId)
bool
ReferenceTableShardId(uint64 shardId)
{
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
return IsCitusTableTypeCacheEntry(tableEntry, REFERENCE_TABLE);
}
@ -835,7 +858,8 @@ DistributedTableShardId(uint64 shardId)
return false;
}
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
return IsCitusTableTypeCacheEntry(tableEntry, DISTRIBUTED_TABLE);
}
@ -850,7 +874,8 @@ DistributedTableShardId(uint64 shardId)
GroupShardPlacement *
LoadGroupShardPlacement(uint64 shardId, uint64 placementId)
{
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
int shardIndex = shardIdEntry->shardIndex;
@ -885,7 +910,8 @@ LoadGroupShardPlacement(uint64 shardId, uint64 placementId)
ShardPlacement *
LoadShardPlacement(uint64 shardId, uint64 placementId)
{
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
int shardIndex = shardIdEntry->shardIndex;
GroupShardPlacement *groupPlacement = LoadGroupShardPlacement(shardId, placementId);
@ -908,7 +934,8 @@ ShardPlacementOnGroupIncludingOrphanedPlacements(int32 groupId, uint64 shardId)
{
ShardPlacement *placementOnNode = NULL;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
int shardIndex = shardIdEntry->shardIndex;
GroupShardPlacement *placementArray =
@ -1128,7 +1155,8 @@ ShardPlacementListIncludingOrphanedPlacements(uint64 shardId)
{
List *placementList = NIL;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId);
bool missingOk = false;
ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId, missingOk);
CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry;
int shardIndex = shardIdEntry->shardIndex;
@ -1168,15 +1196,24 @@ ShardPlacementListIncludingOrphanedPlacements(uint64 shardId)
* build the cache entry. Afterwards we know that the shard has to be in the
* cache if it exists. If the shard does *not* exist and missingOk is false, this
* function errors out (because LookupShardRelationFromCatalog errors out).
*
* If missingOk is true and the shard cannot be found, the function returns false.
*/
static void
InitializeTableCacheEntry(int64 shardId)
static bool
InitializeTableCacheEntry(int64 shardId, bool missingOk)
{
bool missingOk = false;
Oid relationId = LookupShardRelationFromCatalog(shardId, missingOk);
if (!OidIsValid(relationId))
{
Assert(missingOk);
return false;
}
/* trigger building the cache for the shard id */
GetCitusTableCacheEntry(relationId); /* lgtm[cpp/return-value-ignored] */
return true;
}
@ -1186,7 +1223,7 @@ InitializeTableCacheEntry(int64 shardId)
* entry in the cache and false if it didn't.
*/
static bool
RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry)
RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry, bool missingOk)
{
/*
* We might have some concurrent metadata changes. In order to get the changes,
@ -1198,7 +1235,8 @@ RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry)
return false;
}
Oid oldRelationId = shardEntry->tableEntry->relationId;
Oid currentRelationId = LookupShardRelationFromCatalog(shardEntry->shardId, false);
Oid currentRelationId = LookupShardRelationFromCatalog(shardEntry->shardId,
missingOk);
/*
* The relation OID to which the shard belongs could have changed,
@ -1213,11 +1251,12 @@ RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry)
/*
* LookupShardCacheEntry returns the cache entry belonging to a shard, or
* errors out if that shard is unknown.
* LookupShardIdCacheEntry returns the cache entry belonging to a shard.
* It errors out if that shard is unknown and missingOk is false; otherwise,
* it returns a NULL cache entry.
*/
static ShardIdCacheEntry *
LookupShardIdCacheEntry(int64 shardId)
LookupShardIdCacheEntry(int64 shardId, bool missingOk)
{
bool foundInCache = false;
bool recheck = false;
@ -1231,12 +1270,16 @@ LookupShardIdCacheEntry(int64 shardId)
if (!foundInCache)
{
InitializeTableCacheEntry(shardId);
if (!InitializeTableCacheEntry(shardId, missingOk))
{
return NULL;
}
recheck = true;
}
else
{
recheck = RefreshTableCacheEntryIfInvalid(shardEntry);
recheck = RefreshTableCacheEntryIfInvalid(shardEntry, missingOk);
}
/*
@ -1250,7 +1293,8 @@ LookupShardIdCacheEntry(int64 shardId)
if (!foundInCache)
{
ereport(ERROR, (errmsg("could not find valid entry for shard "
int eflag = (missingOk) ? DEBUG1 : ERROR;
ereport(eflag, (errmsg("could not find valid entry for shard "
UINT64_FORMAT, shardId)));
}
}
@ -4253,6 +4297,9 @@ InvalidateCitusTableCacheEntrySlot(CitusTableCacheEntrySlot *cacheSlot)
{
/* reload the metadata */
cacheSlot->citusTableMetadata->isValid = false;
/* clean up ShardIdCacheHash */
RemoveStaleShardIdCacheEntries(cacheSlot->citusTableMetadata);
}
}
@ -4653,37 +4700,6 @@ LookupShardRelationFromCatalog(int64 shardId, bool missingOk)
}
/*
* ShardExists returns whether the given shard ID exists in pg_dist_shard.
*/
bool
ShardExists(int64 shardId)
{
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Relation pgDistShard = table_open(DistShardRelationId(), AccessShareLock);
bool shardExists = false;
ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid,
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistShard,
DistShardShardidIndexId(), true,
NULL, scanKeyCount, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
if (HeapTupleIsValid(heapTuple))
{
shardExists = true;
}
systable_endscan(scanDescriptor);
table_close(pgDistShard, NoLock);
return shardExists;
}
/*
* GetPartitionTypeInputInfo populates output parameters with the interval type
* identifier and modifier for the specified partition key/method combination.
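
The ShardExists implementation removed above answered the question with a direct scan of pg_dist_shard; the new version earlier in this file goes through the shard-id cache with missingOk semantics instead. For manual inspection, a roughly equivalent catalog query (the shard ID is illustrative) would be:

SELECT EXISTS (SELECT 1 FROM pg_dist_shard WHERE shardid = 1500016) AS shard_exists;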

View File

@ -30,7 +30,6 @@ static List * RemoteScanTargetList(List *workerTargetList);
static PlannedStmt * BuildSelectStatementViaStdPlanner(Query *combineQuery,
List *remoteScanTargetList,
CustomScan *remoteScan);
static bool FindCitusExtradataContainerRTE(Node *node, RangeTblEntry **result);
static Plan * CitusCustomScanPathPlan(PlannerInfo *root, RelOptInfo *rel,
struct CustomPath *best_path, List *tlist,
@ -323,7 +322,7 @@ BuildSelectStatementViaStdPlanner(Query *combineQuery, List *remoteScanTargetLis
* Finds the rangetable entry in the query that refers to the citus_extradata_container
* and stores the pointer in result.
*/
static bool
bool
FindCitusExtradataContainerRTE(Node *node, RangeTblEntry **result)
{
if (node == NULL)

View File

@ -17,6 +17,7 @@
#include "catalog/pg_constraint.h"
#include "distributed/citus_nodefuncs.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/combine_query_planner.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/insert_select_planner.h"
#include "distributed/listutils.h"
@ -79,12 +80,13 @@ RebuildQueryStrings(Job *workerJob)
if (UpdateOrDeleteQuery(query))
{
List *relationShardList = task->relationShardList;
/*
* For UPDATE and DELETE queries, we may have subqueries and joins, so
* we use relation shard list to update shard names and call
* pg_get_query_def() directly.
*/
List *relationShardList = task->relationShardList;
UpdateRelationToShardNames((Node *) query, relationShardList);
}
else if (query->commandType == CMD_INSERT && task->modifyWithSubquery)
@ -229,7 +231,16 @@ UpdateRelationToShardNames(Node *node, List *relationShardList)
RangeTblEntry *newRte = (RangeTblEntry *) node;
if (newRte->rtekind != RTE_RELATION)
if (newRte->rtekind == RTE_FUNCTION)
{
newRte = NULL;
if (!FindCitusExtradataContainerRTE(node, &newRte))
{
/* only update function rtes containing citus_extradata_container */
return false;
}
}
else if (newRte->rtekind != RTE_RELATION)
{
return false;
}

View File

@ -2677,9 +2677,6 @@ TargetShardIntervalForFastPathQuery(Query *query, bool *isMultiShardQuery,
}
/* we're only expecting single shard from a single table */
Node *distKey PG_USED_FOR_ASSERTS_ONLY = NULL;
Assert(FastPathRouterQuery(query, &distKey) || !EnableFastPathRouterPlanner);
if (list_length(prunedShardIntervalList) > 1)
{
*isMultiShardQuery = true;

View File

@ -16,7 +16,10 @@
#include "utils/fmgrprotos.h"
#include "utils/lsyscache.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/listutils.h"
#include "distributed/log_utils.h"
#include "distributed/metadata_utility.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/relay_utility.h"
#include "distributed/shard_utils.h"

View File

@ -27,6 +27,7 @@ extern Path * CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInf
CustomScan *remoteScan);
extern PlannedStmt * PlanCombineQuery(struct DistributedPlan *distributedPlan,
struct CustomScan *dataScan);
extern bool FindCitusExtradataContainerRTE(Node *node, RangeTblEntry **result);
extern bool ReplaceCitusExtraDataContainer;
extern CustomScan *ReplaceCitusExtraDataContainerWithCustomScan;

View File

@ -157,6 +157,7 @@ extern uint32 ColocationIdViaCatalog(Oid relationId);
extern bool IsCitusLocalTableByDistParams(char partitionMethod, char replicationModel);
extern List * CitusTableList(void);
extern ShardInterval * LoadShardInterval(uint64 shardId);
extern bool ShardExists(uint64 shardId);
extern Oid RelationIdForShard(uint64 shardId);
extern bool ReferenceTableShardId(uint64 shardId);
extern bool DistributedTableShardId(uint64 shardId);
@ -174,7 +175,6 @@ extern int32 GetLocalNodeId(void);
extern void CitusTableCacheFlushInvalidatedEntries(void);
extern Oid LookupShardRelationFromCatalog(int64 shardId, bool missing_ok);
extern List * ShardPlacementListIncludingOrphanedPlacements(uint64 shardId);
extern bool ShardExists(int64 shardId);
extern void CitusInvalidateRelcacheByRelid(Oid relationId);
extern void CitusInvalidateRelcacheByShardId(int64 shardId);
extern void InvalidateForeignKeyGraph(void);

View File

@ -56,7 +56,6 @@ step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -80,7 +79,7 @@ nodeport|shardid|success|result
id|value
---------------------------------------------------------------------
123456789| 1
123456789| 111
(1 row)
@ -140,7 +139,6 @@ step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -158,14 +156,13 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
(0 rows)
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster
@ -221,7 +218,6 @@ get_shard_id_for_distribution_column
1500002
(1 row)
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -239,13 +235,14 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
id|value
---------------------------------------------------------------------
(0 rows)
123456789| 1
(1 row)
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster
@ -373,7 +370,6 @@ step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -397,7 +393,7 @@ nodeport|shardid|success|result
id|value
---------------------------------------------------------------------
123456789| 1
123456789| 111
(1 row)
@ -453,7 +449,6 @@ step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -471,14 +466,13 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
(0 rows)
starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster
@ -530,7 +524,6 @@ get_shard_id_for_distribution_column
1500002
(1 row)
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -548,13 +541,14 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
id|value
---------------------------------------------------------------------
(0 rows)
123456789| 1
(1 row)
starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster

View File

@ -434,7 +434,6 @@ step s1-end:
COMMIT;
step s4-insert: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s4-end:
COMMIT;
@ -452,13 +451,14 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57638|1500002|t | 0
57638|1500003|t | 1
57638|1500003|t | 2
(2 rows)
id|value
---------------------------------------------------------------------
900| 1
123456789| 1
(1 row)
(2 rows)
starting permutation: s2-print-cluster s3-acquire-advisory-lock s1-begin s2-begin s1-non-blocking-shard-split s2-insert s2-end s2-print-cluster s3-release-advisory-lock s1-end s2-print-cluster

View File

@ -44,7 +44,6 @@ step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -69,7 +68,7 @@ nodeport|shardid|success|result
id|value
---------------------------------------------------------------------
5| 10
5| 5
(1 row)
@ -117,7 +116,6 @@ step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -135,11 +133,85 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500009|t | 0
57637|1500010|t | 1
57637|1500010|t | 0
57637|1500011|t | 0
57638|1500008|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-update-complex s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500016
(1 row)
step s1-update-complex:
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
<waiting ...>
step s2-commit:
COMMIT;
step s1-update-complex: <... completed>
ERROR: shard for the given value does not exist
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500015|t | 0
57637|1500016|t | 1
57637|1500017|t | 0
57638|1500014|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
@ -177,7 +249,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500016
1500022
(1 row)
step s1-insert:
@ -187,7 +259,6 @@ step s2-commit:
COMMIT;
step s1-insert: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -204,15 +275,16 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500015|t | 0
57637|1500016|t | 0
57637|1500017|t | 0
57638|1500014|t | 0
57637|1500021|t | 0
57637|1500022|t | 1
57637|1500023|t | 0
57638|1500020|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
5| 10
(1 row)
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster
@ -246,7 +318,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500022
1500028
(1 row)
step s1-copy:
@ -273,10 +345,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500021|t | 0
57637|1500022|t | 0
57637|1500023|t | 0
57638|1500020|t | 0
57637|1500027|t | 0
57637|1500028|t | 0
57637|1500029|t | 0
57638|1500026|t | 0
(4 rows)
id|value
@ -315,7 +387,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500028
1500034
(1 row)
step s1-ddl:
@ -341,10 +413,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500027|t | 0
57637|1500028|t | 0
57637|1500029|t | 0
57638|1500026|t | 0
57637|1500033|t | 0
57637|1500034|t | 0
57637|1500035|t | 0
57638|1500032|t | 0
(4 rows)
id|value
@ -399,7 +471,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500034
1500040
(1 row)
step s1-update:
@ -409,7 +481,6 @@ step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -426,15 +497,15 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500033|t | 0
57637|1500034|t | 1
57637|1500035|t | 0
57638|1500032|t | 0
57637|1500039|t | 0
57637|1500040|t | 1
57637|1500041|t | 0
57638|1500038|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 10
5| 5
(1 row)
@ -469,7 +540,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500040
1500046
(1 row)
step s1-delete:
@ -479,7 +550,6 @@ step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -496,10 +566,81 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500039|t | 0
57637|1500040|t | 1
57637|1500041|t | 0
57638|1500038|t | 0
57637|1500045|t | 0
57637|1500046|t | 0
57637|1500047|t | 0
57638|1500044|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-update-complex s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'block_writes');
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500052
(1 row)
step s1-update-complex:
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
<waiting ...>
step s2-commit:
COMMIT;
step s1-update-complex: <... completed>
ERROR: shard for the given value does not exist
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500051|t | 0
57637|1500052|t | 1
57637|1500053|t | 0
57638|1500050|t | 0
(4 rows)
id|value
@ -536,7 +677,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500046
1500058
(1 row)
step s1-insert:
@ -546,7 +687,6 @@ step s2-commit:
COMMIT;
step s1-insert: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
@ -563,15 +703,16 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500045|t | 0
57637|1500046|t | 0
57637|1500047|t | 0
57638|1500044|t | 0
57637|1500057|t | 0
57637|1500058|t | 1
57637|1500059|t | 0
57638|1500056|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
(0 rows)
5| 10
(1 row)
starting permutation: s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster
@ -602,7 +743,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500052
1500064
(1 row)
step s1-copy:
@ -629,10 +770,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500051|t | 0
57637|1500052|t | 0
57637|1500053|t | 0
57638|1500050|t | 0
57637|1500063|t | 0
57637|1500064|t | 0
57637|1500065|t | 0
57638|1500062|t | 0
(4 rows)
id|value
@ -668,7 +809,7 @@ step s2-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500058
1500070
(1 row)
step s1-ddl:
@ -694,10 +835,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500057|t | 0
57637|1500058|t | 0
57637|1500059|t | 0
57638|1500056|t | 0
57637|1500069|t | 0
57637|1500070|t | 0
57637|1500071|t | 0
57638|1500068|t | 0
(4 rows)
id|value
@ -744,7 +885,7 @@ step s1-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500064
1500076
(1 row)
step s2-isolate-tenant:
@ -767,10 +908,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500061|t | 1
57638|1500063|t | 0
57638|1500064|t | 0
57638|1500065|t | 0
57637|1500073|t | 1
57638|1500075|t | 0
57638|1500076|t | 0
57638|1500077|t | 0
(4 rows)
id|value
@ -799,7 +940,7 @@ step s1-isolate-tenant:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500070
1500082
(1 row)
step s2-isolate-tenant:
@ -822,10 +963,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500067|t | 1
57638|1500069|t | 0
57638|1500070|t | 0
57638|1500071|t | 0
57637|1500079|t | 1
57638|1500081|t | 0
57638|1500082|t | 0
57638|1500083|t | 0
(4 rows)
id|value

View File

@ -59,7 +59,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500078
1500090
(1 row)
step s2-commit:
@ -78,10 +78,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500077|t | 0
57637|1500078|t | 1
57637|1500079|t | 0
57638|1500074|t | 0
57637|1500089|t | 0
57637|1500090|t | 1
57637|1500091|t | 0
57638|1500086|t | 0
(4 rows)
id|value
@ -149,7 +149,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500086
1500098
(1 row)
step s2-commit:
@ -168,10 +168,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500085|t | 0
57637|1500086|t | 0
57637|1500087|t | 0
57638|1500082|t | 0
57637|1500097|t | 0
57637|1500098|t | 0
57637|1500099|t | 0
57638|1500094|t | 0
(4 rows)
id|value
@ -179,6 +179,98 @@ id|value
(0 rows)
starting permutation: s1-load-cache s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-update-complex s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
TRUNCATE isolation_table;
TRUNCATE isolation_table2;
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
INSERT INTO isolation_table2 VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-update-complex:
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500106
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500105|t | 0
57637|1500106|t | 1
57637|1500107|t | 0
57638|1500102|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 5
(1 row)
starting permutation: s1-load-cache s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
@ -235,7 +327,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500094
1500114
(1 row)
step s2-commit:
@ -254,10 +346,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500093|t | 0
57637|1500094|t | 1
57637|1500095|t | 0
57638|1500090|t | 0
57637|1500113|t | 0
57637|1500114|t | 1
57637|1500115|t | 0
57638|1500110|t | 0
(4 rows)
id|value
@ -321,7 +413,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500102
1500122
(1 row)
step s2-commit:
@ -340,10 +432,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500101|t | 1
57637|1500102|t | 1
57637|1500103|t | 2
57638|1500098|t | 1
57637|1500121|t | 1
57637|1500122|t | 1
57637|1500123|t | 2
57638|1500118|t | 1
(4 rows)
id|value
@ -411,7 +503,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500110
1500130
(1 row)
step s2-commit:
@ -430,10 +522,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500109|t | 0
57637|1500110|t | 1
57637|1500111|t | 0
57638|1500106|t | 0
57637|1500129|t | 0
57637|1500130|t | 1
57637|1500131|t | 0
57638|1500126|t | 0
(4 rows)
id|value
@ -497,7 +589,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500118
1500138
(1 row)
step s2-commit:
@ -516,10 +608,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500117|t | 0
57637|1500118|t | 0
57637|1500119|t | 0
57638|1500114|t | 0
57637|1500137|t | 0
57637|1500138|t | 0
57637|1500139|t | 0
57638|1500134|t | 0
(4 rows)
id|value
@ -527,6 +619,94 @@ id|value
(0 rows)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-update-complex s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO isolation_table VALUES (5, 10);
INSERT INTO isolation_table2 VALUES (5, 10);
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM isolation_table WHERE id = 5;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-isolate-tenant:
SELECT isolate_tenant_to_new_shard('isolation_table', 5, shard_transfer_mode => 'force_logical');
<waiting ...>
step s1-update-complex:
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
step s1-commit:
COMMIT;
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
---------------------------------------------------------------------
t
(1 row)
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500146
(1 row)
step s2-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('isolation_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM isolation_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500145|t | 0
57637|1500146|t | 1
57637|1500147|t | 0
57638|1500142|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
5| 5
(1 row)
starting permutation: s3-acquire-advisory-lock s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s1-commit s3-release-advisory-lock s2-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
@ -579,7 +759,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500126
1500154
(1 row)
step s2-commit:
@ -598,10 +778,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500125|t | 0
57637|1500126|t | 1
57637|1500127|t | 0
57638|1500122|t | 0
57637|1500153|t | 0
57637|1500154|t | 1
57637|1500155|t | 0
57638|1500150|t | 0
(4 rows)
id|value
@ -661,7 +841,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500134
1500162
(1 row)
step s2-commit:
@ -680,10 +860,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500133|t | 1
57637|1500134|t | 1
57637|1500135|t | 2
57638|1500130|t | 1
57637|1500161|t | 1
57637|1500162|t | 1
57637|1500163|t | 2
57638|1500158|t | 1
(4 rows)
id|value
@ -736,7 +916,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500142
1500170
(1 row)
step s2-print-cluster:
@ -752,10 +932,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500141|t | 0
57637|1500142|t | 1
57637|1500143|t | 0
57638|1500138|t | 0
57637|1500169|t | 0
57637|1500170|t | 1
57637|1500171|t | 0
57638|1500166|t | 0
(4 rows)
id|value
@ -804,7 +984,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500150
1500178
(1 row)
step s2-print-cluster:
@ -820,10 +1000,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500149|t | 0
57637|1500150|t | 1
57637|1500151|t | 0
57638|1500146|t | 0
57637|1500177|t | 0
57637|1500178|t | 1
57637|1500179|t | 0
57638|1500174|t | 0
(4 rows)
id|value
@ -872,7 +1052,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500158
1500186
(1 row)
step s2-print-cluster:
@ -888,10 +1068,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500157|t | 0
57637|1500158|t | 1
57637|1500159|t | 0
57638|1500154|t | 0
57637|1500185|t | 0
57637|1500186|t | 1
57637|1500187|t | 0
57638|1500182|t | 0
(4 rows)
id|value
@ -943,7 +1123,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500169
1500197
(1 row)
step s2-commit:
@ -962,10 +1142,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500168|t | 0
57637|1500169|t | 1
57637|1500170|t | 0
57638|1500165|t | 0
57637|1500196|t | 0
57637|1500197|t | 1
57637|1500198|t | 0
57638|1500193|t | 0
(4 rows)
id|value
@ -1004,7 +1184,7 @@ step s1-isolate-tenant-no-same-coloc-blocking:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500183
1500211
(1 row)
step s3-release-advisory-lock:
@ -1018,7 +1198,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500180
1500208
(1 row)
step s2-print-cluster:
@ -1034,10 +1214,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500179|t | 0
57637|1500180|t | 1
57637|1500181|t | 0
57638|1500176|t | 0
57637|1500207|t | 0
57637|1500208|t | 1
57637|1500209|t | 0
57638|1500204|t | 0
(4 rows)
id|value
@ -1076,7 +1256,7 @@ step s1-isolate-tenant-no-same-coloc-blocking:
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500194
1500222
(1 row)
step s3-release-advisory-lock:
@ -1090,7 +1270,7 @@ t
step s2-isolate-tenant: <... completed>
isolate_tenant_to_new_shard
---------------------------------------------------------------------
1500191
1500219
(1 row)
step s2-print-cluster:
@ -1106,10 +1286,10 @@ step s2-print-cluster:
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500190|t | 0
57637|1500191|t | 1
57637|1500192|t | 0
57638|1500187|t | 0
57637|1500218|t | 0
57637|1500219|t | 1
57637|1500220|t | 0
57638|1500215|t | 0
(4 rows)
id|value

View File

@ -44,6 +44,13 @@ step "s1-update"
UPDATE isolation_table SET value = 5 WHERE id = 5;
}
step "s1-update-complex"
{
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
}
step "s1-delete"
{
DELETE FROM isolation_table WHERE id = 5;
@ -119,6 +126,7 @@ step "s2-print-index-count"
// we expect DML/DDL queries to fail because the shard they are waiting for is destroyed
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update-complex" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"
@ -127,6 +135,7 @@ permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant
// the same tests without loading the cache at first
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update-complex" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"

View File

@ -51,6 +51,13 @@ step "s1-update"
UPDATE isolation_table SET value = 5 WHERE id = 5;
}
step "s1-update-complex"
{
UPDATE isolation_table SET value = 5 WHERE id IN (
SELECT max(id) FROM isolation_table
);
}
step "s1-delete"
{
DELETE FROM isolation_table WHERE id = 5;
@ -148,12 +155,14 @@ step "s3-release-advisory-lock"
// we expect DML queries of s1 to succeed without being blocked.
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update-complex" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-load-cache" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
// the same tests without loading the cache at first
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update-complex" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"
permutation "s3-acquire-advisory-lock" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s1-commit" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster"