Fix flakiness in isolation_replicate_reference_tables_to_coordinator.spec (#6123)

When the deadlock detector kills s2-update-dist-table, both sessions
finish at the same time, so the order in which they are displayed can be
swapped. To counteract this, we start using the ["marker" feature][1] of
the isolationtester framework to create consistent output.
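For context: a marker is a parenthesized step name appended to a step in
a permutation line; isolationtester then delays reporting the completion
of the annotated step until the marked step has completed, which pins
the output order. A minimal sketch with hypothetical step names (not
from this spec):

    // "s2-update" is only reported as completed once "s1-lock" has
    // completed, so the two completions always appear in the same order
    permutation "s1-begin" "s2-begin" "s1-lock" "s2-update" ("s1-lock") "s1-end" "s2-end"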

In passing, this also sets next_shard_id to the value expected by this
test, so it can be run on its own using `make check-isolation-base`.

Failed CI test: https://app.circleci.com/pipelines/github/citusdata/citus/25562/workflows/dfe6f88a-c306-4d91-b771-d5d1deb1798d/jobs/713417

[1]: ec62ce55a8/src/test/isolation/README#L152
Jelte Fennema 2022-08-03 12:00:30 +02:00 committed by GitHub
parent f91f0f4b55
commit 8bbc1a45e1
1 changed file with 2 additions and 1 deletion


@@ -1,5 +1,6 @@
 setup
 {
+	SET citus.next_shard_id TO 1500877;
 	CREATE TABLE ref_table(a int primary key);
 	SELECT create_reference_table('ref_table');
 	INSERT INTO ref_table VALUES (1), (3), (5), (7);
@@ -137,7 +138,7 @@ step "add-node"
 // verify that locks on the placement of the reference table on the coordinator is
 // taken into account when looking for distributed deadlocks
-permutation "add-node" "s1-begin" "s2-begin" "s1-update-dist-table" "s2-lock-ref-table-placement-on-coordinator" "s1-lock-ref-table-placement-on-coordinator" "s2-update-dist-table" "deadlock-checker-call" "s1-end" "s2-end"
+permutation "add-node" "s1-begin" "s2-begin" "s1-update-dist-table" "s2-lock-ref-table-placement-on-coordinator" "s1-lock-ref-table-placement-on-coordinator" "s2-update-dist-table" ("s1-lock-ref-table-placement-on-coordinator") "deadlock-checker-call" "s1-end" "s2-end"
 // verify that *_dist_stat_activity() functions return the correct result when query
 // has a task on the coordinator.