Deletes unnecessary test outputs (#6140)

pull/6119/head
Naisila Puka 2022-08-08 11:19:14 +03:00 committed by GitHub
parent 9eedf6dcf8
commit 3401b31c13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 6 additions and 2055 deletions

File diff suppressed because it is too large


@@ -1,517 +0,0 @@
CREATE TABLE parent(ts timestamptz, i int, n numeric, s text)
PARTITION BY RANGE (ts);
-- row partitions
CREATE TABLE p0 PARTITION OF parent
FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
CREATE TABLE p1 PARTITION OF parent
FOR VALUES FROM ('2020-02-01') TO ('2020-03-01');
CREATE TABLE p2 PARTITION OF parent
FOR VALUES FROM ('2020-03-01') TO ('2020-04-01');
CREATE TABLE p3 PARTITION OF parent
FOR VALUES FROM ('2020-04-01') TO ('2020-05-01');
INSERT INTO parent SELECT '2020-01-15', 10, 100, 'one thousand'
FROM generate_series(1,100000);
INSERT INTO parent SELECT '2020-02-15', 20, 200, 'two thousand'
FROM generate_series(1,100000);
INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
-- run parallel plans
SET force_parallel_mode = regress;
SET min_parallel_table_scan_size = 1;
SET parallel_tuple_cost = 0;
SET max_parallel_workers = 4;
SET max_parallel_workers_per_gather = 4;
EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent;
QUERY PLAN
---------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 4
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on p0
-> Parallel Seq Scan on p1
-> Parallel Seq Scan on p2
-> Parallel Seq Scan on p3
(9 rows)
SELECT count(*), sum(i), min(i), max(i) FROM parent;
count | sum | min | max
---------------------------------------------------------------------
400000 | 9000000 | 10 | 30
(1 row)
-- set older partitions as columnar
SELECT alter_table_set_access_method('p0','columnar');
NOTICE: creating a new table for public.p0
NOTICE: moving the data of public.p0
NOTICE: dropping the old public.p0
NOTICE: renaming the new table to public.p0
alter_table_set_access_method
---------------------------------------------------------------------
(1 row)
SELECT alter_table_set_access_method('p1','columnar');
NOTICE: creating a new table for public.p1
NOTICE: moving the data of public.p1
NOTICE: dropping the old public.p1
NOTICE: renaming the new table to public.p1
alter_table_set_access_method
---------------------------------------------------------------------
(1 row)
SELECT alter_table_set_access_method('p3','columnar');
NOTICE: creating a new table for public.p3
NOTICE: moving the data of public.p3
NOTICE: dropping the old public.p3
NOTICE: renaming the new table to public.p3
alter_table_set_access_method
---------------------------------------------------------------------
(1 row)
-- should also use a parallel plan
EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent;
QUERY PLAN
---------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 4
-> Partial Aggregate
-> Parallel Append
-> Custom Scan (ColumnarScan) on p3
Columnar Projected Columns: i
-> Custom Scan (ColumnarScan) on p0
Columnar Projected Columns: i
-> Custom Scan (ColumnarScan) on p1
Columnar Projected Columns: i
-> Parallel Seq Scan on p2
(12 rows)
SELECT count(*), sum(i), min(i), max(i) FROM parent;
count | sum | min | max
---------------------------------------------------------------------
400000 | 9000000 | 10 | 30
(1 row)
-- and also parallel without custom scan
SET columnar.enable_custom_scan = FALSE;
EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent;
QUERY PLAN
---------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 4
-> Partial Aggregate
-> Parallel Append
-> Seq Scan on p3
-> Seq Scan on p0
-> Seq Scan on p1
-> Parallel Seq Scan on p2
(9 rows)
SELECT count(*), sum(i), min(i), max(i) FROM parent;
count | sum | min | max
---------------------------------------------------------------------
400000 | 9000000 | 10 | 30
(1 row)
SET columnar.enable_custom_scan TO DEFAULT;
SET force_parallel_mode TO DEFAULT;
SET min_parallel_table_scan_size TO DEFAULT;
SET parallel_tuple_cost TO DEFAULT;
SET max_parallel_workers TO DEFAULT;
SET max_parallel_workers_per_gather TO DEFAULT;
CREATE INDEX parent_btree ON parent (n);
ANALYZE parent;
-- will use columnar custom scan on columnar partitions but an index
-- scan on the heap partition
EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent
WHERE ts > '2020-02-20' AND n < 5;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Append
-> Custom Scan (ColumnarScan) on p1
Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
Columnar Projected Columns: ts, i, n
Columnar Chunk Group Filters: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
-> Index Scan using p2_n_idx on p2
Index Cond: (n < '5'::numeric)
Filter: (ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone)
-> Custom Scan (ColumnarScan) on p3
Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
Columnar Projected Columns: ts, i, n
Columnar Chunk Group Filters: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
(13 rows)
BEGIN;
SET LOCAL columnar.enable_custom_scan TO 'OFF';
-- now that we disabled the columnar custom scan, the planner will use a seq
-- scan on the columnar partitions, since an index scan would be even more expensive
EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent
WHERE ts > '2020-02-20' AND n < 5;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Append
-> Seq Scan on p1
Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
-> Index Scan using p2_n_idx on p2
Index Cond: (n < '5'::numeric)
Filter: (ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone)
-> Seq Scan on p3
Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric))
(9 rows)
ROLLBACK;
DROP TABLE parent;
--
-- Test inheritance
--
CREATE TABLE i_row(i int);
INSERT INTO i_row VALUES(100);
CREATE TABLE i_col(i int) USING columnar;
INSERT INTO i_col VALUES(200);
CREATE TABLE ij_row_row(j int) INHERITS(i_row);
INSERT INTO ij_row_row VALUES(300, 1000);
CREATE TABLE ij_row_col(j int) INHERITS(i_row) USING columnar;
INSERT INTO ij_row_col VALUES(400, 2000);
CREATE TABLE ij_col_row(j int) INHERITS(i_col);
INSERT INTO ij_col_row VALUES(500, 3000);
CREATE TABLE ij_col_col(j int) INHERITS(i_col) USING columnar;
INSERT INTO ij_col_col VALUES(600, 4000);
EXPLAIN (costs off) SELECT * FROM i_row;
QUERY PLAN
---------------------------------------------------------------------
Append
-> Seq Scan on i_row
-> Seq Scan on ij_row_row
-> Custom Scan (ColumnarScan) on ij_row_col
Columnar Projected Columns: i
(5 rows)
SELECT * FROM i_row;
i
---------------------------------------------------------------------
100
300
400
(3 rows)
EXPLAIN (costs off) SELECT * FROM ONLY i_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on i_row
(1 row)
SELECT * FROM ONLY i_row;
i
---------------------------------------------------------------------
100
(1 row)
EXPLAIN (costs off) SELECT * FROM i_col;
QUERY PLAN
---------------------------------------------------------------------
Append
-> Custom Scan (ColumnarScan) on i_col
Columnar Projected Columns: i
-> Seq Scan on ij_col_row
-> Custom Scan (ColumnarScan) on ij_col_col
Columnar Projected Columns: i
(6 rows)
SELECT * FROM i_col;
i
---------------------------------------------------------------------
200
500
600
(3 rows)
EXPLAIN (costs off) SELECT * FROM ONLY i_col;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (ColumnarScan) on i_col
Columnar Projected Columns: i
(2 rows)
SELECT * FROM ONLY i_col;
i
---------------------------------------------------------------------
200
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_row_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_row_row
(1 row)
SELECT * FROM ij_row_row;
i | j
---------------------------------------------------------------------
300 | 1000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_row_col;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (ColumnarScan) on ij_row_col
Columnar Projected Columns: i, j
(2 rows)
SELECT * FROM ij_row_col;
i | j
---------------------------------------------------------------------
400 | 2000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_col_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_col_row
(1 row)
SELECT * FROM ij_col_row;
i | j
---------------------------------------------------------------------
500 | 3000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_col_col;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (ColumnarScan) on ij_col_col
Columnar Projected Columns: i, j
(2 rows)
SELECT * FROM ij_col_col;
i | j
---------------------------------------------------------------------
600 | 4000
(1 row)
SET columnar.enable_custom_scan = FALSE;
EXPLAIN (costs off) SELECT * FROM i_row;
QUERY PLAN
---------------------------------------------------------------------
Append
-> Seq Scan on i_row
-> Seq Scan on ij_row_row
-> Seq Scan on ij_row_col
(4 rows)
SELECT * FROM i_row;
i
---------------------------------------------------------------------
100
300
400
(3 rows)
EXPLAIN (costs off) SELECT * FROM ONLY i_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on i_row
(1 row)
SELECT * FROM ONLY i_row;
i
---------------------------------------------------------------------
100
(1 row)
EXPLAIN (costs off) SELECT * FROM i_col;
QUERY PLAN
---------------------------------------------------------------------
Append
-> Seq Scan on i_col
-> Seq Scan on ij_col_row
-> Seq Scan on ij_col_col
(4 rows)
SELECT * FROM i_col;
i
---------------------------------------------------------------------
200
500
600
(3 rows)
EXPLAIN (costs off) SELECT * FROM ONLY i_col;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on i_col
(1 row)
SELECT * FROM ONLY i_col;
i
---------------------------------------------------------------------
200
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_row_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_row_row
(1 row)
SELECT * FROM ij_row_row;
i | j
---------------------------------------------------------------------
300 | 1000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_row_col;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_row_col
(1 row)
SELECT * FROM ij_row_col;
i | j
---------------------------------------------------------------------
400 | 2000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_col_row;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_col_row
(1 row)
SELECT * FROM ij_col_row;
i | j
---------------------------------------------------------------------
500 | 3000
(1 row)
EXPLAIN (costs off) SELECT * FROM ij_col_col;
QUERY PLAN
---------------------------------------------------------------------
Seq Scan on ij_col_col
(1 row)
SELECT * FROM ij_col_col;
i | j
---------------------------------------------------------------------
600 | 4000
(1 row)
SET columnar.enable_custom_scan TO DEFAULT;
-- remove the child table from the inheritance hierarchy
ALTER TABLE ij_row_row NO INHERIT i_row;
DROP TABLE ij_row_row;
DROP TABLE i_row CASCADE;
NOTICE: drop cascades to table ij_row_col
DROP TABLE i_col CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table ij_col_row
drop cascades to table ij_col_col
--
-- https://github.com/citusdata/citus/issues/5257
--
set default_table_access_method to columnar;
CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a);
CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250);
CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600);
CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500);
INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0;
CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b);
CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250);
CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500);
CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600);
INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0;
SET enable_partitionwise_join to true;
EXPLAIN (costs off, timing off, summary off)
SELECT * FROM
prt1 t1 LEFT JOIN LATERAL
(SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b)
FROM prt1 t2
JOIN prt2 t3 ON (t2.a = t3.b)
) ss
ON t1.a = ss.t2a WHERE t1.b = 0
ORDER BY t1.a;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: t1.a
-> Append
-> Nested Loop Left Join
-> Custom Scan (ColumnarScan) on prt1_p1 t1
Filter: (b = 0)
Columnar Projected Columns: a, b, c
Columnar Chunk Group Filters: (b = 0)
-> Hash Join
Hash Cond: (t2.a = t3.b)
-> Custom Scan (ColumnarScan) on prt1_p1 t2
Filter: (t1.a = a)
Columnar Projected Columns: a
Columnar Chunk Group Filters: (t1.a = a)
-> Hash
-> Custom Scan (ColumnarScan) on prt2_p1 t3
Columnar Projected Columns: a, b
-> Nested Loop Left Join
-> Custom Scan (ColumnarScan) on prt1_p2 t1_1
Filter: (b = 0)
Columnar Projected Columns: a, b, c
Columnar Chunk Group Filters: (b = 0)
-> Hash Join
Hash Cond: (t2_1.a = t3_1.b)
-> Custom Scan (ColumnarScan) on prt1_p2 t2_1
Filter: (t1_1.a = a)
Columnar Projected Columns: a
Columnar Chunk Group Filters: (t1_1.a = a)
-> Hash
-> Custom Scan (ColumnarScan) on prt2_p2 t3_1
Columnar Projected Columns: a, b
-> Nested Loop Left Join
-> Custom Scan (ColumnarScan) on prt1_p3 t1_2
Filter: (b = 0)
Columnar Projected Columns: a, b, c
Columnar Chunk Group Filters: (b = 0)
-> Hash Join
Hash Cond: (t2_2.a = t3_2.b)
-> Custom Scan (ColumnarScan) on prt1_p3 t2_2
Filter: (t1_2.a = a)
Columnar Projected Columns: a
Columnar Chunk Group Filters: (t1_2.a = a)
-> Hash
-> Custom Scan (ColumnarScan) on prt2_p3 t3_2
Columnar Projected Columns: a, b
(45 rows)
SELECT * FROM
prt1 t1 LEFT JOIN LATERAL
(SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b)
FROM prt1 t2
JOIN prt2 t3 ON (t2.a = t3.b)
) ss
ON t1.a = ss.t2a WHERE t1.b = 0
ORDER BY t1.a;
a | b | c | t2a | t3a | least
---------------------------------------------------------------------
0 | 0 | 0000 | 0 | 0 | 0
50 | 0 | 0050 | | |
100 | 0 | 0100 | | |
150 | 0 | 0150 | 150 | 0 | 150
200 | 0 | 0200 | | |
250 | 0 | 0250 | | |
300 | 0 | 0300 | 300 | 0 | 300
350 | 0 | 0350 | | |
400 | 0 | 0400 | | |
450 | 0 | 0450 | 450 | 0 | 450
500 | 0 | 0500 | | |
550 | 0 | 0550 | | |
(12 rows)
set default_table_access_method to default;
SET enable_partitionwise_join to default;
DROP TABLE prt1;
DROP TABLE prt2;
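
The deleted file above exercised mixing heap and columnar partitions under a single parent table. A minimal standalone sketch of that pattern, assuming the Citus columnar extension is available; the events/events_jan/events_feb names are illustrative, not from the original test:

-- parent table with two range partitions (illustrative names)
CREATE TABLE events (ts timestamptz, payload int) PARTITION BY RANGE (ts);
CREATE TABLE events_jan PARTITION OF events
    FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
CREATE TABLE events_feb PARTITION OF events
    FOR VALUES FROM ('2020-02-01') TO ('2020-03-01');
-- convert the cold partition to columnar storage in place
SELECT alter_table_set_access_method('events_jan', 'columnar');
-- heap partitions keep Seq Scan; converted ones show Custom Scan (ColumnarScan)
EXPLAIN (costs off) SELECT count(*) FROM events;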


@@ -1,103 +0,0 @@
--
-- failure_on_create_subscription
--
-- Since the result of these tests depends on the success of the background
-- process that creates the replication slot on the publisher, these
-- tests are kept separate.
CREATE SCHEMA IF NOT EXISTS move_shard;
SET SEARCH_PATH = move_shard;
SET citus.shard_count TO 4;
SET citus.next_shard_id TO 100;
SET citus.shard_replication_factor TO 1;
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
CREATE TABLE t(id int PRIMARY KEY, int_data int, data text);
SELECT create_distributed_table('t', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE VIEW shards_in_workers AS
SELECT shardid,
(CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker
FROM pg_dist_placement NATURAL JOIN pg_dist_node
ORDER BY 1,2 ASC;
-- Insert some data
INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x);
-- Initial shard placements
SELECT * FROM shards_in_workers;
shardid | worker
---------------------------------------------------------------------
100 | worker2
101 | worker1
102 | worker2
103 | worker1
(4 rows)
-- failure on creating the subscription
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port);
WARNING: could not drop the replication slot "citus_shard_move_subscription" on publisher
DETAIL: The error was: ERROR: replication slot "citus_shard_move_subscription" does not exist
CONTEXT: while executing command on localhost:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
-- Verify that the shard is not moved and the number of rows is still 100k
SELECT * FROM shards_in_workers;
shardid | worker
---------------------------------------------------------------------
100 | worker2
101 | worker1
102 | worker2
103 | worker1
(4 rows)
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
100000
(1 row)
-- Verify that the shard can be moved after a temporary failure
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port);
master_move_shard_placement
---------------------------------------------------------------------
(1 row)
SELECT * FROM shards_in_workers;
shardid | worker
---------------------------------------------------------------------
100 | worker2
101 | worker2
102 | worker2
103 | worker1
(4 rows)
SELECT count(*) FROM t;
count
---------------------------------------------------------------------
100000
(1 row)
DROP SCHEMA move_shard CASCADE ;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table t
drop cascades to view shards_in_workers
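
The test above follows the standard mitmproxy failure-injection recipe; a condensed sketch, assuming the mitmproxy-backed regression harness these tests run under (shard id 101 and the :worker_* psql variables are taken from the output above):

-- kill the worker connection as soon as the proxy sees CREATE SUBSCRIPTION
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
-- the move now fails while setting up logical replication on the target
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port,
                                   'localhost', :worker_2_proxy_port);
-- restore normal traffic so a retry of the same move succeeds
SELECT citus.mitmproxy('conn.allow()');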


@@ -1,8 +1,6 @@
--- We have two different output files for this failure test because the
--- failure behaviour of SAVEPOINT and RELEASE commands are different if
--- we use the executor. If we use it, these commands error out if any of
--- the placement commands fail. Otherwise, we might mark the placement
--- as invalid and continue with a WARNING.
+--
+-- FAILURE_SAVEPOINTS
+--
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
 ---------------------------------------------------------------------


@@ -1,126 +0,0 @@
-- We have different output files for the executor. This is because
-- we don't mark transactions with ANALYZE as critical anymore, and
-- get WARNINGs instead of ERRORs.
SET citus.next_shard_id TO 12000000;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SET citus.shard_count = 1;
SET citus.shard_replication_factor = 2; -- one shard per worker
SET citus.multi_shard_commit_protocol TO '1pc';
CREATE TABLE vacuum_test (key int, value int);
SELECT create_distributed_table('vacuum_test', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT citus.clear_network_traffic();
clear_network_traffic
---------------------------------------------------------------------
(1 row)
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
VACUUM vacuum_test;
ERROR: connection error: localhost:xxxxx
DETAIL: server closed the connection unexpectedly
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
ANALYZE vacuum_test;
WARNING: connection error: localhost:xxxxx
DETAIL: server closed the connection unexpectedly
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
ANALYZE vacuum_test;
-- ANALYZE transactions being critical is an open question, see #2430
-- show that we marked the placement as INVALID on COMMIT failure
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
shardid | shardstate
---------------------------------------------------------------------
12000000 | 3
(1 row)
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass
);
-- the same tests with cancel
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
VACUUM vacuum_test;
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
ANALYZE vacuum_test;
ERROR: canceling statement due to user request
-- cancel during COMMIT should be ignored
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
ANALYZE vacuum_test;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
CREATE TABLE other_vacuum_test (key int, value int);
SELECT create_distributed_table('other_vacuum_test', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
VACUUM vacuum_test, other_vacuum_test;
ERROR: syntax error at or near ","
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
VACUUM vacuum_test, other_vacuum_test;
ERROR: syntax error at or near ","
-- ==== Clean up, we're done here ====
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
DROP TABLE vacuum_test, other_vacuum_test;
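
For contrast with kill(), the cancel() variant above asks the proxy to cancel this backend instead of dropping the worker connection; a condensed sketch reusing the names from the deleted test:

-- cancel(<pid>) sends a cancellation for this backend when a worker
-- connection issues a query matching the regex
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')');
ANALYZE vacuum_test;
-- expected: ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.allow()');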


@@ -1,223 +0,0 @@
Parsed test spec with 3 sessions
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-insert s3-release-advisory-lock s1-end
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-insert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s1-end:
COMMIT;
step s2-insert: <... completed>
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-upsert s3-release-advisory-lock s1-end
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-upsert:
INSERT INTO logical_replicate_placement VALUES (15, 15);
INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s1-end:
COMMIT;
step s2-upsert: <... completed>
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-update s3-release-advisory-lock s1-end
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-update:
UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s1-end:
COMMIT;
step s2-update: <... completed>
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-delete s3-release-advisory-lock s1-end
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-delete:
DELETE FROM logical_replicate_placement WHERE x = 15;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s1-end:
COMMIT;
step s2-delete: <... completed>
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-copy s3-release-advisory-lock s1-end
step s3-acquire-advisory-lock:
SELECT pg_advisory_lock(44000, 55152);
pg_advisory_lock
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-copy:
COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s3-release-advisory-lock:
SELECT pg_advisory_unlock(44000, 55152);
pg_advisory_unlock
t
step s1-end:
COMMIT;
step s2-copy: <... completed>
starting permutation: s1-begin s1-move-placement s2-truncate s1-end
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-truncate:
TRUNCATE logical_replicate_placement;
<waiting ...>
step s1-end:
COMMIT;
step s2-truncate: <... completed>
starting permutation: s1-begin s1-move-placement s2-alter-table s1-end
step s1-begin:
BEGIN;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s2-alter-table:
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
<waiting ...>
step s1-end:
COMMIT;
step s2-alter-table: <... completed>
starting permutation: s1-begin s2-truncate s1-move-placement s1-end
step s1-begin:
BEGIN;
step s2-truncate:
TRUNCATE logical_replicate_placement;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s1-end:
COMMIT;
starting permutation: s1-begin s2-alter-table s1-move-placement s1-end
step s1-begin:
BEGIN;
step s2-alter-table:
ALTER TABLE logical_replicate_placement ADD COLUMN z INT;
step s1-move-placement:
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
master_move_shard_placement
step s1-end:
COMMIT;
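
The permutations above sequence three sessions with an advisory lock; a sketch of that coordination pattern, with the lock key copied from the spec (where exactly the shard move waits on this lock is internal to Citus and inferred from the output above):

-- session s3: take the advisory lock before the move starts
SELECT pg_advisory_lock(44000, 55152);
-- session s1: BEGIN; SELECT master_move_shard_placement(...);
-- session s2: concurrent DML on the moved table (shown as <waiting ...>)
-- session s3: release the lock so the move and the blocked DML can complete
SELECT pg_advisory_unlock(44000, 55152);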


@@ -1,4 +0,0 @@
Parsed test spec with 3 sessions
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end
setup failed: ERROR: primary key constraints are not supported on partitioned tables


@@ -1,4 +0,0 @@
Parsed test spec with 3 sessions
starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end
setup failed: ERROR: syntax error at or near "PARTITION"


@@ -1,8 +1,6 @@
--- We have two different output files for this failure test because the
--- failure behaviour of SAVEPOINT and RELEASE commands are different if
--- we use the executor. If we use it, these commands error out if any of
--- the placement commands fail. Otherwise, we might mark the placement
--- as invalid and continue with a WARNING.
+--
+-- FAILURE_SAVEPOINTS
+--
 SELECT citus.mitmproxy('conn.allow()');