From 7aa2af8975a7281e59feae511f31e6361cd0fafd Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Mon, 29 Oct 2018 11:35:56 +0300
Subject: [PATCH] Add failure and cancellation tests for multi-row inserts

---
 .../expected/failure_multi_row_insert.out    | 158 ++++++++++++++++++
 src/test/regress/failure_schedule            |   1 +
 .../regress/sql/failure_multi_row_insert.sql |  79 +++++++++
 3 files changed, 238 insertions(+)
 create mode 100644 src/test/regress/expected/failure_multi_row_insert.out
 create mode 100644 src/test/regress/sql/failure_multi_row_insert.sql

diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out
new file mode 100644
index 000000000..a4b4878e7
--- /dev/null
+++ b/src/test/regress/expected/failure_multi_row_insert.out
@@ -0,0 +1,158 @@
+--
+-- failure_multi_row_insert
+--
+CREATE SCHEMA IF NOT EXISTS failure_multi_row_insert;
+SET SEARCH_PATH TO failure_multi_row_insert;
+-- this test is dependent on the shard count, so do not change
+-- without changing the test
+SET citus.shard_count TO 4;
+SET citus.next_shard_id TO 301000;
+SET citus.shard_replication_factor TO 1;
+SELECT pg_backend_pid() as pid \gset
+SELECT citus.mitmproxy('conn.allow()');
+ mitmproxy
+-----------
+
+(1 row)
+
+CREATE TABLE distributed_table(key int, value int);
+CREATE TABLE reference_table(value int);
+SELECT create_distributed_table('distributed_table', 'key');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SELECT create_reference_table('reference_table');
+ create_reference_table
+------------------------
+
+(1 row)
+
+-- we'll test failure and cancellation for the following cases:
+-- (a) multi-row INSERT that hits the same shard with the same value
+-- (b) multi-row INSERT that hits the same shard with different values
+-- (c) multi-row INSERT that hits multiple shards in a single worker
+-- (d) multi-row INSERT that hits multiple shards in multiple workers
+-- (e) multi-row INSERT to a reference table
+-- Failure and cancellation on multi-row INSERT that hits the same shard with the same value
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
+ERROR: server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+CONTEXT: while executing command on localhost:9060
+-- this test is broken, see https://github.com/citusdata/citus/issues/2460
+-- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+-- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6);
+-- Failure and cancellation on multi-row INSERT that hits the same shard with different values
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,7), (5,8);
+ERROR: server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+CONTEXT: while executing command on localhost:9060
+-- this test is broken, see https://github.com/citusdata/citus/issues/2460
+-- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+-- INSERT INTO distributed_table VALUES (1,9), (5,10);
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in a single worker
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,11), (6,12);
+ERROR: server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+CONTEXT: while executing command on localhost:9060
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,13), (6,14);
+ERROR: canceling statement due to user request
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in a single worker, happening on the second query
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,15), (6,16);
+ERROR: server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+CONTEXT: while executing command on localhost:9060
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (1,17), (6,18);
+ERROR: canceling statement due to user request
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in multiple workers
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (2,19),(1,20);
+ERROR: server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+CONTEXT: while executing command on localhost:9060
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO distributed_table VALUES (2,21), (1,22);
+ERROR: canceling statement due to user request
+-- one test for the reference table for completeness
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+ mitmproxy
+-----------
+
+(1 row)
+
+INSERT INTO reference_table VALUES (1), (2), (3), (4);
+ERROR: canceling statement due to user request
+-- we've either failed or cancelled all queries, so the tables should be empty
+SELECT * FROM distributed_table;
+ key | value
+-----+-------
+(0 rows)
+
+SELECT * FROM reference_table;
+ value
+-------
+(0 rows)
+
+SELECT citus.mitmproxy('conn.allow()');
+ mitmproxy
+-----------
+
+(1 row)
+
+RESET SEARCH_PATH;
+DROP SCHEMA failure_multi_row_insert CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to table failure_multi_row_insert.distributed_table
+drop cascades to table failure_multi_row_insert.reference_table
diff --git a/src/test/regress/failure_schedule b/src/test/regress/failure_schedule
index ca5b491f2..dd7c007a6 100644
--- a/src/test/regress/failure_schedule
+++ b/src/test/regress/failure_schedule
@@ -28,3 +28,4 @@ test: failure_real_time_select
 test: failure_insert_select_pushdown
 test: failure_single_mod
 test: failure_savepoints
+test: failure_multi_row_insert
diff --git a/src/test/regress/sql/failure_multi_row_insert.sql b/src/test/regress/sql/failure_multi_row_insert.sql
new file mode 100644
index 000000000..26dc7b770
--- /dev/null
+++ b/src/test/regress/sql/failure_multi_row_insert.sql
@@ -0,0 +1,79 @@
+--
+-- failure_multi_row_insert
+--
+
+CREATE SCHEMA IF NOT EXISTS failure_multi_row_insert;
+SET SEARCH_PATH TO failure_multi_row_insert;
+
+-- this test is dependent on the shard count, so do not change
+-- without changing the test
+SET citus.shard_count TO 4;
+SET citus.next_shard_id TO 301000;
+SET citus.shard_replication_factor TO 1;
+SELECT pg_backend_pid() as pid \gset
+
+SELECT citus.mitmproxy('conn.allow()');
+
+
+CREATE TABLE distributed_table(key int, value int);
+CREATE TABLE reference_table(value int);
+
+SELECT create_distributed_table('distributed_table', 'key');
+SELECT create_reference_table('reference_table');
+
+-- we'll test failure and cancellation for the following cases:
+-- (a) multi-row INSERT that hits the same shard with the same value
+-- (b) multi-row INSERT that hits the same shard with different values
+-- (c) multi-row INSERT that hits multiple shards in a single worker
+-- (d) multi-row INSERT that hits multiple shards in multiple workers
+-- (e) multi-row INSERT to a reference table
+
+
+-- Failure and cancellation on multi-row INSERT that hits the same shard with the same value
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
+
+-- this test is broken, see https://github.com/citusdata/citus/issues/2460
+-- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+-- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6);
+
+-- Failure and cancellation on multi-row INSERT that hits the same shard with different values
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+INSERT INTO distributed_table VALUES (1,7), (5,8);
+
+-- this test is broken, see https://github.com/citusdata/citus/issues/2460
+-- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+-- INSERT INTO distributed_table VALUES (1,9), (5,10);
+
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in a single worker
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+INSERT INTO distributed_table VALUES (1,11), (6,12);
+
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+INSERT INTO distributed_table VALUES (1,13), (6,14);
+
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in a single worker, happening on the second query
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
+INSERT INTO distributed_table VALUES (1,15), (6,16);
+
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
+INSERT INTO distributed_table VALUES (1,17), (6,18);
+
+-- Failure and cancellation on multi-row INSERT that hits multiple shards in multiple workers
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
+INSERT INTO distributed_table VALUES (2,19),(1,20);
+
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+INSERT INTO distributed_table VALUES (2,21), (1,22);
+
+-- one test for the reference table for completeness
+SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
+INSERT INTO reference_table VALUES (1), (2), (3), (4);
+
+-- we've either failed or cancelled all queries, so the tables should be empty
+SELECT * FROM distributed_table;
+SELECT * FROM reference_table;
+
+SELECT citus.mitmproxy('conn.allow()');
+RESET SEARCH_PATH;
+DROP SCHEMA failure_multi_row_insert CASCADE;
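For anyone running these blocks by hand rather than through the regression schedule, the following is a minimal sketch of the kill/cancel pattern every block above follows. It assumes the failure-testing harness this suite already relies on (the citus.mitmproxy() UDF and a worker proxied at localhost:9060); the inserted values and the count() check are illustrative, not part of the committed test.

-- capture this backend's pid so the proxy rule can cancel it later
SELECT pg_backend_pid() AS pid \gset

-- drop the worker connection as soon as the proxied worker sees an INSERT;
-- the multi-row INSERT is expected to fail with a connection error
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
INSERT INTO distributed_table VALUES (1,100), (6,100);

-- instead of killing the connection, cancel this backend when the INSERT arrives;
-- the statement is expected to abort with "canceling statement due to user request"
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
INSERT INTO distributed_table VALUES (1,101), (6,101);

-- neither attempt should have committed a partial set of rows
SELECT count(*) FROM distributed_table WHERE value IN (100, 101);

-- restore normal traffic before the next test block
SELECT citus.mitmproxy('conn.allow()');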