From 8be1b0112d3b0477cb759bcc474ee284108c79c7 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Mon, 3 Feb 2020 10:34:58 +0100 Subject: [PATCH] Add failure test for parallel reference table join --- .../expected/failure_parallel_connection.out | 75 +++++++++++++++++++ src/test/regress/failure_schedule | 1 + .../sql/failure_parallel_connection.sql | 48 ++++++++++++ 3 files changed, 124 insertions(+) create mode 100644 src/test/regress/expected/failure_parallel_connection.out create mode 100644 src/test/regress/sql/failure_parallel_connection.sql diff --git a/src/test/regress/expected/failure_parallel_connection.out b/src/test/regress/expected/failure_parallel_connection.out new file mode 100644 index 000000000..37321b1b3 --- /dev/null +++ b/src/test/regress/expected/failure_parallel_connection.out @@ -0,0 +1,75 @@ +-- +-- failure_parallel_connection.sql tests some behaviour of connection management +-- where Citus is expected to use multiple connections. +-- +-- In other words, we're not testing any failures in this test. 
We're trying to make +-- sure that Citus uses 1-connection per placement of distributed table even after +-- a join with distributed table +-- +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE SCHEMA fail_parallel_connection; +SET search_path TO 'fail_parallel_connection'; +SET citus.shard_count TO 4; +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1880000; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1880000; +CREATE TABLE distributed_table ( + key int, + value int +); +SELECT create_distributed_table('distributed_table', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE reference_table ( + key int, + value int +); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- make sure that access to the placements of the distributed +-- tables use 1 connection +SET citus.force_max_query_parallelization TO ON; +BEGIN; + SELECT count(*) FROM distributed_table JOIN reference_table USING (key); + count +--------------------------------------------------------------------- + 0 +(1 row) + + SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + + -- this query should not fail because each placement should be accessed + -- over a separate connection + SELECT count(*) FROM distributed_table JOIN reference_table USING (key); + count +--------------------------------------------------------------------- + 0 +(1 row) + +COMMIT; +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +DROP SCHEMA fail_parallel_connection CASCADE; +NOTICE: drop cascades to 2 other objects 
+DETAIL: drop cascades to table distributed_table +drop cascades to table reference_table +SET search_path TO default; diff --git a/src/test/regress/failure_schedule b/src/test/regress/failure_schedule index 0e2143370..fb0ecd0bf 100644 --- a/src/test/regress/failure_schedule +++ b/src/test/regress/failure_schedule @@ -4,6 +4,7 @@ test: failure_test_helpers # this should only be run by pg_regress_multi, you don't need it test: failure_setup test: multi_test_helpers +test: failure_parallel_connection test: failure_replicated_partitions test: multi_test_catalog_views test: failure_insert_select_repartition diff --git a/src/test/regress/sql/failure_parallel_connection.sql b/src/test/regress/sql/failure_parallel_connection.sql new file mode 100644 index 000000000..595b10ced --- /dev/null +++ b/src/test/regress/sql/failure_parallel_connection.sql @@ -0,0 +1,48 @@ +-- +-- failure_parallel_connection.sql tests some behaviour of connection management +-- where Citus is expected to use multiple connections. +-- +-- In other words, we're not testing any failures in this test. 
We're trying to make +-- sure that Citus uses 1-connection per placement of distributed table even after +-- a join with distributed table +-- + +SELECT citus.mitmproxy('conn.allow()'); + +CREATE SCHEMA fail_parallel_connection; +SET search_path TO 'fail_parallel_connection'; + +SET citus.shard_count TO 4; +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1880000; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1880000; + +CREATE TABLE distributed_table ( + key int, + value int +); +SELECT create_distributed_table('distributed_table', 'key'); + +CREATE TABLE reference_table ( + key int, + value int +); +SELECT create_reference_table('reference_table'); + +-- make sure that access to the placements of the distributed +-- tables use 1 connection +SET citus.force_max_query_parallelization TO ON; + +BEGIN; + SELECT count(*) FROM distributed_table JOIN reference_table USING (key); + + SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()'); + + -- this query should not fail because each placement should be accessed + -- over a separate connection + SELECT count(*) FROM distributed_table JOIN reference_table USING (key); +COMMIT; + + +SELECT citus.mitmproxy('conn.allow()'); +DROP SCHEMA fail_parallel_connection CASCADE; +SET search_path TO default;