mirror of https://github.com/citusdata/citus.git
Add failure test for parallel reference table join
parent
be77d3304f
commit
8be1b0112d
|
@ -0,0 +1,75 @@
|
||||||
|
--
|
||||||
|
-- failure_parallel_connection.sql tests some behaviour of connection management
|
||||||
|
-- where Citus is expected to use multiple connections.
|
||||||
|
--
|
||||||
|
-- In other words, we're not testing any failures in this test. We're trying to make
|
||||||
|
-- sure that Citus uses 1-connection per placement of distributed table even after
|
||||||
|
-- a join with distributed table
|
||||||
|
--
|
||||||
|
SELECT citus.mitmproxy('conn.allow()');
|
||||||
|
mitmproxy
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
CREATE SCHEMA fail_parallel_connection;
|
||||||
|
SET search_path TO 'fail_parallel_connection';
|
||||||
|
SET citus.shard_count TO 4;
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1880000;
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1880000;
|
||||||
|
CREATE TABLE distributed_table (
|
||||||
|
key int,
|
||||||
|
value int
|
||||||
|
);
|
||||||
|
SELECT create_distributed_table('distributed_table', 'key');
|
||||||
|
create_distributed_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
CREATE TABLE reference_table (
|
||||||
|
key int,
|
||||||
|
value int
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('reference_table');
|
||||||
|
create_reference_table
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- make sure that access to the placements of the distributed
|
||||||
|
-- tables use 1 connection
|
||||||
|
SET citus.force_max_query_parallelization TO ON;
|
||||||
|
BEGIN;
|
||||||
|
SELECT count(*) FROM distributed_table JOIN reference_table USING (key);
|
||||||
|
count
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
0
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()');
|
||||||
|
mitmproxy
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- this query should not fail because each placement should be accessed
|
||||||
|
-- over a separate connection
|
||||||
|
SELECT count(*) FROM distributed_table JOIN reference_table USING (key);
|
||||||
|
count
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
0
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
COMMIT;
|
||||||
|
SELECT citus.mitmproxy('conn.allow()');
|
||||||
|
mitmproxy
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
DROP SCHEMA fail_parallel_connection CASCADE;
|
||||||
|
NOTICE: drop cascades to 2 other objects
|
||||||
|
DETAIL: drop cascades to table distributed_table
|
||||||
|
drop cascades to table reference_table
|
||||||
|
SET search_path TO default;
|
|
@ -4,6 +4,7 @@ test: failure_test_helpers
|
||||||
# this should only be run by pg_regress_multi, you don't need it
|
# this should only be run by pg_regress_multi, you don't need it
|
||||||
test: failure_setup
|
test: failure_setup
|
||||||
test: multi_test_helpers
|
test: multi_test_helpers
|
||||||
|
test: failure_parallel_connection
|
||||||
test: failure_replicated_partitions
|
test: failure_replicated_partitions
|
||||||
test: multi_test_catalog_views
|
test: multi_test_catalog_views
|
||||||
test: failure_insert_select_repartition
|
test: failure_insert_select_repartition
|
||||||
|
|
|
@ -0,0 +1,48 @@
|
||||||
|
--
|
||||||
|
-- failure_parallel_connection.sql tests some behaviour of connection management
|
||||||
|
-- where Citus is expected to use multiple connections.
|
||||||
|
--
|
||||||
|
-- In other words, we're not testing any failures in this test. We're trying to make
|
||||||
|
-- sure that Citus uses 1-connection per placement of distributed table even after
|
||||||
|
-- a join with distributed table
|
||||||
|
--
|
||||||
|
|
||||||
|
SELECT citus.mitmproxy('conn.allow()');
|
||||||
|
|
||||||
|
CREATE SCHEMA fail_parallel_connection;
|
||||||
|
SET search_path TO 'fail_parallel_connection';
|
||||||
|
|
||||||
|
SET citus.shard_count TO 4;
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1880000;
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1880000;
|
||||||
|
|
||||||
|
CREATE TABLE distributed_table (
|
||||||
|
key int,
|
||||||
|
value int
|
||||||
|
);
|
||||||
|
SELECT create_distributed_table('distributed_table', 'key');
|
||||||
|
|
||||||
|
CREATE TABLE reference_table (
|
||||||
|
key int,
|
||||||
|
value int
|
||||||
|
);
|
||||||
|
SELECT create_reference_table('reference_table');
|
||||||
|
|
||||||
|
-- make sure that access to the placements of the distributed
|
||||||
|
-- tables use 1 connection
|
||||||
|
SET citus.force_max_query_parallelization TO ON;
|
||||||
|
|
||||||
|
BEGIN;
|
||||||
|
SELECT count(*) FROM distributed_table JOIN reference_table USING (key);
|
||||||
|
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()');
|
||||||
|
|
||||||
|
-- this query should not fail because each placement should be accessed
|
||||||
|
-- over a separate connection
|
||||||
|
SELECT count(*) FROM distributed_table JOIN reference_table USING (key);
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
|
|
||||||
|
SELECT citus.mitmproxy('conn.allow()');
|
||||||
|
DROP SCHEMA fail_parallel_connection CASCADE;
|
||||||
|
SET search_path TO default;
|
Loading…
Reference in New Issue