Disable Postgres parallelism by default in tests

pull/3579/head
Marco Slot 2020-03-05 16:29:44 +01:00
parent 50e59f1a61
commit d0fead6691
2 changed files with 18 additions and 16 deletions

@@ -412,7 +412,7 @@ SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1;
 -- create default partition
 CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT;
 \d+ partitioning_test
                    Table "public.partitioning_test"
  Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
  id     | integer |           |          |         | plain   |              |
@@ -1584,27 +1584,25 @@ SELECT success FROM run_command_on_workers('select pg_reload_conf()');
 EXPLAIN (COSTS OFF)
 SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid);
                                   QUERY PLAN
 ---------------------------------------------------------------------
  Custom Scan (Citus Adaptive)
    Task Count: 4
    Tasks Shown: One of 4
    ->  Task
          Node: host=localhost port=xxxxx dbname=regression
-         ->  Gather
-               Workers Planned: 2
-               ->  Parallel Hash Join
-                     Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test_1.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test_1.subid))
-                     ->  Parallel Append
-                           ->  Parallel Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test
-                           ->  Parallel Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_1
-                           ->  Parallel Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_2
-                     ->  Parallel Hash
-                           ->  Parallel Append
-                                 ->  Parallel Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_1
-                                 ->  Parallel Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test
-                                 ->  Parallel Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_2
-(18 rows)
+         ->  Hash Join
+               Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
+               ->  Append
+                     ->  Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test
+                     ->  Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_1
+                     ->  Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_2
+               ->  Hash
+                     ->  Append
+                           ->  Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test
+                           ->  Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_1
+                           ->  Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_2
+(16 rows)
 -- set partition-wise join on and parallel to off
 SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 0');
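For context on the surrounding test: before this commit the script re-enabled parallelism on the workers ahead of this EXPLAIN and turned it back off afterwards, using the ALTER SYSTEM plus pg_reload_conf() pattern visible in the context lines above. A minimal SQL sketch of that pattern, assuming Citus' run_command_on_workers() UDF as used throughout this test file; the value 4 is illustrative and not taken from this diff:

-- enable parallelism on every worker and reload so the change takes effect
SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 4');
SELECT success FROM run_command_on_workers('select pg_reload_conf()');
-- ... run the EXPLAIN under test here ...
-- restore the serial setting, as the line after the plan does with value 0
SELECT success FROM run_command_on_workers('alter system set max_parallel_workers_per_gather = 0');
SELECT success FROM run_command_on_workers('select pg_reload_conf()');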

@@ -320,6 +320,10 @@ if (-e $hll_control)
 }
 push(@pgOptions, '-c', "shared_preload_libraries=${sharedPreloadLibraries}");

+# Avoid parallelism to stabilize explain plans
+push(@pgOptions, '-c', "max_parallel_workers_per_gather=0");
+
+# Allow CREATE SUBSCRIPTION to work
 push(@pgOptions, '-c', "wal_level=logical");

 # Citus options set for the tests
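Since the harness passes these server options to the nodes it starts, the new default can be double-checked from a psql session once the test cluster is up. A minimal SQL sketch, assuming the coordinator connection used by the tests and the run_command_on_workers() UDF already shown above:

-- on the coordinator
SHOW max_parallel_workers_per_gather;
-- and on each worker
SELECT nodename, nodeport, result
FROM run_command_on_workers('show max_parallel_workers_per_gather');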