Update version-specific tests for PostgreSQL 10

Nitty-gritty, but it's all here.
pull/1439/head
Jason Petersen 2017-04-21 15:58:09 -06:00
parent 67eca1d3f6
commit d7fe6b06f9
GPG Key ID: 9F1D3510D110ABA9
16 changed files with 179 additions and 162 deletions
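
The updated tests pick their version-specific expected output by printing the server's major version at the top of each file. The detection idiom, repeated throughout the hunks below, is plain psql (reproduced here for reference; on PostgreSQL 10 it prints 10, on 9.x it prints 9):

    -- print major version to make version-specific tests clear
    SHOW server_version \gset
    SELECT substring(:'server_version', '\d+') AS major_version;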

View File

@@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
 SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version;
  major_version
 ---------------
- 9.6
+ 10
 (1 row)
 \a\t
@@ -903,6 +903,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 SET parallel_setup_cost=0;
 SET parallel_tuple_cost=0;
 SET min_parallel_relation_size=0;
+ERROR: unrecognized configuration parameter "min_parallel_relation_size"
+SET min_parallel_table_scan_size=0;
 SET max_parallel_workers_per_gather=4;
 -- ensure local plans display correctly
 CREATE TABLE lineitem_clone (LIKE lineitem);

View File

@@ -903,6 +903,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 SET parallel_setup_cost=0;
 SET parallel_tuple_cost=0;
 SET min_parallel_relation_size=0;
+SET min_parallel_table_scan_size=0;
+ERROR: unrecognized configuration parameter "min_parallel_table_scan_size"
 SET max_parallel_workers_per_gather=4;
 -- ensure local plans display correctly
 CREATE TABLE lineitem_clone (LIKE lineitem);

View File

@@ -860,6 +860,8 @@ SET parallel_tuple_cost=0;
 ERROR: unrecognized configuration parameter "parallel_tuple_cost"
 SET min_parallel_relation_size=0;
 ERROR: unrecognized configuration parameter "min_parallel_relation_size"
+SET min_parallel_table_scan_size=0;
+ERROR: unrecognized configuration parameter "min_parallel_table_scan_size"
 SET max_parallel_workers_per_gather=4;
 ERROR: unrecognized configuration parameter "max_parallel_workers_per_gather"
 -- ensure local plans display correctly
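
The three expected-output hunks above encode a GUC rename: PostgreSQL 10 replaced min_parallel_relation_size with min_parallel_table_scan_size. The shared test file now sets both parameters, so whichever one the server lacks shows up as an unrecognized-parameter error in that version's expected output. A minimal sketch of the pattern (not verbatim from the test):

    -- 9.6 only knows min_parallel_relation_size; 10 renamed it to
    -- min_parallel_table_scan_size. Setting both lets one test file run on
    -- either version, with the missing GUC reported as an
    -- "unrecognized configuration parameter" in that version's output.
    SET min_parallel_relation_size = 0;
    SET min_parallel_table_scan_size = 0;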

View File

@@ -7,17 +7,18 @@
 -- executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
 SET citus.enable_unique_job_ids TO off;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
 BEGIN;
 SET client_min_messages TO DEBUG4;
-DEBUG: CommitTransactionCommand
 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SET citus.task_executor_type TO 'task-tracker';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Debug4 log messages display jobIds within them. We explicitly set the jobId
 -- sequence here so that the regression output becomes independent of the number
 -- of jobs executed prior to running this test.
@@ -40,7 +41,6 @@ GROUP BY
 l_partkey, o_orderkey
 ORDER BY
 l_partkey, o_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: join prunable for intervals [1,1509] and [8997,14946]
 DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
 DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
@@ -112,7 +112,6 @@ DEBUG: completed cleanup query for job 2
 DEBUG: completed cleanup query for job 2
 DEBUG: completed cleanup query for job 1
 DEBUG: completed cleanup query for job 1
-DEBUG: CommitTransactionCommand
 l_partkey | o_orderkey | count
 -----------+------------+-------
 18 | 12005 | 1
@@ -157,7 +156,6 @@ GROUP BY
 l_partkey, o_orderkey
 ORDER BY
 l_partkey, o_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: generated sql query for task 2
 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
 DEBUG: generated sql query for task 4
@@ -234,13 +232,10 @@ DEBUG: completed cleanup query for job 4
 DEBUG: completed cleanup query for job 4
 DEBUG: completed cleanup query for job 5
 DEBUG: completed cleanup query for job 5
-DEBUG: CommitTransactionCommand
 l_partkey | o_orderkey | count
 -----------+------------+-------
 (0 rows)
 -- Reset client logging level to its previous value
 SET client_min_messages TO NOTICE;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 COMMIT;

View File

@@ -7,6 +7,14 @@
 -- executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
 SET citus.enable_unique_job_ids TO off;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 9
+(1 row)
 BEGIN;
 SET client_min_messages TO DEBUG4;
 DEBUG: CommitTransactionCommand

View File

@@ -6,17 +6,18 @@
 -- from a sql task to its depended tasks. Note that we set the executor type to task
 -- tracker executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
 BEGIN;
 SET client_min_messages TO DEBUG3;
-DEBUG: CommitTransactionCommand
 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SET citus.task_executor_type TO 'task-tracker';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Single range repartition join to test anchor-shard based task assignment and
 -- assignment propagation to merge and data-fetch tasks.
 SELECT
@@ -25,7 +26,6 @@ FROM
 orders, customer
 WHERE
 o_custkey = c_custkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: join prunable for intervals [1,1000] and [1001,2000]
@@ -43,7 +43,6 @@ DETAIL: Creating dependency on merge taskId 11
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 9 to node localhost:57638
 DEBUG: assigned task 3 to node localhost:57637
-DEBUG: CommitTransactionCommand
 count
 -------
 2984
@@ -54,9 +53,6 @@ DEBUG: CommitTransactionCommand
 -- the same merge task, and tests our constraint group creation and assignment
 -- propagation. Here 'orders' is considered the small table.
 SET citus.large_table_shard_count TO 3;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SELECT
 count(*)
 FROM
@@ -64,7 +60,6 @@ FROM
 WHERE
 o_custkey = c_custkey AND
 o_orderkey = l_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 9 to node localhost:57637
 DEBUG: assigned task 15 to node localhost:57638
 DEBUG: assigned task 12 to node localhost:57637
@@ -175,16 +170,12 @@ DEBUG: propagating assignment from merge task 54 to constrained sql task 45
 DEBUG: propagating assignment from merge task 61 to constrained sql task 51
 DEBUG: propagating assignment from merge task 61 to constrained sql task 54
 DEBUG: propagating assignment from merge task 68 to constrained sql task 60
-DEBUG: CommitTransactionCommand
 count
 -------
 11998
 (1 row)
 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Dual hash repartition join which tests the separate hash repartition join
 -- task assignment algorithm.
 SELECT
@@ -193,7 +184,6 @@ FROM
 lineitem, customer
 WHERE
 l_partkey = c_nationkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: assigned task 8 to node localhost:57637
@@ -237,7 +227,6 @@ DEBUG: assigned task 3 to node localhost:57638
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 9 to node localhost:57638
 DEBUG: assigned task 12 to node localhost:57637
-DEBUG: CommitTransactionCommand
 count
 -------
 125
@@ -245,6 +234,4 @@ DEBUG: CommitTransactionCommand
 -- Reset client logging level to its previous value
 SET client_min_messages TO NOTICE;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 COMMIT;

View File

@@ -6,6 +6,14 @@
 -- from a sql task to its depended tasks. Note that we set the executor type to task
 -- tracker executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 9
+(1 row)
 BEGIN;
 SET client_min_messages TO DEBUG3;
 DEBUG: CommitTransactionCommand

View File

@@ -4,6 +4,14 @@
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
 SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,
@@ -73,9 +81,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -87,44 +95,44 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (60 rows)
 -- Now set the minimum value for a shard to null. Then check that we don't apply
@@ -167,9 +175,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -181,51 +189,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
--> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
 -> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
 -> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (67 rows)
 -- Next, set the maximum value for another shard to null. Then check that we
@@ -271,9 +279,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -285,58 +293,58 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (74 rows)
 -- Last, set the minimum value to 0 and check that we don't treat it as null. We
@@ -379,9 +387,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -393,51 +401,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (67 rows)
 -- Set minimum and maximum values for two shards back to their original values

View File

@@ -4,6 +4,14 @@
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 9
+(1 row)
 SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,

View File

@@ -2,6 +2,14 @@
 -- MULTI_TASK_ASSIGNMENT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
 SET citus.explain_distributed_queries TO off;
 -- Check that our policies for assigning tasks to worker nodes run as expected.
 -- To test this, we first create a shell table, and then manually insert shard
@@ -46,19 +54,12 @@ BEGIN;
 -- the following log messages print node name and port numbers; and node numbers
 -- in regression tests depend upon PG_VERSION_NUM.
 SET client_min_messages TO DEBUG3;
-DEBUG: CommitTransactionCommand
 -- First test the default greedy task assignment policy
 SET citus.task_assignment_policy TO 'greedy';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: assigned task 4 to node localhost:57637
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -67,12 +68,9 @@ DEBUG: CommitTransactionCommand
 (3 rows)
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: assigned task 4 to node localhost:57637
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -82,16 +80,10 @@ DEBUG: CommitTransactionCommand
 -- Next test the first-replica task assignment policy
 SET citus.task_assignment_policy TO 'first-replica';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -100,12 +92,9 @@ DEBUG: CommitTransactionCommand
 (3 rows)
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -115,16 +104,10 @@ DEBUG: CommitTransactionCommand
 -- Finally test the round-robin task assignment policy
 SET citus.task_assignment_policy TO 'round-robin';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57638
 DEBUG: assigned task 4 to node localhost:57638
 DEBUG: assigned task 2 to node localhost:57637
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -133,12 +116,9 @@ DEBUG: CommitTransactionCommand
 (3 rows)
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -147,12 +127,9 @@ DEBUG: CommitTransactionCommand
 (3 rows)
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 DEBUG: assigned task 6 to node localhost:57638
 DEBUG: assigned task 4 to node localhost:57638
 DEBUG: assigned task 2 to node localhost:57637
-DEBUG: CommitTransactionCommand
 QUERY PLAN
 -----------------------------------------------------------------------
 Aggregate (cost=0.00..0.00 rows=0 width=0)
@@ -161,10 +138,5 @@ DEBUG: CommitTransactionCommand
 (3 rows)
 RESET citus.task_assignment_policy;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 RESET client_min_messages;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 COMMIT;

View File

@@ -2,6 +2,14 @@
 -- MULTI_TASK_ASSIGNMENT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 9
+(1 row)
 SET citus.explain_distributed_queries TO off;
 -- Check that our policies for assigning tasks to worker nodes run as expected.
 -- To test this, we first create a shell table, and then manually insert shard

View File

@@ -426,6 +426,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 SET parallel_setup_cost=0;
 SET parallel_tuple_cost=0;
 SET min_parallel_relation_size=0;
+SET min_parallel_table_scan_size=0;
 SET max_parallel_workers_per_gather=4;
 -- ensure local plans display correctly

View File

@@ -10,6 +10,9 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
 SET citus.enable_unique_job_ids TO off;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
 BEGIN;
 SET client_min_messages TO DEBUG4;

View File

@@ -9,6 +9,10 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
 BEGIN;
 SET client_min_messages TO DEBUG3;

View File

@@ -8,6 +8,10 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
 SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,

View File

@@ -5,6 +5,11 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
 SET citus.explain_distributed_queries TO off;