diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 322445b7f..370552207 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version; major_version --------------- - 9.6 + 10 (1 row) \a\t @@ -903,6 +903,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; +ERROR: unrecognized configuration parameter "min_parallel_relation_size" +SET min_parallel_table_scan_size=0; SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly CREATE TABLE lineitem_clone (LIKE lineitem); diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 322445b7f..0ecc3af06 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -903,6 +903,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; +SET min_parallel_table_scan_size=0; +ERROR: unrecognized configuration parameter "min_parallel_table_scan_size" SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly CREATE TABLE lineitem_clone (LIKE lineitem); diff --git a/src/test/regress/expected/multi_explain_1.out b/src/test/regress/expected/multi_explain_1.out index 0f76dc9d0..6bb2fbfc2 100644 --- a/src/test/regress/expected/multi_explain_1.out +++ b/src/test/regress/expected/multi_explain_1.out @@ -860,6 +860,8 @@ SET parallel_tuple_cost=0; ERROR: unrecognized configuration parameter "parallel_tuple_cost" SET min_parallel_relation_size=0; ERROR: unrecognized configuration parameter "min_parallel_relation_size" +SET min_parallel_table_scan_size=0; +ERROR: unrecognized configuration parameter "min_parallel_table_scan_size" SET max_parallel_workers_per_gather=4; ERROR: unrecognized configuration parameter "max_parallel_workers_per_gather" -- ensure local plans display correctly diff --git a/src/test/regress/expected/multi_large_table_join_planning.out b/src/test/regress/expected/multi_large_table_join_planning.out index 2237b8014..41db9b4e0 100644 --- a/src/test/regress/expected/multi_large_table_join_planning.out +++ b/src/test/regress/expected/multi_large_table_join_planning.out @@ -7,17 +7,18 @@ -- executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 10 +(1 row) + BEGIN; SET client_min_messages TO DEBUG4; -DEBUG: CommitTransactionCommand SET citus.large_table_shard_count TO 2; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand SET citus.task_executor_type TO 'task-tracker'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -- Debug4 log messages display jobIds within them. We explicitly set the jobId -- sequence here so that the regression output becomes independent of the number -- of jobs executed prior to running this test. 
@@ -40,7 +41,6 @@ GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; -DEBUG: StartTransactionCommand DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,4964] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] @@ -112,7 +112,6 @@ DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 -DEBUG: CommitTransactionCommand l_partkey | o_orderkey | count -----------+------------+------- 18 | 12005 | 1 @@ -157,7 +156,6 @@ GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; -DEBUG: StartTransactionCommand DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 4 @@ -234,13 +232,10 @@ DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 -DEBUG: CommitTransactionCommand l_partkey | o_orderkey | count -----------+------------+------- (0 rows) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility COMMIT; diff --git a/src/test/regress/expected/multi_large_table_join_planning_0.out b/src/test/regress/expected/multi_large_table_join_planning_0.out index 2237b8014..569e3acc2 100644 --- a/src/test/regress/expected/multi_large_table_join_planning_0.out +++ b/src/test/regress/expected/multi_large_table_join_planning_0.out @@ -7,6 +7,14 @@ -- executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 9 +(1 row) + BEGIN; SET client_min_messages TO DEBUG4; DEBUG: CommitTransactionCommand diff --git a/src/test/regress/expected/multi_large_table_task_assignment.out b/src/test/regress/expected/multi_large_table_task_assignment.out index 49c75fb6f..8eaf83f14 100644 --- a/src/test/regress/expected/multi_large_table_task_assignment.out +++ b/src/test/regress/expected/multi_large_table_task_assignment.out @@ -6,17 +6,18 @@ -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 10 +(1 row) + BEGIN; SET client_min_messages TO DEBUG3; -DEBUG: CommitTransactionCommand SET citus.large_table_shard_count TO 2; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand SET citus.task_executor_type TO 'task-tracker'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. 
SELECT @@ -25,7 +26,6 @@ FROM orders, customer WHERE o_custkey = c_custkey; -DEBUG: StartTransactionCommand DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] @@ -43,7 +43,6 @@ DETAIL: Creating dependency on merge taskId 11 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 -DEBUG: CommitTransactionCommand count ------- 2984 @@ -54,9 +53,6 @@ DEBUG: CommitTransactionCommand -- the same merge task, and tests our constraint group creation and assignment -- propagation. Here 'orders' is considered the small table. SET citus.large_table_shard_count TO 3; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand SELECT count(*) FROM @@ -64,7 +60,6 @@ FROM WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; -DEBUG: StartTransactionCommand DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 15 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 @@ -175,16 +170,12 @@ DEBUG: propagating assignment from merge task 54 to constrained sql task 45 DEBUG: propagating assignment from merge task 61 to constrained sql task 51 DEBUG: propagating assignment from merge task 61 to constrained sql task 54 DEBUG: propagating assignment from merge task 68 to constrained sql task 60 -DEBUG: CommitTransactionCommand count ------- 11998 (1 row) SET citus.large_table_shard_count TO 2; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -- Dual hash repartition join which tests the separate hash repartition join -- task assignment algorithm. SELECT @@ -193,7 +184,6 @@ FROM lineitem, customer WHERE l_partkey = c_nationkey; -DEBUG: StartTransactionCommand DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 8 to node localhost:57637 @@ -237,7 +227,6 @@ DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 -DEBUG: CommitTransactionCommand count ------- 125 @@ -245,6 +234,4 @@ DEBUG: CommitTransactionCommand -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility COMMIT; diff --git a/src/test/regress/expected/multi_large_table_task_assignment_0.out b/src/test/regress/expected/multi_large_table_task_assignment_0.out index 49c75fb6f..c752d3cb7 100644 --- a/src/test/regress/expected/multi_large_table_task_assignment_0.out +++ b/src/test/regress/expected/multi_large_table_task_assignment_0.out @@ -6,6 +6,14 @@ -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 9 +(1 row) + BEGIN; SET client_min_messages TO DEBUG3; DEBUG: CommitTransactionCommand diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index fd5491287..8abddb3d3 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -4,6 +4,14 @@ -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 10 +(1 row) + SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, @@ -73,9 +81,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate @@ -87,44 +95,44 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + -> Task + Node: host=localhost port=57638 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 
dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (60 rows) -- Now set the minimum value for a shard to null. Then check that we don't apply @@ -167,9 +175,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate @@ -181,51 +189,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders - -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders - -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders - -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders - -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders - -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: 
(orders.o_orderkey = lineitem.l_orderkey) + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + -> Task + Node: host=localhost port=57638 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + -> Task + Node: host=localhost port=57637 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + -> Task + Node: host=localhost port=57638 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + -> Task + Node: host=localhost port=57637 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (67 rows) -- Next, set the maximum value for another shard to null. Then check that we @@ -271,9 +279,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate @@ -285,58 +293,58 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + -> Task + Node: host=localhost 
port=57638 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (74 rows) -- Last, set the minimum value to 0 and check that we don't treat it as null. 
We @@ -379,9 +387,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate @@ -393,51 +401,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986] Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders + -> Task + Node: host=localhost port=57637 dbname=regression + -> Aggregate + -> Merge Join + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem + -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290009 on orders_290009 orders + Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem + -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (67 rows) -- 
Set minimum and maximum values for two shards back to their original values diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning_0.out b/src/test/regress/expected/multi_null_minmax_value_pruning_0.out index fd5491287..fd1ba6dc3 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning_0.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning_0.out @@ -4,6 +4,14 @@ -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 9 +(1 row) + SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 0468d42e2..edae227c0 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -2,6 +2,14 @@ -- MULTI_TASK_ASSIGNMENT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 10 +(1 row) + SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard @@ -46,19 +54,12 @@ BEGIN; -- the following log messages print node name and port numbers; and node numbers -- in regression tests depend upon PG_VERSION_NUM. 
SET client_min_messages TO DEBUG3; -DEBUG: CommitTransactionCommand -- First test the default greedy task assignment policy SET citus.task_assignment_policy TO 'greedy'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -67,12 +68,9 @@ DEBUG: CommitTransactionCommand (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -82,16 +80,10 @@ DEBUG: CommitTransactionCommand -- Next test the first-replica task assignment policy SET citus.task_assignment_policy TO 'first-replica'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -100,12 +92,9 @@ DEBUG: CommitTransactionCommand (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -115,16 +104,10 @@ DEBUG: CommitTransactionCommand -- Finally test the round-robin task assignment policy SET citus.task_assignment_policy TO 'round-robin'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -133,12 +116,9 @@ DEBUG: CommitTransactionCommand (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -147,12 +127,9 @@ DEBUG: CommitTransactionCommand (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: 
StartTransactionCommand -DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 -DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) @@ -161,10 +138,5 @@ DEBUG: CommitTransactionCommand (3 rows) RESET citus.task_assignment_policy; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand RESET client_min_messages; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility COMMIT; diff --git a/src/test/regress/expected/multi_task_assignment_policy_0.out b/src/test/regress/expected/multi_task_assignment_policy_0.out index 0468d42e2..642e05a6d 100644 --- a/src/test/regress/expected/multi_task_assignment_policy_0.out +++ b/src/test/regress/expected/multi_task_assignment_policy_0.out @@ -2,6 +2,14 @@ -- MULTI_TASK_ASSIGNMENT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + major_version +--------------- + 9 +(1 row) + SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index e5d32c922..2915b79af 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -426,6 +426,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; +SET min_parallel_table_scan_size=0; SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly diff --git a/src/test/regress/sql/multi_large_table_join_planning.sql b/src/test/regress/sql/multi_large_table_join_planning.sql index 3a10c5f76..d8da3454f 100644 --- a/src/test/regress/sql/multi_large_table_join_planning.sql +++ b/src/test/regress/sql/multi_large_table_join_planning.sql @@ -10,6 +10,9 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; BEGIN; SET client_min_messages TO DEBUG4; diff --git a/src/test/regress/sql/multi_large_table_task_assignment.sql b/src/test/regress/sql/multi_large_table_task_assignment.sql index bd4de27d7..9a69189fa 100644 --- a/src/test/regress/sql/multi_large_table_task_assignment.sql +++ b/src/test/regress/sql/multi_large_table_task_assignment.sql @@ -9,6 +9,10 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + BEGIN; SET client_min_messages TO DEBUG3; diff --git a/src/test/regress/sql/multi_null_minmax_value_pruning.sql b/src/test/regress/sql/multi_null_minmax_value_pruning.sql index 60ebd51cf..03dbb77f1 100644 --- a/src/test/regress/sql/multi_null_minmax_value_pruning.sql +++ b/src/test/regress/sql/multi_null_minmax_value_pruning.sql @@ -8,6 +8,10 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT 
substring(:'server_version', '\d+') AS major_version; + SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, diff --git a/src/test/regress/sql/multi_task_assignment_policy.sql b/src/test/regress/sql/multi_task_assignment_policy.sql index 28f8c57ed..74253d36e 100644 --- a/src/test/regress/sql/multi_task_assignment_policy.sql +++ b/src/test/regress/sql/multi_task_assignment_policy.sql @@ -5,6 +5,11 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; +-- print major version to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+') AS major_version; + + SET citus.explain_distributed_queries TO off;
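
Note on the recurring pattern in this patch, shown here as a minimal standalone psql sketch (not part of any patched file): each test now probes the server version up front so the expected output records which PostgreSQL major release it was generated against, which makes it clear which alternative output file (for example multi_explain.out versus multi_explain_0.out) belongs to which release.

    -- capture server_version into a psql variable, then keep only the
    -- leading digits; prints "10" on PostgreSQL 10 and "9" on 9.x
    SHOW server_version \gset
    SELECT substring(:'server_version', '\d+') AS major_version;

The multi_explain changes follow the same dual-version approach for GUCs: PostgreSQL 10 renamed min_parallel_relation_size to min_parallel_table_scan_size, so the test sets both names and each alternative expected file accepts an "unrecognized configuration parameter" error for whichever spelling its server does not know.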