From 1c2ee39f15d34b78ea8b64a2eac6dcdb8371f969 Mon Sep 17 00:00:00 2001 From: Sait Talha Nisanci Date: Tue, 19 May 2020 13:51:40 +0300 Subject: [PATCH] update repartition join tests for check-multi --- .../regress/expected/aggregate_support.out | 11 +- .../regress/expected/multi_cross_shard.out | 1 + src/test/regress/expected/multi_explain.out | 15 +- .../expected/multi_join_order_additional.out | 19 +- .../multi_join_order_tpch_repartition.out | 11 +- src/test/regress/expected/multi_multiuser.out | 3 + .../regress/expected/multi_partitioning.out | 1 + .../expected/multi_reference_table.out | 7 +- .../multi_repartition_join_planning.out | 230 +------------- .../multi_repartition_join_pruning.out | 23 +- ...multi_repartition_join_task_assignment.out | 1 + .../expected/multi_repartition_udt.out | 4 +- .../regress/expected/multi_schema_support.out | 1 + .../multi_single_relation_subquery.out | 18 +- .../expected/multi_task_string_size.out | 8 +- src/test/regress/expected/multi_view.out | 22 +- .../set_operation_and_local_tables.out | 1 + src/test/regress/expected/with_executors.out | 6 +- .../multi_complex_count_distinct.source | 286 ++++++++++-------- src/test/regress/sql/aggregate_support.sql | 3 +- src/test/regress/sql/multi_cross_shard.sql | 2 + src/test/regress/sql/multi_explain.sql | 1 + .../sql/multi_join_order_additional.sql | 1 + .../sql/multi_join_order_tpch_repartition.sql | 1 + src/test/regress/sql/multi_multiuser.sql | 3 + src/test/regress/sql/multi_partitioning.sql | 1 + .../regress/sql/multi_reference_table.sql | 8 +- .../sql/multi_repartition_join_planning.sql | 3 +- .../sql/multi_repartition_join_pruning.sql | 1 + ...multi_repartition_join_task_assignment.sql | 1 + .../regress/sql/multi_repartition_udt.sql | 2 + src/test/regress/sql/multi_schema_support.sql | 1 + .../regress/sql/multi_task_string_size.sql | 1 + .../sql/set_operation_and_local_tables.sql | 2 + 34 files changed, 282 insertions(+), 417 deletions(-) diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 0a30d061c..beb4165b1 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -404,15 +404,16 @@ select * FROM ( select * FROM ( SELECT key k, avg(distinct floor(agg1.val/2)) m from aggdata agg1 group by key -) subq; +) subq +order by k,m; k | m --------------------------------------------------------------------- 1 | 1 - 5 | - 3 | 2 - 7 | 4 - 6 | 2 | 1.5 + 3 | 2 + 5 | + 6 | + 7 | 4 9 | 0 (7 rows) diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out index 7ee7fb374..bcc53ef5a 100644 --- a/src/test/regress/expected/multi_cross_shard.out +++ b/src/test/regress/expected/multi_cross_shard.out @@ -3,6 +3,7 @@ -- -- Tests to log cross shard queries according to error log level -- +SET citus.enable_repartition_joins to ON; -- Create a distributed table and add data to it CREATE TABLE multi_task_table ( diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 7ca2f509f..1d7497c5d 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -5,6 +5,7 @@ SET citus.next_shard_id TO 570000; \a\t RESET citus.task_executor_type; SET citus.explain_distributed_queries TO on; +SET citus.enable_repartition_joins to ON; -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb @@ -947,7 +948,7 @@ SET 
citus.task_executor_type TO 'task-tracker'; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task @@ -963,7 +964,7 @@ EXPLAIN (COSTS FALSE) AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -989,7 +990,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Node Type": "Custom Scan", "Parent Relationship": "Outer", - "Custom Plan Provider": "Citus Task-Tracker", + "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, "Distributed Query": { "Job": { @@ -1038,7 +1039,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Custom Scan Outer - Citus Task-Tracker + Citus Adaptive false @@ -1097,7 +1098,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" - Custom Plan Provider: "Citus Task-Tracker" + Custom Plan Provider: "Citus Adaptive" Parallel Aware: false Distributed Query: Job: @@ -1114,7 +1115,7 @@ Aggregate -- ensure distributed plans don't break EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem; Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 2 Tasks Shown: One of 2 -> Task @@ -1126,7 +1127,7 @@ PREPARE task_tracker_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query; Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 1 Tasks Shown: All -> Task diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out index feb7d7576..6dae378df 100644 --- a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -6,6 +6,7 @@ SET citus.next_shard_id TO 650000; SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise +SET citus.enable_repartition_joins to ON; SET citus.shard_count to 2; SET citus.shard_replication_factor to 1; RESET client_min_messages; @@ -79,7 +80,7 @@ DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (2 rows) @@ -95,7 +96,7 @@ LOG: join order: [ "lineitem" ][ local partition join "orders" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -110,7 +111,7 @@ LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -122,7 +123,7 @@ LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan 
(Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -134,7 +135,7 @@ LOG: join order: [ "customer_hash" ][ reference join "nation" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -147,7 +148,7 @@ LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -160,7 +161,7 @@ LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -173,7 +174,7 @@ LOG: join order: [ "orders_hash" ][ single range partition join "customer_appen QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -198,7 +199,7 @@ LOG: join order: [ "users_table" ][ local partition join "events_table" ][ loca QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out index a0b7a72d4..5d7b5c2c1 100644 --- a/src/test/regress/expected/multi_join_order_tpch_repartition.out +++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out @@ -6,6 +6,7 @@ SET citus.next_shard_id TO 660000; SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise +SET citus.enable_repartition_joins to ON; SET client_min_messages TO LOG; -- The following queries are basically the same as the ones in tpch_small -- except that more data has been loaded into customer and part tables. 
Therefore, @@ -25,7 +26,7 @@ LOG: join order: [ "lineitem" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -60,7 +61,7 @@ LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate -> HashAggregate Group Key: remote_scan.l_orderkey, remote_scan.o_orderdate, remote_scan.o_shippriority - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (6 rows) @@ -104,7 +105,7 @@ LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range Sort Key: (sum(remote_scan.revenue)) DESC -> HashAggregate Group Key: remote_scan.c_custkey, remote_scan.c_name, remote_scan.c_acctbal, remote_scan.c_phone, remote_scan.n_name, remote_scan.c_address, remote_scan.c_comment - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (6 rows) @@ -143,7 +144,7 @@ LOG: join order: [ "lineitem" ][ single range partition join "part_append" ] QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (3 rows) @@ -164,7 +165,7 @@ LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single range --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.l_partkey - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) explain statements for distributed queries are not enabled (4 rows) diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 0b84a601e..ba6682e90 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -142,6 +142,7 @@ SELECT count(*) FROM test WHERE id = 1; (1 row) SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; count | min --------------------------------------------------------------------- @@ -230,6 +231,7 @@ SELECT count(*) FROM test WHERE id = 1; (1 row) SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; count | min --------------------------------------------------------------------- @@ -288,6 +290,7 @@ ERROR: permission denied for table test SELECT count(*) FROM test WHERE id = 1; ERROR: permission denied for table test SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; ERROR: permission denied for table test -- test re-partition query diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index 18691286c..e30219f08 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -4,6 +4,7 @@ SET citus.next_shard_id TO 1660000; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; +SET citus.enable_repartition_joins to ON; -- -- Distributed Partitioned Table Creation Tests -- diff --git a/src/test/regress/expected/multi_reference_table.out 
b/src/test/regress/expected/multi_reference_table.out index 57dd97344..0fb645c64 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -1087,12 +1087,14 @@ LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_te (2 rows) SET citus.task_executor_type to "task-tracker"; +SET citus.enable_repartition_joins to ON; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE - colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; + colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2 +ORDER BY colocated_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------------------------------------------------------------------- @@ -1105,7 +1107,8 @@ SELECT FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE - colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; + colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1 +ORDER BY reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_repartition_join_planning.out b/src/test/regress/expected/multi_repartition_join_planning.out index 2e37256eb..86f3b7642 100644 --- a/src/test/regress/expected/multi_repartition_join_planning.out +++ b/src/test/regress/expected/multi_repartition_join_planning.out @@ -7,6 +7,7 @@ -- executor here, as we cannot run repartition jobs with real time executor. SET citus.next_shard_id TO 690000; SET citus.enable_unique_job_ids TO off; +SET citus.enable_repartition_joins to ON; create schema repartition_join; DROP TABLE IF EXISTS repartition_join.order_line; NOTICE: table "order_line" does not exist, skipping @@ -40,7 +41,7 @@ SELECT create_distributed_table('stock','s_w_id'); (1 row) BEGIN; -SET client_min_messages TO DEBUG4; +SET client_min_messages TO DEBUG; SET citus.task_executor_type TO 'task-tracker'; -- Debug4 log messages display jobIds within them. We explicitly set the jobId -- sequence here so that the regression output becomes independent of the number @@ -65,62 +66,26 @@ GROUP BY ORDER BY l_partkey, o_orderkey; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: no valid constraints found -DEBUG: shard count: 2 DEBUG: join prunable for intervals [1,5986] and [8997,14947] DEBUG: join prunable for intervals [8997,14947] and [1,5986] -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 2 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 "pg_merge_job_0001.task_000003" JOIN part_append_290005 part_append ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_append_280002 part_append ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 6 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 3 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN 
customer_append_290004 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_append_280001 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_append_280000 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, "pg_merge_job_0002.task_000011".intermediate_column_2_1" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 3 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT l_partkey, o_orderkey, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 3 2 4)'::cstring(0)) remote_scan(l_partkey integer, o_orderkey bigint, count bigint) GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey -DEBUG: completed cleanup query for job 3 -DEBUG: completed cleanup query for job 3 -DEBUG: completed cleanup query for job 2 -DEBUG: completed cleanup query for job 2 -DEBUG: completed cleanup query for job 1 -DEBUG: completed cleanup query for job 1 l_partkey | o_orderkey | count --------------------------------------------------------------------- 18 | 12005 | 1 @@ -166,22 +131,6 @@ GROUP BY ORDER BY l_partkey, o_orderkey; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -194,14 +143,6 @@ DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 
"pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 @@ -218,17 +159,6 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT l_partkey, o_orderkey, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(l_partkey integer, o_orderkey bigint, count bigint) GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey -DEBUG: completed cleanup query for job 6 -DEBUG: completed cleanup query for job 6 -DEBUG: completed cleanup query for job 4 -DEBUG: completed cleanup query for job 4 -DEBUG: completed cleanup query for job 5 -DEBUG: completed cleanup query for job 5 l_partkey | o_orderkey | count --------------------------------------------------------------------- (0 rows) @@ -245,22 +175,6 @@ GROUP BY ORDER BY o_orderkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -273,14 +187,6 @@ DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0008.task_000003".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000003".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000003 "pg_merge_job_0007.task_000003" JOIN pg_merge_job_0008.task_000003 "pg_merge_job_0008.task_000003" ON (("pg_merge_job_0007.task_000003".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000003".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000003".intermediate_column_8_0" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT 
"pg_merge_job_0008.task_000006".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000006".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000006 "pg_merge_job_0007.task_000006" JOIN pg_merge_job_0008.task_000006 "pg_merge_job_0008.task_000006" ON (("pg_merge_job_0007.task_000006".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000006".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000006".intermediate_column_8_0" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0008.task_000009".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000009".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000009 "pg_merge_job_0007.task_000009" JOIN pg_merge_job_0008.task_000009 "pg_merge_job_0008.task_000009" ON (("pg_merge_job_0007.task_000009".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000009".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000009".intermediate_column_8_0" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0008.task_000012".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000012".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000012 "pg_merge_job_0007.task_000012" JOIN pg_merge_job_0008.task_000012 "pg_merge_job_0008.task_000012" ON (("pg_merge_job_0007.task_000012".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000012".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000012".intermediate_column_8_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 @@ -297,17 +203,6 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT o_orderkey, o_shippriority, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, o_shippriority integer, count bigint) GROUP BY o_orderkey ORDER BY o_orderkey -DEBUG: completed cleanup query for job 9 -DEBUG: completed cleanup query for job 9 -DEBUG: completed cleanup query for job 7 -DEBUG: completed cleanup query for job 7 -DEBUG: completed cleanup query for job 8 -DEBUG: completed cleanup query for job 8 o_orderkey | o_shippriority | count --------------------------------------------------------------------- (0 rows) @@ -326,22 +221,6 @@ GROUP BY ORDER BY o_orderkey; DEBUG: Router planner does not support append-partitioned tables. 
-DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -354,14 +233,6 @@ DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0011.task_000003".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000003".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000003 "pg_merge_job_0010.task_000003" JOIN pg_merge_job_0011.task_000003 "pg_merge_job_0011.task_000003" ON (("pg_merge_job_0010.task_000003".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000003".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000003".intermediate_column_11_0" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0011.task_000006".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000006".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000006 "pg_merge_job_0010.task_000006" JOIN pg_merge_job_0011.task_000006 "pg_merge_job_0011.task_000006" ON (("pg_merge_job_0010.task_000006".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000006".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000006".intermediate_column_11_0" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0011.task_000009".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000009".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000009 "pg_merge_job_0010.task_000009" JOIN pg_merge_job_0011.task_000009 "pg_merge_job_0011.task_000009" ON (("pg_merge_job_0010.task_000009".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000009".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000009".intermediate_column_11_0" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0011.task_000012".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000012".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000012 "pg_merge_job_0010.task_000012" JOIN pg_merge_job_0011.task_000012 "pg_merge_job_0011.task_000012" ON (("pg_merge_job_0010.task_000012".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000012".intermediate_column_11_1))) WHERE true GROUP BY 
"pg_merge_job_0011.task_000012".intermediate_column_11_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 @@ -378,17 +249,6 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT o_orderkey, o_shippriority, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, o_shippriority integer, count bigint) GROUP BY o_orderkey ORDER BY o_orderkey -DEBUG: completed cleanup query for job 12 -DEBUG: completed cleanup query for job 12 -DEBUG: completed cleanup query for job 10 -DEBUG: completed cleanup query for job 10 -DEBUG: completed cleanup query for job 11 -DEBUG: completed cleanup query for job 11 o_orderkey | o_shippriority | count --------------------------------------------------------------------- (0 rows) @@ -405,22 +265,6 @@ GROUP BY ORDER BY o_orderkey; DEBUG: Router planner does not support append-partitioned tables. -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 2 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -433,14 +277,6 @@ DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0014.task_000003".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000003".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000003".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000003 "pg_merge_job_0013.task_000003" JOIN pg_merge_job_0014.task_000003 "pg_merge_job_0014.task_000003" ON (("pg_merge_job_0013.task_000003".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000003".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000003".intermediate_column_14_0" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0014.task_000006".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000006".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000006".intermediate_column_14_1) AS worker_column_3 FROM 
(pg_merge_job_0013.task_000006 "pg_merge_job_0013.task_000006" JOIN pg_merge_job_0014.task_000006 "pg_merge_job_0014.task_000006" ON (("pg_merge_job_0013.task_000006".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000006".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000006".intermediate_column_14_0" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0014.task_000009".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000009".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000009".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000009 "pg_merge_job_0013.task_000009" JOIN pg_merge_job_0014.task_000009 "pg_merge_job_0014.task_000009" ON (("pg_merge_job_0013.task_000009".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000009".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000009".intermediate_column_14_0" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0014.task_000012".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000012".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000012".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000012 "pg_merge_job_0013.task_000012" JOIN pg_merge_job_0014.task_000012 "pg_merge_job_0014.task_000012" ON (("pg_merge_job_0013.task_000012".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000012".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000012".intermediate_column_14_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 @@ -457,17 +293,6 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 12 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT o_orderkey, any_value(any_value) AS any_value FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, any_value integer, worker_column_3 integer) GROUP BY o_orderkey ORDER BY o_orderkey -DEBUG: completed cleanup query for job 15 -DEBUG: completed cleanup query for job 15 -DEBUG: completed cleanup query for job 13 -DEBUG: completed cleanup query for job 13 -DEBUG: completed cleanup query for job 14 -DEBUG: completed cleanup query for job 14 o_orderkey | any_value --------------------------------------------------------------------- (0 rows) @@ -481,39 +306,7 @@ select s_i_id group by s_i_id, s_w_id, s_quantity having s_quantity > random() ; -DEBUG: no valid constraints found -DEBUG: shard count: 4 -DEBUG: no valid constraints found -DEBUG: shard count: 4 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: no valid constraints found -DEBUG: shard count: 4 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690004 stock WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690005 stock WHERE true" -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM 
stock_690006 stock WHERE true" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690007 stock WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: no valid constraints found -DEBUG: shard count: 4 -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT ol_i_id FROM order_line_690000 order_line WHERE true" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT ol_i_id FROM order_line_690001 order_line WHERE true" -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT ol_i_id FROM order_line_690002 order_line WHERE true" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT ol_i_id FROM order_line_690003 order_line WHERE true" -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 @@ -526,14 +319,6 @@ DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0016.task_000005".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000005".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000005".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000005 "pg_merge_job_0016.task_000005" JOIN pg_merge_job_0017.task_000005 "pg_merge_job_0017.task_000005" ON (("pg_merge_job_0017.task_000005".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000005".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000005".intermediate_column_16_0, "pg_merge_job_0016.task_000005".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000005".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0016.task_000010".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000010".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000010".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000010 "pg_merge_job_0016.task_000010" JOIN pg_merge_job_0017.task_000010 "pg_merge_job_0017.task_000010" ON (("pg_merge_job_0017.task_000010".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000010".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000010".intermediate_column_16_0, "pg_merge_job_0016.task_000010".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000010".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0016.task_000015".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000015".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000015".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000015 "pg_merge_job_0016.task_000015" JOIN 
pg_merge_job_0017.task_000015 "pg_merge_job_0017.task_000015" ON (("pg_merge_job_0017.task_000015".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000015".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000015".intermediate_column_16_0, "pg_merge_job_0016.task_000015".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000015".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0016.task_000020".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000020".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000020".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000020 "pg_merge_job_0016.task_000020" JOIN pg_merge_job_0017.task_000020 "pg_merge_job_0017.task_000020" ON (("pg_merge_job_0017.task_000020".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000020".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000020".intermediate_column_16_0, "pg_merge_job_0016.task_000020".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000020".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 2 @@ -550,17 +335,6 @@ DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 20 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 20 -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: assigned task to node localhost:xxxxx -DEBUG: master query: SELECT s_i_id FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(s_i_id integer, worker_column_2 integer, worker_column_3 numeric) -DEBUG: completed cleanup query for job 18 -DEBUG: completed cleanup query for job 18 -DEBUG: completed cleanup query for job 16 -DEBUG: completed cleanup query for job 16 -DEBUG: completed cleanup query for job 17 -DEBUG: completed cleanup query for job 17 s_i_id --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out index b32d88cd8..c29f2e318 100644 --- a/src/test/regress/expected/multi_repartition_join_pruning.out +++ b/src/test/regress/expected/multi_repartition_join_pruning.out @@ -7,6 +7,7 @@ SET citus.next_shard_id TO 700000; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; -- Single range-repartition join to test join-pruning behaviour. EXPLAIN (COSTS OFF) SELECT @@ -31,7 +32,7 @@ DETAIL: Creating dependency on merge taskId 9 QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 3 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -77,7 +78,7 @@ DEBUG: Router planner does not support append-partitioned tables. 
QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -112,7 +113,7 @@ DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -175,7 +176,7 @@ DETAIL: Creating dependency on merge taskId 16 QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -240,7 +241,7 @@ DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN --------------------------------------------------------------------- Aggregate - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -275,7 +276,7 @@ WHERE DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -305,7 +306,7 @@ WHERE DEBUG: Router planner does not support append-partitioned tables. QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob @@ -319,9 +320,9 @@ SELECT FROM orders INNER JOIN customer_append ON (o_custkey = c_custkey AND false); DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: All (3 rows) @@ -334,9 +335,9 @@ FROM WHERE o_custkey = c_custkey AND false; DEBUG: Router planner does not support append-partitioned tables. - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) Task Count: 0 Tasks Shown: All (3 rows) diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment.out b/src/test/regress/expected/multi_repartition_join_task_assignment.out index 78d0216a8..b976cf644 100644 --- a/src/test/regress/expected/multi_repartition_join_task_assignment.out +++ b/src/test/regress/expected/multi_repartition_join_task_assignment.out @@ -9,6 +9,7 @@ SET citus.next_shard_id TO 710000; BEGIN; SET client_min_messages TO DEBUG3; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. 
SELECT diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index f904a1029..4724ef887 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -2,6 +2,7 @@ -- MULTI_REPARTITION_UDT -- SET citus.next_shard_id TO 535000; +SET citus.enable_repartition_joins to ON; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... @@ -125,6 +126,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- Distribute and populate the two tables. SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; +SET citus.enable_repartition_joins to ON; SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); create_distributed_table --------------------------------------------------------------------- @@ -171,7 +173,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] QUERY PLAN --------------------------------------------------------------------- - Custom Scan (Citus Task-Tracker) + Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index dae406c50..b9fd13d23 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -828,6 +828,7 @@ SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nat (1 row) \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; +SET citus.enable_repartition_joins to ON; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column diff --git a/src/test/regress/expected/multi_single_relation_subquery.out b/src/test/regress/expected/multi_single_relation_subquery.out index aa097a015..5108a367d 100644 --- a/src/test/regress/expected/multi_single_relation_subquery.out +++ b/src/test/regress/expected/multi_single_relation_subquery.out @@ -198,8 +198,15 @@ group by l_suppkey ORDER BY 2 DESC, 1 DESC LIMIT 5; -ERROR: cannot perform distributed planning on this query -DETAIL: Subqueries with limit are not supported yet + l_suppkey | total_suppkey_count +--------------------------------------------------------------------- + 35 | 5 + 112 | 4 + 102 | 4 + 73 | 4 + 123 | 3 +(5 rows) + -- Check that we don't support subqueries without aggregates. select DISTINCT rounded_tax @@ -212,8 +219,11 @@ from l_tax) as distributed_table ORDER BY 1 DESC LIMIT 5; -ERROR: cannot perform distributed planning on this query -DETAIL: Subqueries without aggregates are not supported yet + rounded_tax +--------------------------------------------------------------------- + 0 +(1 row) + -- Check that we support subqueries with count(distinct). 
select avg(different_shipment_days) diff --git a/src/test/regress/expected/multi_task_string_size.out b/src/test/regress/expected/multi_task_string_size.out index 7e4a3c188..76ac56dab 100644 --- a/src/test/regress/expected/multi_task_string_size.out +++ b/src/test/regress/expected/multi_task_string_size.out @@ -228,8 +228,12 @@ SET client_min_messages to ERROR; SELECT raise_failed_execution(' SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); '); -ERROR: Task failed to execute -CONTEXT: PL/pgSQL function raise_failed_execution(text) line 6 at RAISE + raise_failed_execution +--------------------------------------------------------------------- + +(1 row) + +SET citus.enable_repartition_joins to ON; -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); long_column_001 | long_column_002 | long_column_003 diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out index 82e20b8f3..03027b05d 100644 --- a/src/test/regress/expected/multi_view.out +++ b/src/test/regress/expected/multi_view.out @@ -204,8 +204,15 @@ SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM air_shipped_lineitems GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; -ERROR: cannot perform distributed planning on this query -DETAIL: Subqueries without group by clause are not supported yet + l_suppkey | count +--------------------------------------------------------------------- + 7680 | 4 + 160 | 3 + 1042 | 3 + 1318 | 3 + 5873 | 3 +(5 rows) + -- logically same query without a view works fine SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) @@ -226,8 +233,15 @@ SELECT l_suppkey, count(*) FROM FROM (SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR') asi GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; -ERROR: cannot perform distributed planning on this query -DETAIL: Subqueries without group by clause are not supported yet + l_suppkey | count +--------------------------------------------------------------------- + 7680 | 4 + 160 | 3 + 1042 | 3 + 1318 | 3 + 5873 | 3 +(5 rows) + -- repartition query on view with single table subquery CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; diff --git a/src/test/regress/expected/set_operation_and_local_tables.out b/src/test/regress/expected/set_operation_and_local_tables.out index c0b9c27c9..4c78544fc 100644 --- a/src/test/regress/expected/set_operation_and_local_tables.out +++ b/src/test/regress/expected/set_operation_and_local_tables.out @@ -1,5 +1,6 @@ CREATE SCHEMA recursive_set_local; SET search_path TO recursive_set_local, public; +SET citus.enable_repartition_joins to ON; CREATE TABLE recursive_set_local.test (x int, y int); SELECT create_distributed_table('test', 'x'); create_distributed_table diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out index 5c1fb8317..69633bdb8 100644 --- a/src/test/regress/expected/with_executors.out +++ b/src/test/regress/expected/with_executors.out @@ -380,6 +380,10 @@ FROM users_table, cte_merge WHERE users_table.user_id = cte_merge.u_id; -ERROR: Complex subqueries and CTEs are not supported when task_executor_type is set to 'task-tracker' 
+ count +--------------------------------------------------------------------- + 4365606 +(1 row) + DROP SCHEMA with_executors CASCADE; NOTICE: drop cascades to table local_table diff --git a/src/test/regress/output/multi_complex_count_distinct.source b/src/test/regress/output/multi_complex_count_distinct.source index a2be31e78..8520a1893 100644 --- a/src/test/regress/output/multi_complex_count_distinct.source +++ b/src/test/regress/output/multi_complex_count_distinct.source @@ -24,13 +24,13 @@ CREATE TABLE lineitem_hash ( l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) -\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +\copy lineitem_hash FROM '/home/talha/citus/src/test/regress/data/lineitem.1.data' with delimiter '|' +\copy lineitem_hash FROM '/home/talha/citus/src/test/regress/data/lineitem.2.data' with delimiter '|' ANALYZE lineitem_hash; SET citus.task_executor_type to "task-tracker"; -- count(distinct) is supported on top level query if there @@ -41,8 +41,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 14885 | 7 14884 | 7 14821 | 7 @@ -62,19 +62,19 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.l_orderkey, remote_scan.count -> Sort Output: remote_scan.l_orderkey, remote_scan.count Sort Key: remote_scan.count DESC, remote_scan.l_orderkey DESC - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.l_orderkey, remote_scan.count Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit Output: l_orderkey, (count(DISTINCT l_partkey)) -> Sort @@ -93,8 +93,8 @@ SELECT FROM lineitem_hash ORDER BY 1 DESC LIMIT 10; - count -------- + count +--------------------------------------------------------------------- 11661 (1 row) @@ -104,8 +104,8 @@ SELECT FROM lineitem_hash ORDER BY 1 DESC LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: (count(DISTINCT remote_scan.count)) -> Sort @@ -113,12 +113,12 @@ SELECT Sort Key: (count(DISTINCT remote_scan.count)) DESC -> Aggregate Output: count(DISTINCT remote_scan.count) - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.count Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx 
dbname=regression -> HashAggregate Output: l_partkey Group Key: lineitem_hash.l_partkey @@ -132,8 +132,8 @@ SELECT GROUP BY l_shipmode ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- TRUCK | 1757 MAIL | 1730 AIR | 1702 @@ -150,8 +150,8 @@ SELECT GROUP BY l_shipmode ORDER BY 2 DESC, 1 DESC LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) -> Sort @@ -163,12 +163,12 @@ SELECT -> Sort Output: remote_scan.l_shipmode, remote_scan.count Sort Key: remote_scan.l_shipmode DESC - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.l_shipmode, remote_scan.count Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_shipmode, l_partkey Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey @@ -183,8 +183,8 @@ SELECT GROUP BY l_orderkey ORDER BY 3 DESC, 2 DESC, 1 LIMIT 10; - l_orderkey | count | count -------------+-------+------- + l_orderkey | count | count +--------------------------------------------------------------------- 226 | 7 | 7 1316 | 7 | 7 1477 | 7 | 7 @@ -204,19 +204,19 @@ SELECT GROUP BY l_orderkey ORDER BY 3 DESC, 2 DESC, 1 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 -> Sort Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 Sort Key: remote_scan.count_1 DESC, remote_scan.count DESC, remote_scan.l_orderkey - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode)) -> Sort @@ -233,8 +233,8 @@ SELECT SELECT count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode) FROM lineitem_hash; - count | count | count --------+-------+------- + count | count | count +--------------------------------------------------------------------- 2985 | 11661 | 7 (1 row) @@ -242,16 +242,16 @@ EXPLAIN (COSTS false, VERBOSE true) SELECT count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode) FROM lineitem_hash; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Aggregate Output: count(DISTINCT 
remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2) - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2 Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_orderkey, l_partkey, l_shipmode Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode @@ -265,8 +265,8 @@ SELECT count(distinct l_partkey), count(l_partkey), count(distinct l_shipmode), count(l_shipmode) FROM lineitem_hash; - count | count | count | count | count | count --------+-------+-------+-------+-------+------- + count | count | count | count | count | count +--------------------------------------------------------------------- 2985 | 12000 | 11661 | 12000 | 7 | 12000 (1 row) @@ -276,8 +276,8 @@ SELECT FROM lineitem_hash GROUP BY l_shipmode ORDER BY 1, 2 DESC, 3 DESC; - l_shipmode | count | count -------------+-------+------- + l_shipmode | count | count +--------------------------------------------------------------------- AIR | 1702 | 1327 FOB | 1700 | 1276 MAIL | 1730 | 1299 @@ -295,8 +295,8 @@ SELECT GROUP BY l_shipmode HAVING count(distinct l_orderkey) > 1300 ORDER BY 1, 2 DESC; - l_shipmode | count | count -------------+-------+------- + l_shipmode | count | count +--------------------------------------------------------------------- AIR | 1702 | 1327 TRUCK | 1757 | 1333 (2 rows) @@ -308,8 +308,8 @@ SELECT GROUP BY l_shipmode HAVING count(distinct l_orderkey) > 1300 ORDER BY 1, 2 DESC; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- AIR | 1702 TRUCK | 1757 (2 rows) @@ -322,8 +322,8 @@ SELECT GROUP BY l_shipmode HAVING count(distinct l_suppkey) > 1550 ORDER BY 1, 2 DESC; - l_shipmode | count | count -------------+-------+------- + l_shipmode | count | count +--------------------------------------------------------------------- AIR | 1702 | 1564 FOB | 1700 | 1571 MAIL | 1730 | 1573 @@ -340,8 +340,8 @@ SELECT GROUP BY l_shipmode HAVING count(distinct l_suppkey) > 1550 ORDER BY 1, 2 DESC; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- AIR | 1702 FOB | 1700 MAIL | 1730 @@ -358,8 +358,8 @@ SELECT GROUP BY l_shipmode HAVING count(distinct l_suppkey) > 1550 ORDER BY 1, 2 DESC; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Sort Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) Sort Key: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) DESC @@ -370,12 +370,12 @@ SELECT -> Sort Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3 Sort Key: remote_scan.l_shipmode - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3 Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_shipmode, l_partkey, l_suppkey Group Key: 
lineitem_hash.l_shipmode, lineitem_hash.l_partkey, lineitem_hash.l_suppkey @@ -392,8 +392,8 @@ SELECT * GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 14885 | 7 14884 | 7 14821 | 7 @@ -414,8 +414,8 @@ SELECT * GROUP BY l_partkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_partkey | count ------------+------- + l_partkey | count +--------------------------------------------------------------------- 199146 | 3 188804 | 3 177771 | 3 @@ -437,21 +437,41 @@ SELECT * GROUP BY l_partkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; - QUERY PLAN -------------------------------------------------------------------------- - Limit + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) Output: remote_scan.l_partkey, remote_scan.count - -> Sort - Output: remote_scan.l_partkey, remote_scan.count - Sort Key: remote_scan.count DESC, remote_scan.l_partkey DESC - -> Custom Scan (Citus Task-Tracker) - Output: remote_scan.l_partkey, remote_scan.count - Task Count: 4 - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 8 - Merge Task Count: 4 -(12 rows) + -> Distributed Subplan XXX_1 + -> HashAggregate + Output: remote_scan.l_partkey, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) + Group Key: remote_scan.l_partkey + -> Custom Scan (Citus Adaptive) + Output: remote_scan.l_partkey, remote_scan.count + Task Count: 8 + Tasks Shown: One of 8 + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> GroupAggregate + Output: l_partkey, count(DISTINCT l_orderkey) + Group Key: lineitem_hash.l_partkey + -> Sort + Output: l_partkey, l_orderkey + Sort Key: lineitem_hash.l_partkey + -> Seq Scan on public.lineitem_hash_240000 lineitem_hash + Output: l_partkey, l_orderkey + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Limit + Output: intermediate_result.l_partkey, intermediate_result.count + -> Sort + Output: intermediate_result.l_partkey, intermediate_result.count + Sort Key: intermediate_result.count DESC, intermediate_result.l_partkey DESC + -> Function Scan on pg_catalog.read_intermediate_result intermediate_result + Output: intermediate_result.l_partkey, intermediate_result.count + Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) +(32 rows) -- count distinct with filters SELECT @@ -462,8 +482,8 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 3 DESC, 1 LIMIT 10; - l_orderkey | count | count -------------+-------+------- + l_orderkey | count | count +--------------------------------------------------------------------- 4964 | 4 | 7 12005 | 4 | 7 5409 | 4 | 6 @@ -485,19 +505,19 @@ SELECT GROUP BY l_orderkey ORDER BY 2 DESC, 3 DESC, 1 LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 -> Sort Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 Sort Key: remote_scan.count DESC, remote_scan.count_1 DESC, remote_scan.l_orderkey - -> Custom Scan (Citus Task-Tracker) + -> 
Custom Scan (Citus Adaptive) Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> Limit Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey)) -> Sort @@ -517,8 +537,8 @@ SELECT GROUP BY l_suppkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_suppkey | count ------------+------- + l_suppkey | count +--------------------------------------------------------------------- 7680 | 4 7703 | 3 7542 | 3 @@ -539,8 +559,8 @@ SELECT GROUP BY l_suppkey ORDER BY 2 DESC, 1 DESC LIMIT 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- Limit Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))) -> Sort @@ -552,12 +572,12 @@ SELECT -> Sort Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1 Sort Key: remote_scan.l_suppkey DESC - -> Custom Scan (Citus Task-Tracker) + -> Custom Scan (Citus Adaptive) Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1 Task Count: 8 Tasks Shown: One of 8 -> Task - Node: host=localhost port=57637 dbname=regression + Node: host=localhost port=xxxxx dbname=regression -> HashAggregate Output: l_suppkey, l_partkey, l_shipmode Group Key: lineitem_hash.l_suppkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode @@ -569,8 +589,8 @@ SELECT SELECT count(DISTINCT l_orderkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash; - count -------- + count +--------------------------------------------------------------------- 1327 (1 row) @@ -578,8 +598,8 @@ SELECT SELECT count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash; - count -------- + count +--------------------------------------------------------------------- 1702 (1 row) @@ -588,8 +608,8 @@ SELECT count(DISTINCT l_partkey), count(DISTINCT l_shipdate) FROM lineitem_hash; - count | count | count --------+-------+------- + count | count | count +--------------------------------------------------------------------- 1702 | 11661 | 2470 (1 row) @@ -602,8 +622,8 @@ SELECT * GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 14885 | 7 14884 | 7 14821 | 7 @@ -625,8 +645,8 @@ SELECT * GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 12005 | 4 5409 | 4 4964 | 4 @@ -650,8 +670,8 @@ SELECT * WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 12005 | 4 5409 | 4 4964 | 4 @@ -674,8 +694,8 @@ SELECT * WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- 14275 | 7 14181 | 7 13605 | 7 @@ -721,8 +741,8 @@ SELECT * GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 
DESC LIMIT 0; - l_orderkey | count -------------+------- + l_orderkey | count +--------------------------------------------------------------------- (0 rows) -- multiple nested subquery @@ -755,8 +775,8 @@ SELECT total ORDER BY total_avg_count DESC; - total | total_avg_count --------+-------------------- + total | total_avg_count +--------------------------------------------------------------------- 1 | 3.6000000000000000 6 | 2.8333333333333333 10 | 2.6000000000000000 @@ -787,8 +807,8 @@ SELECT * ORDER BY 1 DESC, 2 DESC LIMIT 10; - count | l_shipdate --------+------------ + count | l_shipdate +--------------------------------------------------------------------- 14 | 07-30-1997 13 | 05-26-1998 13 | 08-08-1997 @@ -815,8 +835,8 @@ SELECT * ORDER BY 2 DESC, 1 DESC LIMIT 10; - l_quantity | count -------------+------- + l_quantity | count +--------------------------------------------------------------------- 48.00 | 13 47.00 | 13 37.00 | 13 @@ -847,15 +867,15 @@ SELECT * ORDER BY 1 DESC, 2 DESC LIMIT 10; - avg | l_shipmode --------------------------+------------ - 44.82904609027336300064 | MAIL - 44.80704536679536679537 | SHIP - 44.68891732736572890026 | AIR - 44.34106724470134874759 | REG AIR - 43.12739987269255251432 | FOB - 43.07299253636938646426 | RAIL - 40.50298377916903813318 | TRUCK + avg | l_shipmode +--------------------------------------------------------------------- + 44.82904609027336300064 | MAIL + 44.80704536679536679537 | SHIP + 44.68891732736572890026 | AIR + 44.34106724470134874759 | REG AIR + 43.12739987269255251432 | FOB + 43.07299253636938646426 | RAIL + 40.50298377916903813318 | TRUCK (7 rows) -- count DISTINCT CASE WHEN expression @@ -873,8 +893,8 @@ SELECT * GROUP BY l_shipdate) sub ORDER BY 1 DESC LIMIT 10; - avg ------ + avg +--------------------------------------------------------------------- 7 6 6 @@ -900,8 +920,8 @@ SELECT * ORDER BY 2 DESC,1 DESC LIMIT 10; - l_shipmode | count -------------+------- + l_shipmode | count +--------------------------------------------------------------------- TRUCK | 1689 MAIL | 1683 FOB | 1655 @@ -922,8 +942,8 @@ SELECT GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - l_orderkey | count | count -------------+-------+------- + l_orderkey | count | count +--------------------------------------------------------------------- 14947 | 2 | 1 14946 | 2 | 1 14945 | 6 | 1 @@ -944,8 +964,8 @@ FROM GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - user_id | count | count | count ----------+-------+-------+------- + user_id | count | count | count +--------------------------------------------------------------------- 6 | 11 | 1 | 1 5 | 27 | 1 | 1 4 | 24 | 1 | 1 @@ -960,9 +980,9 @@ CREATE TYPE test_item AS ); CREATE TABLE test_count_distinct_array (key int, value int , value_arr test_item[]); SELECT create_distributed_table('test_count_distinct_array', 'key'); - create_distributed_table --------------------------- - + create_distributed_table +--------------------------------------------------------------------- + (1 row) INSERT INTO test_count_distinct_array SELECT i, i, ARRAY[(i,i)::test_item] FROM generate_Series(0, 1000) i; @@ -978,8 +998,8 @@ FROM GROUP BY 1 ORDER BY 1 DESC LIMIT 5; - key | count | count | count -------+-------+-------+------- + key | count | count | count +--------------------------------------------------------------------- 1000 | 1 | 1 | 1 999 | 1 | 1 | 1 998 | 1 | 1 | 1 @@ -999,7 +1019,7 @@ SELECT * ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) -DETAIL: Only count(distinct) aggregate is supported in subqueries 
+DETAIL: table partitioning is unsuitable for aggregate (distinct) SELECT * FROM ( SELECT @@ -1009,7 +1029,7 @@ SELECT * ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) -DETAIL: Only count(distinct) aggregate is supported in subqueries +DETAIL: table partitioning is unsuitable for aggregate (distinct) -- whole row references, oid, and ctid are not supported in count distinct -- test table does not have oid or ctid enabled, so tests for them are skipped SELECT * @@ -1049,8 +1069,8 @@ SELECT * WHERE year = 1995 ORDER BY 2 DESC, 1 LIMIT 10; - l_shipdate | distinct_part | year -------------+---------------+------ + l_shipdate | distinct_part | year +--------------------------------------------------------------------- 11-29-1995 | 5 | 1995 03-24-1995 | 4 | 1995 09-18-1995 | 4 | 1995 @@ -1081,8 +1101,8 @@ SELECT * WHERE year = 1995 ORDER BY 2 DESC, 1 LIMIT 10; - l_shipdate | distinct_part | year -------------+---------------+------ + l_shipdate | distinct_part | year +--------------------------------------------------------------------- 11-29-1995 | 5 | 1995 03-24-1995 | 4 | 1995 09-18-1995 | 4 | 1995 @@ -1111,8 +1131,8 @@ SELECT * WHERE year = 1995 ORDER BY 2 DESC, 1 LIMIT 10; - l_shipdate | distinct_part | year -------------+---------------+------ + l_shipdate | distinct_part | year +--------------------------------------------------------------------- 11-29-1995 | 5 | 1995 03-24-1995 | 4 | 1995 09-18-1995 | 4 | 1995 diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index bce20d014..1cc1b403b 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -190,7 +190,8 @@ select * FROM ( select * FROM ( SELECT key k, avg(distinct floor(agg1.val/2)) m from aggdata agg1 group by key -) subq; +) subq +order by k,m; -- Test TransformsSubqueryNode with group by not in FROM (failed in past) select count(*) FROM ( diff --git a/src/test/regress/sql/multi_cross_shard.sql b/src/test/regress/sql/multi_cross_shard.sql index 8af073e42..8ef2ec235 100644 --- a/src/test/regress/sql/multi_cross_shard.sql +++ b/src/test/regress/sql/multi_cross_shard.sql @@ -4,6 +4,8 @@ -- Tests to log cross shard queries according to error log level -- +SET citus.enable_repartition_joins to ON; + -- Create a distributed table and add data to it CREATE TABLE multi_task_table ( diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 573e42dbc..011f4570f 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -8,6 +8,7 @@ SET citus.next_shard_id TO 570000; RESET citus.task_executor_type; SET citus.explain_distributed_queries TO on; +SET citus.enable_repartition_joins to ON; -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) diff --git a/src/test/regress/sql/multi_join_order_additional.sql b/src/test/regress/sql/multi_join_order_additional.sql index 869ae0889..1d30900dd 100644 --- a/src/test/regress/sql/multi_join_order_additional.sql +++ b/src/test/regress/sql/multi_join_order_additional.sql @@ -10,6 +10,7 @@ SET citus.next_shard_id TO 650000; SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise +SET citus.enable_repartition_joins to ON; SET citus.shard_count to 2; SET citus.shard_replication_factor to 1; RESET client_min_messages; diff --git 
a/src/test/regress/sql/multi_join_order_tpch_repartition.sql b/src/test/regress/sql/multi_join_order_tpch_repartition.sql index 8e9c59713..3060f8123 100644 --- a/src/test/regress/sql/multi_join_order_tpch_repartition.sql +++ b/src/test/regress/sql/multi_join_order_tpch_repartition.sql @@ -11,6 +11,7 @@ SET citus.next_shard_id TO 660000; SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise +SET citus.enable_repartition_joins to ON; SET client_min_messages TO LOG; -- The following queries are basically the same as the ones in tpch_small diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index 10e19f8a4..b12fcc08a 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -107,6 +107,7 @@ SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; -- test re-partition query (needs to transmit intermediate results) @@ -143,6 +144,7 @@ SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; -- test re-partition query (needs to transmit intermediate results) @@ -179,6 +181,7 @@ SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; SELECT count(*), min(current_user) FROM test; -- test re-partition query diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql index e0297e54f..e284f44a2 100644 --- a/src/test/regress/sql/multi_partitioning.sql +++ b/src/test/regress/sql/multi_partitioning.sql @@ -5,6 +5,7 @@ SET citus.next_shard_id TO 1660000; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; +SET citus.enable_repartition_joins to ON; -- -- Distributed Partitioned Table Creation Tests diff --git a/src/test/regress/sql/multi_reference_table.sql b/src/test/regress/sql/multi_reference_table.sql index 39d75ef06..97f3f2b76 100644 --- a/src/test/regress/sql/multi_reference_table.sql +++ b/src/test/regress/sql/multi_reference_table.sql @@ -678,20 +678,22 @@ WHERE ORDER BY 1; SET citus.task_executor_type to "task-tracker"; +SET citus.enable_repartition_joins to ON; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE - colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; + colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2 +ORDER BY colocated_table_test.value_2; SELECT reference_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE - colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; - + colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1 +ORDER BY reference_table_test.value_2; SET citus.log_multi_join_order TO FALSE; diff --git a/src/test/regress/sql/multi_repartition_join_planning.sql b/src/test/regress/sql/multi_repartition_join_planning.sql index 
a7bd65e7f..3dda9a8e5 100644 --- a/src/test/regress/sql/multi_repartition_join_planning.sql +++ b/src/test/regress/sql/multi_repartition_join_planning.sql @@ -9,6 +9,7 @@ SET citus.next_shard_id TO 690000; SET citus.enable_unique_job_ids TO off; +SET citus.enable_repartition_joins to ON; create schema repartition_join; DROP TABLE IF EXISTS repartition_join.order_line; @@ -34,7 +35,7 @@ SELECT create_distributed_table('order_line','ol_w_id'); SELECT create_distributed_table('stock','s_w_id'); BEGIN; -SET client_min_messages TO DEBUG4; +SET client_min_messages TO DEBUG; SET citus.task_executor_type TO 'task-tracker'; -- Debug4 log messages display jobIds within them. We explicitly set the jobId diff --git a/src/test/regress/sql/multi_repartition_join_pruning.sql b/src/test/regress/sql/multi_repartition_join_pruning.sql index 0e8090e02..4c0c8dfc3 100644 --- a/src/test/regress/sql/multi_repartition_join_pruning.sql +++ b/src/test/regress/sql/multi_repartition_join_pruning.sql @@ -11,6 +11,7 @@ SET citus.next_shard_id TO 700000; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; -- Single range-repartition join to test join-pruning behaviour. EXPLAIN (COSTS OFF) diff --git a/src/test/regress/sql/multi_repartition_join_task_assignment.sql b/src/test/regress/sql/multi_repartition_join_task_assignment.sql index f7c651bda..991bdc969 100644 --- a/src/test/regress/sql/multi_repartition_join_task_assignment.sql +++ b/src/test/regress/sql/multi_repartition_join_task_assignment.sql @@ -12,6 +12,7 @@ SET citus.next_shard_id TO 710000; BEGIN; SET client_min_messages TO DEBUG3; SET citus.task_executor_type TO 'task-tracker'; +SET citus.enable_repartition_joins to ON; -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. diff --git a/src/test/regress/sql/multi_repartition_udt.sql b/src/test/regress/sql/multi_repartition_udt.sql index d0797b9a0..d4f029929 100644 --- a/src/test/regress/sql/multi_repartition_udt.sql +++ b/src/test/regress/sql/multi_repartition_udt.sql @@ -3,6 +3,7 @@ -- SET citus.next_shard_id TO 535000; +SET citus.enable_repartition_joins to ON; -- START type creation @@ -160,6 +161,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- Distribute and populate the two tables. SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; +SET citus.enable_repartition_joins to ON; SELECT create_distributed_table('repartition_udt', 'pk', 'hash'); SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash'); diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index b08d59849..217723980 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -601,6 +601,7 @@ SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nat 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
+SET citus.enable_repartition_joins to ON;
 -- check when search_path is public,
 -- join of two tables which are in different schemas,
 -- join on partition column
diff --git a/src/test/regress/sql/multi_task_string_size.sql b/src/test/regress/sql/multi_task_string_size.sql
index a12a88dc6..59c1034f2 100644
--- a/src/test/regress/sql/multi_task_string_size.sql
+++ b/src/test/regress/sql/multi_task_string_size.sql
@@ -225,6 +225,7 @@ SELECT raise_failed_execution('
 	SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);
 ');
 
+SET citus.enable_repartition_joins to ON;
 -- following will succeed since it fetches few columns
 SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);
 
diff --git a/src/test/regress/sql/set_operation_and_local_tables.sql b/src/test/regress/sql/set_operation_and_local_tables.sql
index 4c87784f8..77d8598a4 100644
--- a/src/test/regress/sql/set_operation_and_local_tables.sql
+++ b/src/test/regress/sql/set_operation_and_local_tables.sql
@@ -1,6 +1,8 @@
 CREATE SCHEMA recursive_set_local;
 SET search_path TO recursive_set_local, public;
+SET citus.enable_repartition_joins to ON;
+
 CREATE TABLE recursive_set_local.test (x int, y int);
 SELECT create_distributed_table('test', 'x');
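
For context only (not part of the patch itself): the GUC added throughout these tests gates repartition joins in the adaptive executor. A minimal sketch of the kind of query it unlocks follows; the table names t1 and t2 are hypothetical and are not taken from the tests above.

-- Illustrative sketch, assuming two hash-distributed tables joined on
-- non-distribution columns; without the GUC below, Citus rejects such a
-- join because it requires repartitioning both relations.
SET citus.enable_repartition_joins TO on;

CREATE TABLE t1 (a int, b int);
CREATE TABLE t2 (a int, b int);
SELECT create_distributed_table('t1', 'a');
SELECT create_distributed_table('t2', 'a');

-- t1.b and t2.b are not distribution columns, so this join is planned as a
-- repartition join once citus.enable_repartition_joins is on.
SELECT count(*) FROM t1 JOIN t2 ON (t1.b = t2.b);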