mirror of https://github.com/citusdata/citus.git
update repartition join tests for check-multi
parent
43fc86646c
commit
1c2ee39f15
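The change is mechanical across the expected-output files below: each affected test now enables repartition joins explicitly, and the expected plans show the "Citus Adaptive" custom scan where "Citus Task-Tracker" used to appear. As a minimal sketch of the setting the updated tests rely on (illustrative only; the GUC is the one added throughout this commit, and the sample join is a hypothetical stand-in for the regression schemas):

    -- Allow joins that cannot be executed co-located and therefore need to
    -- repartition shard data across workers before joining.
    SET citus.enable_repartition_joins TO on;
    -- A join on non-distribution columns, e.g. of the shape below, is then
    -- planned as a (dual) repartition join instead of erroring out:
    --   SELECT count(*) FROM lineitem l JOIN orders o ON l.l_partkey = o.o_custkey;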
@@ -404,15 +404,16 @@ select * FROM (
select * FROM (
SELECT key k, avg(distinct floor(agg1.val/2)) m from aggdata agg1
group by key
) subq;
) subq
order by k,m;
k | m
---------------------------------------------------------------------
1 | 1
5 |
3 | 2
7 | 4
6 |
2 | 1.5
3 | 2
5 |
6 |
7 | 4
9 | 0
(7 rows)

@@ -3,6 +3,7 @@
--
-- Tests to log cross shard queries according to error log level
--
SET citus.enable_repartition_joins to ON;
-- Create a distributed table and add data to it
CREATE TABLE multi_task_table
(

@@ -5,6 +5,7 @@ SET citus.next_shard_id TO 570000;
\a\t
RESET citus.task_executor_type;
SET citus.explain_distributed_queries TO on;
SET citus.enable_repartition_joins to ON;
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb
@@ -947,7 +948,7 @@ SET citus.task_executor_type TO 'task-tracker';
EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
@@ -963,7 +964,7 @@ EXPLAIN (COSTS FALSE)
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -989,7 +990,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Task-Tracker",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Distributed Query": {
"Job": {
@@ -1038,7 +1039,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Task-Tracker</Custom-Plan-Provider>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Distributed-Query>
<Job>
@@ -1097,7 +1098,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Task-Tracker"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Distributed Query:
Job:
@@ -1114,7 +1115,7 @@ Aggregate
-- ensure distributed plans don't break
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
@@ -1126,7 +1127,7 @@ PREPARE task_tracker_query AS
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task

@@ -6,6 +6,7 @@ SET citus.next_shard_id TO 650000;
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
SET citus.enable_repartition_joins to ON;
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;
RESET client_min_messages;
@@ -79,7 +80,7 @@ DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(2 rows)
@@ -95,7 +96,7 @@ LOG: join order: [ "lineitem" ][ local partition join "orders" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -110,7 +111,7 @@ LOG: join order: [ "orders" ][ single range partition join "lineitem_hash" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -122,7 +123,7 @@ LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -134,7 +135,7 @@ LOG: join order: [ "customer_hash" ][ reference join "nation" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -147,7 +148,7 @@ LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -160,7 +161,7 @@ LOG: join order: [ "orders" ][ dual partition join "customer_hash" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -173,7 +174,7 @@ LOG: join order: [ "orders_hash" ][ single range partition join "customer_appen
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -198,7 +199,7 @@ LOG: join order: [ "users_table" ][ local partition join "events_table" ][ loca
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)

@@ -6,6 +6,7 @@ SET citus.next_shard_id TO 660000;
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
SET citus.enable_repartition_joins to ON;
SET client_min_messages TO LOG;
-- The following queries are basically the same as the ones in tpch_small
-- except that more data has been loaded into customer and part tables. Therefore,
@@ -25,7 +26,7 @@ LOG: join order: [ "lineitem" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -60,7 +61,7 @@ LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range
Sort Key: (sum(remote_scan.revenue)) DESC, remote_scan.o_orderdate
-> HashAggregate
Group Key: remote_scan.l_orderkey, remote_scan.o_orderdate, remote_scan.o_shippriority
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(6 rows)
@@ -104,7 +105,7 @@ LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single range
Sort Key: (sum(remote_scan.revenue)) DESC
-> HashAggregate
Group Key: remote_scan.c_custkey, remote_scan.c_name, remote_scan.c_acctbal, remote_scan.c_phone, remote_scan.n_name, remote_scan.c_address, remote_scan.c_comment
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(6 rows)
@@ -143,7 +144,7 @@ LOG: join order: [ "lineitem" ][ single range partition join "part_append" ]
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(3 rows)
@@ -164,7 +165,7 @@ LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single range
---------------------------------------------------------------------
HashAggregate
Group Key: remote_scan.l_partkey
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(4 rows)

@@ -142,6 +142,7 @@ SELECT count(*) FROM test WHERE id = 1;
(1 row)

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;
count | min
---------------------------------------------------------------------
@@ -230,6 +231,7 @@ SELECT count(*) FROM test WHERE id = 1;
(1 row)

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;
count | min
---------------------------------------------------------------------
@@ -288,6 +290,7 @@ ERROR: permission denied for table test
SELECT count(*) FROM test WHERE id = 1;
ERROR: permission denied for table test
SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;
ERROR: permission denied for table test
-- test re-partition query

@@ -4,6 +4,7 @@
SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.enable_repartition_joins to ON;
--
-- Distributed Partitioned Table Creation Tests
--

@@ -1087,12 +1087,14 @@ LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_te
(2 rows)

SET citus.task_executor_type to "task-tracker";
SET citus.enable_repartition_joins to ON;
SELECT
colocated_table_test.value_2
FROM
reference_table_test, colocated_table_test, colocated_table_test_2
WHERE
colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2;
colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2
ORDER BY colocated_table_test.value_2;
LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ]
value_2
---------------------------------------------------------------------
@@ -1105,7 +1107,8 @@ SELECT
FROM
reference_table_test, colocated_table_test, colocated_table_test_2
WHERE
colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1;
colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1
ORDER BY reference_table_test.value_2;
LOG: join order: [ "colocated_table_test" ][ reference join "reference_table_test" ][ dual partition join "colocated_table_test_2" ]
value_2
---------------------------------------------------------------------

@@ -7,6 +7,7 @@
-- executor here, as we cannot run repartition jobs with real time executor.
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
SET citus.enable_repartition_joins to ON;
create schema repartition_join;
DROP TABLE IF EXISTS repartition_join.order_line;
NOTICE: table "order_line" does not exist, skipping
@@ -40,7 +41,7 @@ SELECT create_distributed_table('stock','s_w_id');
(1 row)

BEGIN;
SET client_min_messages TO DEBUG4;
SET client_min_messages TO DEBUG;
SET citus.task_executor_type TO 'task-tracker';
-- Debug4 log messages display jobIds within them. We explicitly set the jobId
-- sequence here so that the regression output becomes independent of the number
@@ -65,62 +66,26 @@ GROUP BY
ORDER BY
l_partkey, o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 "pg_merge_job_0001.task_000003" JOIN part_append_290005 part_append ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_append_280002 part_append ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 3
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_append_290004 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_append_280001 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_append_280000 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, "pg_merge_job_0002.task_000011".intermediate_column_2_1"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT l_partkey, o_orderkey, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 3 2 4)'::cstring(0)) remote_scan(l_partkey integer, o_orderkey bigint, count bigint) GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 1
DEBUG: completed cleanup query for job 1
l_partkey | o_orderkey | count
---------------------------------------------------------------------
18 | 12005 | 1
@@ -166,22 +131,6 @@ GROUP BY
ORDER BY
l_partkey, o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -194,14 +143,6 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 "pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -218,17 +159,6 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT l_partkey, o_orderkey, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(l_partkey integer, o_orderkey bigint, count bigint) GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey
DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 5
DEBUG: completed cleanup query for job 5
l_partkey | o_orderkey | count
---------------------------------------------------------------------
(0 rows)
@@ -245,22 +175,6 @@ GROUP BY
ORDER BY
o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -273,14 +187,6 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0008.task_000003".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000003".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000003 "pg_merge_job_0007.task_000003" JOIN pg_merge_job_0008.task_000003 "pg_merge_job_0008.task_000003" ON (("pg_merge_job_0007.task_000003".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000003".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000003".intermediate_column_8_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0008.task_000006".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000006".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000006 "pg_merge_job_0007.task_000006" JOIN pg_merge_job_0008.task_000006 "pg_merge_job_0008.task_000006" ON (("pg_merge_job_0007.task_000006".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000006".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000006".intermediate_column_8_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0008.task_000009".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000009".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000009 "pg_merge_job_0007.task_000009" JOIN pg_merge_job_0008.task_000009 "pg_merge_job_0008.task_000009" ON (("pg_merge_job_0007.task_000009".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000009".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000009".intermediate_column_8_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0008.task_000012".intermediate_column_8_0 AS o_orderkey, any_value("pg_merge_job_0008.task_000012".intermediate_column_8_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0007.task_000012 "pg_merge_job_0007.task_000012" JOIN pg_merge_job_0008.task_000012 "pg_merge_job_0008.task_000012" ON (("pg_merge_job_0007.task_000012".intermediate_column_7_0 OPERATOR(pg_catalog.=) "pg_merge_job_0008.task_000012".intermediate_column_8_1))) WHERE true GROUP BY "pg_merge_job_0008.task_000012".intermediate_column_8_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -297,17 +203,6 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT o_orderkey, o_shippriority, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, o_shippriority integer, count bigint) GROUP BY o_orderkey ORDER BY o_orderkey
DEBUG: completed cleanup query for job 9
DEBUG: completed cleanup query for job 9
DEBUG: completed cleanup query for job 7
DEBUG: completed cleanup query for job 7
DEBUG: completed cleanup query for job 8
DEBUG: completed cleanup query for job 8
o_orderkey | o_shippriority | count
---------------------------------------------------------------------
(0 rows)
@@ -326,22 +221,6 @@ GROUP BY
ORDER BY
o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -354,14 +233,6 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0011.task_000003".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000003".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000003 "pg_merge_job_0010.task_000003" JOIN pg_merge_job_0011.task_000003 "pg_merge_job_0011.task_000003" ON (("pg_merge_job_0010.task_000003".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000003".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000003".intermediate_column_11_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0011.task_000006".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000006".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000006 "pg_merge_job_0010.task_000006" JOIN pg_merge_job_0011.task_000006 "pg_merge_job_0011.task_000006" ON (("pg_merge_job_0010.task_000006".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000006".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000006".intermediate_column_11_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0011.task_000009".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000009".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000009 "pg_merge_job_0010.task_000009" JOIN pg_merge_job_0011.task_000009 "pg_merge_job_0011.task_000009" ON (("pg_merge_job_0010.task_000009".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000009".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000009".intermediate_column_11_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0011.task_000012".intermediate_column_11_0 AS o_orderkey, any_value("pg_merge_job_0011.task_000012".intermediate_column_11_1) AS o_shippriority, count(*) AS count FROM (pg_merge_job_0010.task_000012 "pg_merge_job_0010.task_000012" JOIN pg_merge_job_0011.task_000012 "pg_merge_job_0011.task_000012" ON (("pg_merge_job_0010.task_000012".intermediate_column_10_0 OPERATOR(pg_catalog.=) "pg_merge_job_0011.task_000012".intermediate_column_11_1))) WHERE true GROUP BY "pg_merge_job_0011.task_000012".intermediate_column_11_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -378,17 +249,6 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT o_orderkey, o_shippriority, COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, o_shippriority integer, count bigint) GROUP BY o_orderkey ORDER BY o_orderkey
DEBUG: completed cleanup query for job 12
DEBUG: completed cleanup query for job 12
DEBUG: completed cleanup query for job 10
DEBUG: completed cleanup query for job 10
DEBUG: completed cleanup query for job 11
DEBUG: completed cleanup query for job 11
o_orderkey | o_shippriority | count
---------------------------------------------------------------------
(0 rows)
@@ -405,22 +265,6 @@ GROUP BY
ORDER BY
o_orderkey;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290000 lineitem WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_suppkey FROM lineitem_290001 lineitem WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -433,14 +277,6 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0014.task_000003".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000003".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000003".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000003 "pg_merge_job_0013.task_000003" JOIN pg_merge_job_0014.task_000003 "pg_merge_job_0014.task_000003" ON (("pg_merge_job_0013.task_000003".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000003".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000003".intermediate_column_14_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0014.task_000006".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000006".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000006".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000006 "pg_merge_job_0013.task_000006" JOIN pg_merge_job_0014.task_000006 "pg_merge_job_0014.task_000006" ON (("pg_merge_job_0013.task_000006".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000006".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000006".intermediate_column_14_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0014.task_000009".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000009".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000009".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000009 "pg_merge_job_0013.task_000009" JOIN pg_merge_job_0014.task_000009 "pg_merge_job_0014.task_000009" ON (("pg_merge_job_0013.task_000009".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000009".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000009".intermediate_column_14_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0014.task_000012".intermediate_column_14_0 AS o_orderkey, any_value("pg_merge_job_0014.task_000012".intermediate_column_14_1) AS any_value, any_value("pg_merge_job_0014.task_000012".intermediate_column_14_1) AS worker_column_3 FROM (pg_merge_job_0013.task_000012 "pg_merge_job_0013.task_000012" JOIN pg_merge_job_0014.task_000012 "pg_merge_job_0014.task_000012" ON (("pg_merge_job_0013.task_000012".intermediate_column_13_0 OPERATOR(pg_catalog.=) "pg_merge_job_0014.task_000012".intermediate_column_14_1))) WHERE true GROUP BY "pg_merge_job_0014.task_000012".intermediate_column_14_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
@@ -457,17 +293,6 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT o_orderkey, any_value(any_value) AS any_value FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(o_orderkey bigint, any_value integer, worker_column_3 integer) GROUP BY o_orderkey ORDER BY o_orderkey
DEBUG: completed cleanup query for job 15
DEBUG: completed cleanup query for job 15
DEBUG: completed cleanup query for job 13
DEBUG: completed cleanup query for job 13
DEBUG: completed cleanup query for job 14
DEBUG: completed cleanup query for job 14
o_orderkey | any_value
---------------------------------------------------------------------
(0 rows)
@@ -481,39 +306,7 @@ select s_i_id
group by s_i_id, s_w_id, s_quantity
having s_quantity > random()
;
DEBUG: no valid constraints found
DEBUG: shard count: 4
DEBUG: no valid constraints found
DEBUG: shard count: 4
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: no valid constraints found
DEBUG: shard count: 4
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690004 stock WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690005 stock WHERE true"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690006 stock WHERE true"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT s_i_id, s_w_id, s_quantity FROM stock_690007 stock WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: no valid constraints found
DEBUG: shard count: 4
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT ol_i_id FROM order_line_690000 order_line WHERE true"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT ol_i_id FROM order_line_690001 order_line WHERE true"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT ol_i_id FROM order_line_690002 order_line WHERE true"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT ol_i_id FROM order_line_690003 order_line WHERE true"
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@@ -526,14 +319,6 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0016.task_000005".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000005".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000005".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000005 "pg_merge_job_0016.task_000005" JOIN pg_merge_job_0017.task_000005 "pg_merge_job_0017.task_000005" ON (("pg_merge_job_0017.task_000005".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000005".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000005".intermediate_column_16_0, "pg_merge_job_0016.task_000005".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000005".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0016.task_000010".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000010".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000010".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000010 "pg_merge_job_0016.task_000010" JOIN pg_merge_job_0017.task_000010 "pg_merge_job_0017.task_000010" ON (("pg_merge_job_0017.task_000010".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000010".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000010".intermediate_column_16_0, "pg_merge_job_0016.task_000010".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000010".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0016.task_000015".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000015".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000015".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000015 "pg_merge_job_0016.task_000015" JOIN pg_merge_job_0017.task_000015 "pg_merge_job_0017.task_000015" ON (("pg_merge_job_0017.task_000015".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000015".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000015".intermediate_column_16_0, "pg_merge_job_0016.task_000015".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000015".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0016.task_000020".intermediate_column_16_0 AS s_i_id, "pg_merge_job_0016.task_000020".intermediate_column_16_1 AS worker_column_2, any_value("pg_merge_job_0016.task_000020".intermediate_column_16_2) AS worker_column_3 FROM (pg_merge_job_0016.task_000020 "pg_merge_job_0016.task_000020" JOIN pg_merge_job_0017.task_000020 "pg_merge_job_0017.task_000020" ON (("pg_merge_job_0017.task_000020".intermediate_column_17_0 OPERATOR(pg_catalog.=) "pg_merge_job_0016.task_000020".intermediate_column_16_0))) WHERE true GROUP BY "pg_merge_job_0016.task_000020".intermediate_column_16_0, "pg_merge_job_0016.task_000020".intermediate_column_16_1 HAVING ((any_value("pg_merge_job_0016.task_000020".intermediate_column_16_2))::double precision OPERATOR(pg_catalog.>) random())"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -550,17 +335,6 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: master query: SELECT s_i_id FROM pg_catalog.citus_extradata_container(XXX, NULL::cstring(0), NULL::cstring(0), '(i 1 2)'::cstring(0)) remote_scan(s_i_id integer, worker_column_2 integer, worker_column_3 numeric)
DEBUG: completed cleanup query for job 18
DEBUG: completed cleanup query for job 18
DEBUG: completed cleanup query for job 16
DEBUG: completed cleanup query for job 16
DEBUG: completed cleanup query for job 17
DEBUG: completed cleanup query for job 17
s_i_id
---------------------------------------------------------------------
(0 rows)

@@ -7,6 +7,7 @@
SET citus.next_shard_id TO 700000;
SET client_min_messages TO DEBUG2;
SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
-- Single range-repartition join to test join-pruning behaviour.
EXPLAIN (COSTS OFF)
SELECT
@@ -31,7 +32,7 @@ DETAIL: Creating dependency on merge taskId 9
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 3
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -77,7 +78,7 @@ DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -112,7 +113,7 @@ DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -175,7 +176,7 @@ DETAIL: Creating dependency on merge taskId 16
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -240,7 +241,7 @@ DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -275,7 +276,7 @@ WHERE
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -305,7 +306,7 @@ WHERE
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
@@ -321,7 +322,7 @@ FROM
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: All
(3 rows)
@@ -336,7 +337,7 @@ WHERE
DEBUG: Router planner does not support append-partitioned tables.
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: All
(3 rows)

@@ -9,6 +9,7 @@ SET citus.next_shard_id TO 710000;
BEGIN;
SET client_min_messages TO DEBUG3;
SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
-- Single range repartition join to test anchor-shard based task assignment and
-- assignment propagation to merge and data-fetch tasks.
SELECT

@@ -2,6 +2,7 @@
-- MULTI_REPARTITION_UDT
--
SET citus.next_shard_id TO 535000;
SET citus.enable_repartition_joins to ON;
-- START type creation
CREATE TYPE test_udt AS (i integer, i2 integer);
-- ... as well as a function to use as its comparator...
@@ -125,6 +126,7 @@ FUNCTION 1 test_udt_hash(test_udt);
-- Distribute and populate the two tables.
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SET citus.enable_repartition_joins to ON;
SELECT create_distributed_table('repartition_udt', 'pk', 'hash');
create_distributed_table
---------------------------------------------------------------------
@@ -171,7 +173,7 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other
LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Task-Tracker)
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob

@@ -828,6 +828,7 @@ SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nat
(1 row)

\copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|';
SET citus.enable_repartition_joins to ON;
-- check when search_path is public,
-- join of two tables which are in different schemas,
-- join on partition column

@@ -198,8 +198,15 @@ group by
l_suppkey
ORDER BY 2 DESC, 1 DESC
LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries with limit are not supported yet
l_suppkey | total_suppkey_count
---------------------------------------------------------------------
35 | 5
112 | 4
102 | 4
73 | 4
123 | 3
(5 rows)

-- Check that we don't support subqueries without aggregates.
select
DISTINCT rounded_tax
@@ -212,8 +219,11 @@ from
l_tax) as distributed_table
ORDER BY 1 DESC
LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries without aggregates are not supported yet
rounded_tax
---------------------------------------------------------------------
0
(1 row)

-- Check that we support subqueries with count(distinct).
select
avg(different_shipment_days)

@@ -228,8 +228,12 @@ SET client_min_messages to ERROR;
SELECT raise_failed_execution('
SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);
');
ERROR: Task failed to execute
CONTEXT: PL/pgSQL function raise_failed_execution(text) line 6 at RAISE
raise_failed_execution
---------------------------------------------------------------------

(1 row)

SET citus.enable_repartition_joins to ON;
-- following will succeed since it fetches few columns
SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);
long_column_001 | long_column_002 | long_column_003

@ -204,8 +204,15 @@ SELECT l_suppkey, count(*) FROM
(SELECT l_suppkey, l_shipdate, count(*)
FROM air_shipped_lineitems GROUP BY l_suppkey, l_shipdate) supps
GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries without group by clause are not supported yet
l_suppkey | count
---------------------------------------------------------------------
7680 | 4
160 | 3
1042 | 3
1318 | 3
5873 | 3
(5 rows)

-- logically same query without a view works fine
SELECT l_suppkey, count(*) FROM
(SELECT l_suppkey, l_shipdate, count(*)
@ -226,8 +233,15 @@ SELECT l_suppkey, count(*) FROM
FROM (SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR') asi
GROUP BY l_suppkey, l_shipdate) supps
GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries without group by clause are not supported yet
l_suppkey | count
---------------------------------------------------------------------
7680 | 4
160 | 3
1042 | 3
1318 | 3
5873 | 3
(5 rows)

-- repartition query on view with single table subquery
CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1;
SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10;

@ -1,5 +1,6 @@
CREATE SCHEMA recursive_set_local;
SET search_path TO recursive_set_local, public;
SET citus.enable_repartition_joins to ON;
CREATE TABLE recursive_set_local.test (x int, y int);
SELECT create_distributed_table('test', 'x');
create_distributed_table

@ -380,6 +380,10 @@ FROM
users_table, cte_merge
WHERE
users_table.user_id = cte_merge.u_id;
ERROR: Complex subqueries and CTEs are not supported when task_executor_type is set to 'task-tracker'
count
---------------------------------------------------------------------
4365606
(1 row)

DROP SCHEMA with_executors CASCADE;
NOTICE: drop cascades to table local_table

@ -25,12 +25,12 @@ CREATE TABLE lineitem_hash (
PRIMARY KEY(l_orderkey, l_linenumber) );
SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
create_distributed_table
--------------------------
---------------------------------------------------------------------

(1 row)

\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\copy lineitem_hash FROM '/home/talha/citus/src/test/regress/data/lineitem.1.data' with delimiter '|'
\copy lineitem_hash FROM '/home/talha/citus/src/test/regress/data/lineitem.2.data' with delimiter '|'
ANALYZE lineitem_hash;
SET citus.task_executor_type to "task-tracker";
-- count(distinct) is supported on top level query if there
@ -42,7 +42,7 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
14885 | 7
14884 | 7
14821 | 7
@ -63,18 +63,18 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count
-> Sort
Output: remote_scan.l_orderkey, remote_scan.count
Sort Key: remote_scan.count DESC, remote_scan.l_orderkey DESC
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey, remote_scan.count
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
Output: l_orderkey, (count(DISTINCT l_partkey))
-> Sort
@ -94,7 +94,7 @@ SELECT
ORDER BY 1 DESC
LIMIT 10;
count
-------
---------------------------------------------------------------------
11661
(1 row)

@ -105,7 +105,7 @@ SELECT
ORDER BY 1 DESC
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: (count(DISTINCT remote_scan.count))
-> Sort
@ -113,12 +113,12 @@ SELECT
Sort Key: (count(DISTINCT remote_scan.count)) DESC
-> Aggregate
Output: count(DISTINCT remote_scan.count)
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_partkey
Group Key: lineitem_hash.l_partkey
@ -133,7 +133,7 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_shipmode | count
------------+-------
---------------------------------------------------------------------
TRUCK | 1757
MAIL | 1730
AIR | 1702
@ -151,7 +151,7 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count))
-> Sort
@ -163,12 +163,12 @@ SELECT
-> Sort
Output: remote_scan.l_shipmode, remote_scan.count
Sort Key: remote_scan.l_shipmode DESC
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_shipmode, remote_scan.count
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_shipmode, l_partkey
Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey
@ -184,7 +184,7 @@ SELECT
ORDER BY 3 DESC, 2 DESC, 1
LIMIT 10;
l_orderkey | count | count
------------+-------+-------
---------------------------------------------------------------------
226 | 7 | 7
1316 | 7 | 7
1477 | 7 | 7
@ -205,18 +205,18 @@ SELECT
ORDER BY 3 DESC, 2 DESC, 1
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
-> Sort
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
Sort Key: remote_scan.count_1 DESC, remote_scan.count DESC, remote_scan.l_orderkey
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode))
-> Sort
@ -234,7 +234,7 @@ SELECT
count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode)
FROM lineitem_hash;
count | count | count
-------+-------+-------
---------------------------------------------------------------------
2985 | 11661 | 7
(1 row)

@ -243,15 +243,15 @@ SELECT
count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode)
FROM lineitem_hash;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2)
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_orderkey, l_partkey, l_shipmode
Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
@ -266,7 +266,7 @@ SELECT
count(distinct l_shipmode), count(l_shipmode)
FROM lineitem_hash;
count | count | count | count | count | count
-------+-------+-------+-------+-------+-------
---------------------------------------------------------------------
2985 | 12000 | 11661 | 12000 | 7 | 12000
(1 row)

@ -277,7 +277,7 @@ SELECT
GROUP BY l_shipmode
ORDER BY 1, 2 DESC, 3 DESC;
l_shipmode | count | count
------------+-------+-------
---------------------------------------------------------------------
AIR | 1702 | 1327
FOB | 1700 | 1276
MAIL | 1730 | 1299
@ -296,7 +296,7 @@ SELECT
HAVING count(distinct l_orderkey) > 1300
ORDER BY 1, 2 DESC;
l_shipmode | count | count
------------+-------+-------
---------------------------------------------------------------------
AIR | 1702 | 1327
TRUCK | 1757 | 1333
(2 rows)
@ -309,7 +309,7 @@ SELECT
HAVING count(distinct l_orderkey) > 1300
ORDER BY 1, 2 DESC;
l_shipmode | count
------------+-------
---------------------------------------------------------------------
AIR | 1702
TRUCK | 1757
(2 rows)
@ -323,7 +323,7 @@ SELECT
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
l_shipmode | count | count
------------+-------+-------
---------------------------------------------------------------------
AIR | 1702 | 1564
FOB | 1700 | 1571
MAIL | 1730 | 1573
@ -341,7 +341,7 @@ SELECT
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
l_shipmode | count
------------+-------
---------------------------------------------------------------------
AIR | 1702
FOB | 1700
MAIL | 1730
@ -359,7 +359,7 @@ SELECT
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Sort
Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count))
Sort Key: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) DESC
@ -370,12 +370,12 @@ SELECT
-> Sort
Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3
Sort Key: remote_scan.l_shipmode
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_shipmode, l_partkey, l_suppkey
Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey, lineitem_hash.l_suppkey
@ -393,7 +393,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
14885 | 7
14884 | 7
14821 | 7
@ -415,7 +415,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_partkey | count
-----------+-------
---------------------------------------------------------------------
199146 | 3
188804 | 3
177771 | 3
@ -438,20 +438,40 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
QUERY PLAN
-------------------------------------------------------------------------
Limit
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.l_partkey, remote_scan.count
-> Distributed Subplan XXX_1
-> HashAggregate
Output: remote_scan.l_partkey, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
Group Key: remote_scan.l_partkey
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_partkey, remote_scan.count
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Output: l_partkey, count(DISTINCT l_orderkey)
Group Key: lineitem_hash.l_partkey
-> Sort
Output: remote_scan.l_partkey, remote_scan.count
Sort Key: remote_scan.count DESC, remote_scan.l_partkey DESC
-> Custom Scan (Citus Task-Tracker)
Output: remote_scan.l_partkey, remote_scan.count
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 8
Merge Task Count: 4
(12 rows)
Output: l_partkey, l_orderkey
Sort Key: lineitem_hash.l_partkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_partkey, l_orderkey
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Limit
Output: intermediate_result.l_partkey, intermediate_result.count
-> Sort
Output: intermediate_result.l_partkey, intermediate_result.count
Sort Key: intermediate_result.count DESC, intermediate_result.l_partkey DESC
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result
Output: intermediate_result.l_partkey, intermediate_result.count
Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
(32 rows)

-- count distinct with filters
SELECT
@ -463,7 +483,7 @@ SELECT
ORDER BY 2 DESC, 3 DESC, 1
LIMIT 10;
l_orderkey | count | count
------------+-------+-------
---------------------------------------------------------------------
4964 | 4 | 7
12005 | 4 | 7
5409 | 4 | 6
@ -486,18 +506,18 @@ SELECT
ORDER BY 2 DESC, 3 DESC, 1
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
-> Sort
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
Sort Key: remote_scan.count DESC, remote_scan.count_1 DESC, remote_scan.l_orderkey
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey))
-> Sort
@ -518,7 +538,7 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_suppkey | count
-----------+-------
---------------------------------------------------------------------
7680 | 4
7703 | 3
7542 | 3
@ -540,7 +560,7 @@ SELECT
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------
Limit
Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar)))
-> Sort
@ -552,12 +572,12 @@ SELECT
-> Sort
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
Sort Key: remote_scan.l_suppkey DESC
-> Custom Scan (Citus Task-Tracker)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
Task Count: 8
Tasks Shown: One of 8
-> Task
Node: host=localhost port=57637 dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_suppkey, l_partkey, l_shipmode
Group Key: lineitem_hash.l_suppkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
@ -570,7 +590,7 @@ SELECT
count(DISTINCT l_orderkey) FILTER (WHERE l_shipmode = 'AIR')
FROM lineitem_hash;
count
-------
---------------------------------------------------------------------
1327
(1 row)

@ -579,7 +599,7 @@ SELECT
count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR')
FROM lineitem_hash;
count
-------
---------------------------------------------------------------------
1702
(1 row)

@ -589,7 +609,7 @@ SELECT
count(DISTINCT l_shipdate)
FROM lineitem_hash;
count | count | count
-------+-------+-------
---------------------------------------------------------------------
1702 | 11661 | 2470
(1 row)

@ -603,7 +623,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
14885 | 7
14884 | 7
14821 | 7
@ -626,7 +646,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
12005 | 4
5409 | 4
4964 | 4
@ -651,7 +671,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
12005 | 4
5409 | 4
4964 | 4
@ -675,7 +695,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
14275 | 7
14181 | 7
13605 | 7
@ -722,7 +742,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 0;
l_orderkey | count
------------+-------
---------------------------------------------------------------------
(0 rows)

-- multiple nested subquery
@ -756,7 +776,7 @@ SELECT
ORDER BY
total_avg_count DESC;
total | total_avg_count
-------+--------------------
---------------------------------------------------------------------
1 | 3.6000000000000000
6 | 2.8333333333333333
10 | 2.6000000000000000
@ -788,7 +808,7 @@ SELECT *
1 DESC, 2 DESC
LIMIT 10;
count | l_shipdate
-------+------------
---------------------------------------------------------------------
14 | 07-30-1997
13 | 05-26-1998
13 | 08-08-1997
@ -816,7 +836,7 @@ SELECT *
2 DESC, 1 DESC
LIMIT 10;
l_quantity | count
------------+-------
---------------------------------------------------------------------
48.00 | 13
47.00 | 13
37.00 | 13
@ -848,7 +868,7 @@ SELECT *
1 DESC, 2 DESC
LIMIT 10;
avg | l_shipmode
-------------------------+------------
---------------------------------------------------------------------
44.82904609027336300064 | MAIL
44.80704536679536679537 | SHIP
44.68891732736572890026 | AIR
@ -874,7 +894,7 @@ SELECT *
ORDER BY 1 DESC
LIMIT 10;
avg
-----
---------------------------------------------------------------------
7
6
6
@ -901,7 +921,7 @@ SELECT *
2 DESC,1 DESC
LIMIT 10;
l_shipmode | count
------------+-------
---------------------------------------------------------------------
TRUCK | 1689
MAIL | 1683
FOB | 1655
@ -923,7 +943,7 @@ SELECT
ORDER BY 1 DESC
LIMIT 5;
l_orderkey | count | count
------------+-------+-------
---------------------------------------------------------------------
14947 | 2 | 1
14946 | 2 | 1
14945 | 6 | 1
@ -945,7 +965,7 @@ GROUP BY 1
ORDER BY 1 DESC
LIMIT 5;
user_id | count | count | count
---------+-------+-------+-------
---------------------------------------------------------------------
6 | 11 | 1 | 1
5 | 27 | 1 | 1
4 | 24 | 1 | 1
@ -961,7 +981,7 @@ CREATE TYPE test_item AS
CREATE TABLE test_count_distinct_array (key int, value int , value_arr test_item[]);
SELECT create_distributed_table('test_count_distinct_array', 'key');
create_distributed_table
--------------------------
---------------------------------------------------------------------

(1 row)

@ -979,7 +999,7 @@ GROUP BY 1
ORDER BY 1 DESC
LIMIT 5;
key | count | count | count
------+-------+-------+-------
---------------------------------------------------------------------
1000 | 1 | 1 | 1
999 | 1 | 1 | 1
998 | 1 | 1 | 1
@ -999,7 +1019,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
ERROR: cannot compute aggregate (distinct)
DETAIL: Only count(distinct) aggregate is supported in subqueries
DETAIL: table partitioning is unsuitable for aggregate (distinct)
SELECT *
FROM (
SELECT
@ -1009,7 +1029,7 @@ SELECT *
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
ERROR: cannot compute aggregate (distinct)
DETAIL: Only count(distinct) aggregate is supported in subqueries
DETAIL: table partitioning is unsuitable for aggregate (distinct)
-- whole row references, oid, and ctid are not supported in count distinct
-- test table does not have oid or ctid enabled, so tests for them are skipped
SELECT *
@ -1050,7 +1070,7 @@ SELECT *
ORDER BY 2 DESC, 1
LIMIT 10;
l_shipdate | distinct_part | year
------------+---------------+------
---------------------------------------------------------------------
11-29-1995 | 5 | 1995
03-24-1995 | 4 | 1995
09-18-1995 | 4 | 1995
@ -1082,7 +1102,7 @@ SELECT *
ORDER BY 2 DESC, 1
LIMIT 10;
l_shipdate | distinct_part | year
------------+---------------+------
---------------------------------------------------------------------
11-29-1995 | 5 | 1995
03-24-1995 | 4 | 1995
09-18-1995 | 4 | 1995
@ -1112,7 +1132,7 @@ SELECT *
ORDER BY 2 DESC, 1
LIMIT 10;
l_shipdate | distinct_part | year
------------+---------------+------
---------------------------------------------------------------------
11-29-1995 | 5 | 1995
03-24-1995 | 4 | 1995
09-18-1995 | 4 | 1995

@ -190,7 +190,8 @@ select * FROM (
select * FROM (
SELECT key k, avg(distinct floor(agg1.val/2)) m from aggdata agg1
group by key
) subq;
) subq
order by k,m;

-- Test TransformsSubqueryNode with group by not in FROM (failed in past)
select count(*) FROM (

@ -4,6 +4,8 @@
-- Tests to log cross shard queries according to error log level
--

SET citus.enable_repartition_joins to ON;

-- Create a distributed table and add data to it
CREATE TABLE multi_task_table
(

@ -8,6 +8,7 @@ SET citus.next_shard_id TO 570000;

RESET citus.task_executor_type;
SET citus.explain_distributed_queries TO on;
SET citus.enable_repartition_joins to ON;

-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)

@ -10,6 +10,7 @@ SET citus.next_shard_id TO 650000;
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
SET citus.enable_repartition_joins to ON;
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;
RESET client_min_messages;

@ -11,6 +11,7 @@ SET citus.next_shard_id TO 660000;
SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO TRUE;
SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
SET citus.enable_repartition_joins to ON;
SET client_min_messages TO LOG;

-- The following queries are basically the same as the ones in tpch_small

@ -107,6 +107,7 @@ SELECT count(*) FROM test;
SELECT count(*) FROM test WHERE id = 1;

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;

-- test re-partition query (needs to transmit intermediate results)
@ -143,6 +144,7 @@ SELECT count(*) FROM test;
SELECT count(*) FROM test WHERE id = 1;

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;

-- test re-partition query (needs to transmit intermediate results)
@ -179,6 +181,7 @@ SELECT count(*) FROM test;
SELECT count(*) FROM test WHERE id = 1;

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;
SELECT count(*), min(current_user) FROM test;

-- test re-partition query

@ -5,6 +5,7 @@ SET citus.next_shard_id TO 1660000;

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.enable_repartition_joins to ON;

--
-- Distributed Partitioned Table Creation Tests

@ -678,20 +678,22 @@ WHERE
ORDER BY 1;

SET citus.task_executor_type to "task-tracker";
SET citus.enable_repartition_joins to ON;
SELECT
colocated_table_test.value_2
FROM
reference_table_test, colocated_table_test, colocated_table_test_2
WHERE
colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2;
colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2
ORDER BY colocated_table_test.value_2;

SELECT
reference_table_test.value_2
FROM
reference_table_test, colocated_table_test, colocated_table_test_2
WHERE
colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1;

colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1
ORDER BY reference_table_test.value_2;

SET citus.log_multi_join_order TO FALSE;

@ -9,6 +9,7 @@

SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
SET citus.enable_repartition_joins to ON;

create schema repartition_join;
DROP TABLE IF EXISTS repartition_join.order_line;
@ -34,7 +35,7 @@ SELECT create_distributed_table('order_line','ol_w_id');
SELECT create_distributed_table('stock','s_w_id');

BEGIN;
SET client_min_messages TO DEBUG4;
SET client_min_messages TO DEBUG;
SET citus.task_executor_type TO 'task-tracker';

-- Debug4 log messages display jobIds within them. We explicitly set the jobId

@ -11,6 +11,7 @@ SET citus.next_shard_id TO 700000;

SET client_min_messages TO DEBUG2;
SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;

-- Single range-repartition join to test join-pruning behaviour.
EXPLAIN (COSTS OFF)

@ -12,6 +12,7 @@ SET citus.next_shard_id TO 710000;
BEGIN;
SET client_min_messages TO DEBUG3;
SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_repartition_joins to ON;

-- Single range repartition join to test anchor-shard based task assignment and
-- assignment propagation to merge and data-fetch tasks.

@ -3,6 +3,7 @@
--

SET citus.next_shard_id TO 535000;
SET citus.enable_repartition_joins to ON;

-- START type creation

@ -160,6 +161,7 @@ FUNCTION 1 test_udt_hash(test_udt);
-- Distribute and populate the two tables.
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SET citus.enable_repartition_joins to ON;
SELECT create_distributed_table('repartition_udt', 'pk', 'hash');
SET citus.shard_count TO 5;
SELECT create_distributed_table('repartition_udt_other', 'pk', 'hash');

@ -601,6 +601,7 @@ SELECT create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nat
5|ETHIOPIA|0|ven packages wake quickly. regu
\.

SET citus.enable_repartition_joins to ON;
-- check when search_path is public,
-- join of two tables which are in different schemas,
-- join on partition column

@ -225,6 +225,7 @@ SELECT raise_failed_execution('
SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);
');

SET citus.enable_repartition_joins to ON;
-- following will succeed since it fetches few columns
SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003);

@ -1,6 +1,8 @@
CREATE SCHEMA recursive_set_local;
SET search_path TO recursive_set_local, public;

SET citus.enable_repartition_joins to ON;

CREATE TABLE recursive_set_local.test (x int, y int);
SELECT create_distributed_table('test', 'x');