mirror of https://github.com/citusdata/citus.git

Normalize Output:.. since it changes with pg13
Fix indentation for better readability

pull/3900/head
parent 283b1db6a4
commit fe4ac51d8c
@@ -166,7 +166,7 @@ jobs:
 - run:
     name: 'Install and test postgres upgrade'
     command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 12 --new-pg-version 13'
     no_output_timeout: 2m

 test-12_check-multi:
   docker:
@@ -266,8 +266,8 @@ StartPortalForQueryExecution(const char *queryString)
 	/* don't display the portal in pg_cursors, it is for internal use only */
 	portal->visible = false;

-	PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT, list_make1(
-						  queryPlan), NULL);
+	PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT,
+					  list_make1(queryPlan), NULL);

 	int eflags = 0;
 	PortalStart(portal, NULL, eflags, GetActiveSnapshot());
@@ -36,8 +36,8 @@
 #define standard_planner_compat(a, c, d) standard_planner(a, NULL, c, d)
 #define getOwnedSequencesCompat(a, b) getOwnedSequences(a)
 #define CMDTAG_SELECT_COMPAT CMDTAG_SELECT
-#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) ExplainOnePlan(a, b, c, d, e, f, g, \
-	h)
+#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \
+	ExplainOnePlan(a, b, c, d, e, f, g, h)
 #define SetListCellPtr(a, b) ((a)->ptr_value = (b))
 #define RangeTableEntryFromNSItem(a) ((a)->p_rte)
 #define QueryCompletionCompat QueryCompletion
@@ -100,6 +100,8 @@ s/partition ".*" would be violated by some row/partition would be violated by so
 /.*Peak Memory Usage:.*$/d
 s/of relation ".*" contains null values/contains null values/g
 s/of relation "t1" is violated by some row/is violated by some row/g
+# can be removed when we remove PG_VERSION_NUM >= 120000
+s/(.*)Output:.*$/\1Output: xxxxxx/g

 # intermediate_results
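The new sed rule above is the core of the commit: per the commit message, EXPLAIN VERBOSE "Output:" lines change with pg13, so instead of keeping version-specific expected files, the harness now rewrites everything after "Output:" to a fixed placeholder. A minimal check of the rule, assuming it is applied with extended regular expressions (sed -E; the pattern uses unescaped groups and a \1 backreference):

    $ echo '   Output: remote_scan.l_quantity, remote_scan.count_quantity' \
        | sed -E 's/(.*)Output:.*$/\1Output: xxxxxx/g'
       Output: xxxxxx

The (.*) capture preserves whatever precedes "Output:", so plan lines keep their indentation. Note the rule fires on any line containing "Output:", not only plan nodes; that is why, in a hunk further down, the expected output for the query SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze(); itself gets rewritten to end at 'Output: xxxxxx.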
@@ -174,7 +174,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_co
 Node: host=localhost port=xxxxx dbname=regression
 -> Insert on public.composite_type_partitioned_table_530003 (actual rows=0 loops=1)
 -> Result (actual rows=1 loops=1)
-Output: 123, '(123,456)'::test_composite_type
+Output: xxxxxx
 (9 rows)

 SELECT run_command_on_coordinator_and_workers($cf$
@@ -218,7 +218,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_co
 Node: host=localhost port=xxxxx dbname=regression
 -> Insert on public.composite_type_partitioned_table_530000 (actual rows=0 loops=1)
 -> Result (actual rows=1 loops=1)
-Output: 123, '(456,678)'::test_composite_type
+Output: xxxxxx
 (9 rows)

 -- create and distribute a table on enum type column
@@ -345,14 +345,14 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
 Sort (actual rows=50 loops=1)
-Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+Output: xxxxxx
 Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
 Sort Method: quicksort Memory: 27kB
 -> HashAggregate (actual rows=50 loops=1)
-Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+Output: xxxxxx
 Group Key: remote_scan.l_quantity
 -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
-Output: remote_scan.l_quantity, remote_scan.count_quantity
+Output: xxxxxx
 Task Count: 2
 Tuple data received from nodes: 780 bytes
 Tasks Shown: One of 2
@@ -361,48 +361,48 @@ Sort (actual rows=50 loops=1)
 Tuple data received from node: 390 bytes
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate (actual rows=50 loops=1)
-Output: l_quantity, count(*)
+Output: xxxxxx
 Group Key: lineitem.l_quantity
 -> Seq Scan on public.lineitem_290000 lineitem (actual rows=6000 loops=1)
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Test query text output, with ANALYZE OFF
 EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
 Sort
-Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+Output: xxxxxx
 Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
 -> HashAggregate
-Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+Output: xxxxxx
 Group Key: remote_scan.l_quantity
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_quantity, remote_scan.count_quantity
+Output: xxxxxx
 Task Count: 2
 Tasks Shown: One of 2
 -> Task
 Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: l_quantity, count(*)
+Output: xxxxxx
 Group Key: lineitem.l_quantity
 -> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Test verbose
 EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
 Aggregate
-Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
+Output: xxxxxx
 Task Count: 2
 Tasks Shown: One of 2
 -> Task
 Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_290000 lineitem WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+Output: xxxxxx
 -> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Test join
 EXPLAIN (COSTS FALSE)
 SELECT * FROM lineitem
@@ -525,40 +525,40 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
 HAVING sum(l_quantity) > 100;
 Aggregate
-Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+Output: xxxxxx
 Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
+Output: xxxxxx
 Task Count: 2
 Tasks Shown: One of 2
 -> Task
 Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM lineitem_290000 lineitem WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
+Output: xxxxxx
 -> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Test having without aggregate
 EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT l_quantity FROM lineitem
 GROUP BY l_quantity
 HAVING l_quantity > (100 * random());
 HashAggregate
-Output: remote_scan.l_quantity
+Output: xxxxxx
 Group Key: remote_scan.l_quantity
 Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_quantity, remote_scan.worker_column_2
+Output: xxxxxx
 Task Count: 2
 Tasks Shown: One of 2
 -> Task
 Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: l_quantity, l_quantity
+Output: xxxxxx
 Group Key: lineitem.l_quantity
 -> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Subquery pushdown tests with explain
 EXPLAIN (COSTS OFF)
 SELECT
@@ -1395,26 +1395,26 @@ series AS (
 SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
 ORDER BY s;
 Custom Scan (Citus Adaptive)
-Output: remote_scan.l_orderkey
+Output: xxxxxx
 -> Distributed Subplan XXX_1
 -> HashAggregate
-Output: remote_scan.l_orderkey
+Output: xxxxxx
 Group Key: remote_scan.l_orderkey
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_orderkey
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT DISTINCT l_orderkey FROM lineitem_hash_part_360041 lineitem_hash_part WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: l_orderkey
+Output: xxxxxx
 Group Key: lineitem_hash_part.l_orderkey
 -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -> Distributed Subplan XXX_2
 -> Function Scan on pg_catalog.generate_series s
-Output: s
+Output: xxxxxx
 Function Call: generate_series(1, 10)
 Task Count: 1
 Tasks Shown: All
@@ -1422,19 +1422,19 @@ Custom Scan (Citus Adaptive)
 Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
 Node: host=localhost port=xxxxx dbname=regression
 -> Merge Join
-Output: intermediate_result_1.l_orderkey, intermediate_result.s
+Output: xxxxxx
 Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
 -> Sort
-Output: intermediate_result.s
+Output: xxxxxx
 Sort Key: intermediate_result.s
 -> Function Scan on pg_catalog.read_intermediate_result intermediate_result
-Output: intermediate_result.s
+Output: xxxxxx
 Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
 -> Sort
-Output: intermediate_result_1.l_orderkey
+Output: xxxxxx
 Sort Key: intermediate_result_1.l_orderkey
 -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
-Output: intermediate_result_1.l_orderkey
+Output: xxxxxx
 Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
 SET citus.enable_cte_inlining TO true;
 SELECT true AS valid FROM explain_json($$
@@ -1755,7 +1755,7 @@ SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_t
 4
 (4 rows)

-SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
+SELECT explain_analyze_output ~ 'Output: xxxxxx
 ?column?
 ---------------------------------------------------------------------
 t
@@ -285,18 +285,18 @@ Sort
 EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
 Aggregate
-Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
+Output: xxxxxx
 Task Count: 16
 Tasks Shown: One of 16
 -> Task
 Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_mx_1220052 lineitem_mx WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+Output: xxxxxx
 -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
 -- Test join
 EXPLAIN (COSTS FALSE)
 SELECT * FROM lineitem_mx
@@ -712,68 +712,68 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
 QUERY PLAN
 ---------------------------------------------------------------------
 Limit
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
 -> Sort
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
 Sort Key: remote_scan.sum DESC, remote_scan.user_id DESC
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT worker_column_1 AS user_id, worker_column_2 AS sum FROM (SELECT ftop.user_id AS worker_column_1, ftop.sum AS worker_column_2 FROM (SELECT user_id_1.user_id, sum(user_id_1.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_1 GROUP BY user_id_1.user_id UNION SELECT user_id_2.user_id, sum(user_id_2.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_2 GROUP BY user_id_2.user_id) ftop) worker_subquery ORDER BY worker_column_2 DESC, worker_column_1 DESC LIMIT '5'::bigint
 Node: host=localhost port=xxxxx dbname=regression
 -> Limit
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
 -> Sort
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
 Sort Key: (sum((sum(users_table.value_2) OVER (?)))) DESC, users_table.user_id DESC
 -> HashAggregate
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
 Group Key: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
 -> Append
 -> HashAggregate
-Output: users_table.user_id, sum((sum(users_table.value_2) OVER (?)))
+Output: xxxxxx
 Group Key: users_table.user_id
 -> HashAggregate
-Output: users_table.user_id, (sum(users_table.value_2) OVER (?))
+Output: xxxxxx
 Group Key: users_table.user_id, (sum(users_table.value_2) OVER (?))
 -> Append
 -> WindowAgg
-Output: users_table.user_id, sum(users_table.value_2) OVER (?)
+Output: xxxxxx
 -> Sort
-Output: users_table.user_id, users_table.value_2
+Output: xxxxxx
 Sort Key: users_table.user_id
 -> Seq Scan on public.users_table_1400256 users_table
-Output: users_table.user_id, users_table.value_2
+Output: xxxxxx
 -> WindowAgg
-Output: events_table.user_id, sum(events_table.value_2) OVER (?)
+Output: xxxxxx
 -> Sort
-Output: events_table.user_id, events_table.value_2
+Output: xxxxxx
 Sort Key: events_table.user_id
 -> Seq Scan on public.events_table_1400260 events_table
-Output: events_table.user_id, events_table.value_2
+Output: xxxxxx
 -> HashAggregate
-Output: users_table_1.user_id, sum((sum(users_table_1.value_2) OVER (?)))
+Output: xxxxxx
 Group Key: users_table_1.user_id
 -> HashAggregate
-Output: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
+Output: xxxxxx
 Group Key: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
 -> Append
 -> WindowAgg
-Output: users_table_1.user_id, sum(users_table_1.value_2) OVER (?)
+Output: xxxxxx
 -> Sort
-Output: users_table_1.user_id, users_table_1.value_2
+Output: xxxxxx
 Sort Key: users_table_1.user_id
 -> Seq Scan on public.users_table_1400256 users_table_1
-Output: users_table_1.user_id, users_table_1.value_2
+Output: xxxxxx
 -> WindowAgg
-Output: events_table_1.user_id, sum(events_table_1.value_2) OVER (?)
+Output: xxxxxx
 -> Sort
-Output: events_table_1.user_id, events_table_1.value_2
+Output: xxxxxx
 Sort Key: events_table_1.user_id
 -> Seq Scan on public.events_table_1400260 events_table_1
-Output: events_table_1.user_id, events_table_1.value_2
+Output: xxxxxx
 (63 rows)

 -- test with window functions which aren't pushed down
@@ -554,14 +554,14 @@ $$);
 coordinator_plan
 ---------------------------------------------------------------------
 Sort
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, (any_value(remote_scan.v1)), (any_value(remote_scan.v2)), ((any_value(remote_scan.v3) || '_notgrouped'::text)), remote_scan.va1, remote_scan.va2, remote_scan.va3, (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint))
+Output: xxxxxx
 Sort Key: remote_scan.k1
 -> HashAggregate
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, any_value(remote_scan.v1), any_value(remote_scan.v2), (any_value(remote_scan.v3) || '_notgrouped'::text), remote_scan.va1, remote_scan.va2, remote_scan.va3, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
+Output: xxxxxx
 Group Key: remote_scan.k1, remote_scan.va1
 Filter: ((length(remote_scan.worker_column_11) + length(any_value(remote_scan.worker_column_12))) < length((any_value(remote_scan.worker_column_13) || '_append'::text)))
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, remote_scan.v1, remote_scan.v2, remote_scan.v3, remote_scan.va1, remote_scan.va2, remote_scan.va3, remote_scan.count, remote_scan.worker_column_11, remote_scan.worker_column_12, remote_scan.worker_column_13
+Output: xxxxxx
 Task Count: 4
 (10 rows)
@@ -42,18 +42,18 @@ FROM latencies;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest(remote_scan.tdigest)
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
@@ -64,17 +64,17 @@ GROUP BY a;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT a, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: a, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.a
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (12 rows)

 -- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
@@ -85,20 +85,20 @@ GROUP BY b;
 QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate
-Output: remote_scan.b, tdigest(remote_scan.tdigest)
+Output: xxxxxx
 Group Key: remote_scan.b
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT b, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.b
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (15 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantile)
@@ -108,18 +108,18 @@ FROM latencies;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantile)
@@ -130,17 +130,17 @@ GROUP BY a;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT a, public.tdigest_percentile(latency, 100, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: a, tdigest_percentile(latency, 100, '0.99'::double precision)
+Output: xxxxxx
 Group Key: latencies.a
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (12 rows)

 -- explain grouping by non-distribution column is partially pushed down for tdigest_precentile(value, compression, quantile)
@@ -151,20 +151,20 @@ GROUP BY b;
 QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate
-Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+Output: xxxxxx
 Group Key: remote_scan.b
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.b
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (15 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -174,18 +174,18 @@ FROM latencies;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -196,17 +196,17 @@ GROUP BY a;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT a, public.tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: a, tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
 Group Key: latencies.a
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (12 rows)

 -- explain grouping by non-distribution column is partially pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -217,20 +217,20 @@ GROUP BY b;
 QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate
-Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
 Group Key: remote_scan.b
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.b
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (15 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -240,18 +240,18 @@ FROM latencies;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -262,17 +262,17 @@ GROUP BY a;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT a, public.tdigest_percentile_of(latency, 100, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: a, tdigest_percentile_of(latency, 100, '9000'::double precision)
+Output: xxxxxx
 Group Key: latencies.a
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (12 rows)

 -- explain grouping by non-distribution column is partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -283,20 +283,20 @@ GROUP BY b;
 QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate
-Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+Output: xxxxxx
 Group Key: remote_scan.b
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.b
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (15 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -306,18 +306,18 @@ FROM latencies;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -328,17 +328,17 @@ GROUP BY a;
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT a, public.tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: a, tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[])
+Output: xxxxxx
 Group Key: latencies.a
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (12 rows)

 -- explain grouping by non-distribution column is partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -349,20 +349,20 @@ GROUP BY b;
 QUERY PLAN
 ---------------------------------------------------------------------
 HashAggregate
-Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+Output: xxxxxx
 Group Key: remote_scan.b
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile_of
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
 Group Key: latencies.b
 -> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
 (15 rows)

 -- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges
@@ -413,18 +413,18 @@ FROM latencies_rollup;
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-Output: tdigest(remote_scan.tdigest)
+Output: xxxxxx
 -> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest
+Output: xxxxxx
 Task Count: 4
 Tasks Shown: One of 4
 -> Task
 Query: SELECT public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
 -> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
@@ -435,17 +435,17 @@ GROUP BY a;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-   Output: remote_scan.a, remote_scan.tdigest
+   Output: xxxxxx
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
-              Output: a, tdigest(tdigest)
+              Output: xxxxxx
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                    Output: a, tdigest
+                    Output: xxxxxx
 (12 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile(tdigest, quantile)
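Likewise, a hedged sketch of the statement behind the fully pushed-down plan above (EXPLAIN options again inferred from the output shape):

-- assumed reconstruction of the rollup test statement
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(tdigest)
FROM latencies_rollup
GROUP BY a;

Grouping by the distribution column a lets each shard merge its digests on its own, so the coordinator plan is just the Custom Scan with no aggregate node on top.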
@@ -455,18 +455,18 @@ FROM latencies_rollup;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-   Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+   Output: xxxxxx
   ->  Custom Scan (Citus Adaptive)
-        Output: remote_scan.tdigest_percentile
+        Output: xxxxxx
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
-                    Output: tdigest(tdigest)
+                    Output: xxxxxx
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                          Output: a, tdigest
+                          Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile(tdigest, quantile)
@@ -477,17 +477,17 @@ GROUP BY a;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-   Output: remote_scan.a, remote_scan.tdigest_percentile
+   Output: xxxxxx
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(tdigest, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
-              Output: a, tdigest_percentile(tdigest, '0.99'::double precision)
+              Output: xxxxxx
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                    Output: a, tdigest
+                    Output: xxxxxx
 (12 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantiles[])
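And a sketch of the statement for the hunk above, under the same assumptions about the EXPLAIN options:

-- assumed reconstruction; quantile value comes from the worker query above
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup
GROUP BY a;

The remaining hunks repeat this pattern for the scalar and array forms of tdigest_percentile and tdigest_percentile_of: grouped by the distribution column, the whole aggregate runs on the workers; otherwise the workers return tdigest state and the coordinator finishes the percentile computation.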
@@ -497,18 +497,18 @@ FROM latencies_rollup;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-   Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+   Output: xxxxxx
   ->  Custom Scan (Citus Adaptive)
-        Output: remote_scan.tdigest_percentile
+        Output: xxxxxx
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
-                    Output: tdigest(tdigest)
+                    Output: xxxxxx
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                          Output: a, tdigest
+                          Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -519,17 +519,17 @@ GROUP BY a;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-   Output: remote_scan.a, remote_scan.tdigest_percentile
+   Output: xxxxxx
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
-              Output: a, tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[])
+              Output: xxxxxx
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                    Output: a, tdigest
+                    Output: xxxxxx
 (12 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -539,18 +539,18 @@ FROM latencies_rollup;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+   Output: xxxxxx
   ->  Custom Scan (Citus Adaptive)
-        Output: remote_scan.tdigest_percentile_of
+        Output: xxxxxx
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
-                    Output: tdigest(tdigest)
+                    Output: xxxxxx
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                          Output: a, tdigest
+                          Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -561,17 +561,17 @@ GROUP BY a;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-   Output: remote_scan.a, remote_scan.tdigest_percentile_of
+   Output: xxxxxx
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(tdigest, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
-              Output: a, tdigest_percentile_of(tdigest, '9000'::double precision)
+              Output: xxxxxx
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                    Output: a, tdigest
+                    Output: xxxxxx
 (12 rows)

 -- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -581,18 +581,18 @@ FROM latencies_rollup;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
-   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+   Output: xxxxxx
   ->  Custom Scan (Citus Adaptive)
-        Output: remote_scan.tdigest_percentile_of
+        Output: xxxxxx
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
-                    Output: tdigest(tdigest)
+                    Output: xxxxxx
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                          Output: a, tdigest
+                          Output: xxxxxx
 (13 rows)

 -- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -603,17 +603,17 @@ GROUP BY a;
                              QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-   Output: remote_scan.a, remote_scan.tdigest_percentile_of
+   Output: xxxxxx
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
-              Output: a, tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[])
+              Output: xxxxxx
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-                    Output: a, tdigest
+                    Output: xxxxxx
 (12 rows)

 -- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges

@@ -21,11 +21,6 @@ step "s1-begin"
     BEGIN;
 }

-step "s1-commit"
-{
-    COMMIT;
-}
-
 step "s1-rollback"
 {
     ROLLBACK;