Normalize "Output: ..." since it changes with pg13

Fix indentation for better readability
pull/3900/head
Sait Talha Nisanci 2020-07-30 10:46:32 +03:00
parent 283b1db6a4
commit fe4ac51d8c
11 changed files with 162 additions and 165 deletions

@@ -166,7 +166,7 @@ jobs:
- run:
name: 'Install and test postgres upgrade'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 12 --new-pg-version 13'
-no_output_timeout: 2m
+no_output_timeout: 2m
test-12_check-multi:
docker:

@@ -266,8 +266,8 @@ StartPortalForQueryExecution(const char *queryString)
/* don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT, list_make1(
-queryPlan), NULL);
+PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT,
+list_make1(queryPlan), NULL);
int eflags = 0;
PortalStart(portal, NULL, eflags, GetActiveSnapshot());

@@ -36,8 +36,8 @@
#define standard_planner_compat(a, c, d) standard_planner(a, NULL, c, d)
#define getOwnedSequencesCompat(a, b) getOwnedSequences(a)
#define CMDTAG_SELECT_COMPAT CMDTAG_SELECT
-#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) ExplainOnePlan(a, b, c, d, e, f, g, \
-h)
+#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \
+ExplainOnePlan(a, b, c, d, e, f, g, h)
#define SetListCellPtr(a, b) ((a)->ptr_value = (b))
#define RangeTableEntryFromNSItem(a) ((a)->p_rte)
#define QueryCompletionCompat QueryCompletion
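
These *Compat macros give Citus call sites a single spelling across PostgreSQL versions; a sibling branch of this header (not shown in the hunk) maps the same names onto the older signatures. A minimal sketch of the pattern, assuming the PG13 change to ExplainOnePlan is the extra trailing argument it gained in that release; the cutoff and the pre-13 expansion here are illustrative, not copied from the header:

    /* one call-site spelling, per-version expansion */
    #if PG_VERSION_NUM >= 130000
    #define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \
        ExplainOnePlan(a, b, c, d, e, f, g, h)
    #else
    /* assumed pre-13 form: ExplainOnePlan has no eighth parameter, so drop it */
    #define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \
        ExplainOnePlan(a, b, c, d, e, f, g)
    #endif

Call sites can then use ExplainOnePlanCompat unconditionally and compile against either major version.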

@@ -100,6 +100,8 @@ s/partition ".*" would be violated by some row/partition would be violated by so
/.*Peak Memory Usage:.*$/d
s/of relation ".*" contains null values/contains null values/g
s/of relation "t1" is violated by some row/is violated by some row/g
+# can be removed when we remove PG_VERSION_NUM >= 120000
+s/(.*)Output:.*$/\1Output: xxxxxx/g
# intermediate_results
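
The added rule keeps everything before "Output:" (the plan indentation) in the capture group and masks the rest, so target lists that PG13 prints differently compare equal after normalization. A minimal sketch of the effect, assuming the script is applied with extended regexes (sed -E), as the unescaped capture group in the rule implies:

    $ echo '         Output: l_quantity, count(*)' | sed -E 's/(.*)Output:.*$/\1Output: xxxxxx/g'
             Output: xxxxxx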

@@ -174,7 +174,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530003 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
-Output: 123, '(123,456)'::test_composite_type
+Output: xxxxxx
(9 rows)
SELECT run_command_on_coordinator_and_workers($cf$
@@ -218,7 +218,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530000 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
-Output: 123, '(456,678)'::test_composite_type
+Output: xxxxxx
(9 rows)
-- create and distribute a table on enum type column

@@ -345,14 +345,14 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
-Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+Output: xxxxxx
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
-Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+Output: xxxxxx
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
-Output: remote_scan.l_quantity, remote_scan.count_quantity
+Output: xxxxxx
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: One of 2
@@ -361,48 +361,48 @@ Sort (actual rows=50 loops=1)
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
-Output: l_quantity, count(*)
+Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem (actual rows=6000 loops=1)
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Test query text output, with ANALYZE OFF
EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
-Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+Output: xxxxxx
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
-Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+Output: xxxxxx
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_quantity, remote_scan.count_quantity
+Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: l_quantity, count(*)
+Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+Output: xxxxxx
-> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem
@@ -525,40 +525,40 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
HAVING sum(l_quantity) > 100;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
+Output: xxxxxx
-> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Test having without aggregate
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT l_quantity FROM lineitem
GROUP BY l_quantity
HAVING l_quantity > (100 * random());
HashAggregate
-Output: remote_scan.l_quantity
+Output: xxxxxx
Group Key: remote_scan.l_quantity
Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_quantity, remote_scan.worker_column_2
+Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: l_quantity, l_quantity
+Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Subquery pushdown tests with explain
EXPLAIN (COSTS OFF)
SELECT
@@ -1395,26 +1395,26 @@ series AS (
SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
ORDER BY s;
Custom Scan (Citus Adaptive)
-Output: remote_scan.l_orderkey
+Output: xxxxxx
-> Distributed Subplan XXX_1
-> HashAggregate
-Output: remote_scan.l_orderkey
+Output: xxxxxx
Group Key: remote_scan.l_orderkey
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.l_orderkey
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT DISTINCT l_orderkey FROM lineitem_hash_part_360041 lineitem_hash_part WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: l_orderkey
+Output: xxxxxx
Group Key: lineitem_hash_part.l_orderkey
-> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-> Distributed Subplan XXX_2
-> Function Scan on pg_catalog.generate_series s
-Output: s
+Output: xxxxxx
Function Call: generate_series(1, 10)
Task Count: 1
Tasks Shown: All
@@ -1422,19 +1422,19 @@ Custom Scan (Citus Adaptive)
Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
-Output: intermediate_result_1.l_orderkey, intermediate_result.s
+Output: xxxxxx
Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
-> Sort
-Output: intermediate_result.s
+Output: xxxxxx
Sort Key: intermediate_result.s
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result
-Output: intermediate_result.s
+Output: xxxxxx
Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
-> Sort
-Output: intermediate_result_1.l_orderkey
+Output: xxxxxx
Sort Key: intermediate_result_1.l_orderkey
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
-Output: intermediate_result_1.l_orderkey
+Output: xxxxxx
Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
SET citus.enable_cte_inlining TO true;
SELECT true AS valid FROM explain_json($$
@@ -1755,7 +1755,7 @@ SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_t
4
(4 rows)
-SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
+SELECT explain_analyze_output ~ 'Output: xxxxxx
?column?
---------------------------------------------------------------------
t

@@ -285,18 +285,18 @@ Sort
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Output: xxxxxx
Task Count: 16
Tasks Shown: One of 16
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_mx_1220052 lineitem_mx WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+Output: xxxxxx
-> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
-Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Output: xxxxxx
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem_mx

@@ -712,68 +712,68 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
QUERY PLAN
---------------------------------------------------------------------
Limit
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
-> Sort
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
Sort Key: remote_scan.sum DESC, remote_scan.user_id DESC
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.user_id, remote_scan.sum
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS user_id, worker_column_2 AS sum FROM (SELECT ftop.user_id AS worker_column_1, ftop.sum AS worker_column_2 FROM (SELECT user_id_1.user_id, sum(user_id_1.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_1 GROUP BY user_id_1.user_id UNION SELECT user_id_2.user_id, sum(user_id_2.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_2 GROUP BY user_id_2.user_id) ftop) worker_subquery ORDER BY worker_column_2 DESC, worker_column_1 DESC LIMIT '5'::bigint
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
-> Sort
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
Sort Key: (sum((sum(users_table.value_2) OVER (?)))) DESC, users_table.user_id DESC
-> HashAggregate
-Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
+Output: xxxxxx
Group Key: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
-> Append
-> HashAggregate
-Output: users_table.user_id, sum((sum(users_table.value_2) OVER (?)))
+Output: xxxxxx
Group Key: users_table.user_id
-> HashAggregate
-Output: users_table.user_id, (sum(users_table.value_2) OVER (?))
+Output: xxxxxx
Group Key: users_table.user_id, (sum(users_table.value_2) OVER (?))
-> Append
-> WindowAgg
-Output: users_table.user_id, sum(users_table.value_2) OVER (?)
+Output: xxxxxx
-> Sort
-Output: users_table.user_id, users_table.value_2
+Output: xxxxxx
Sort Key: users_table.user_id
-> Seq Scan on public.users_table_1400256 users_table
-Output: users_table.user_id, users_table.value_2
+Output: xxxxxx
-> WindowAgg
-Output: events_table.user_id, sum(events_table.value_2) OVER (?)
+Output: xxxxxx
-> Sort
-Output: events_table.user_id, events_table.value_2
+Output: xxxxxx
Sort Key: events_table.user_id
-> Seq Scan on public.events_table_1400260 events_table
-Output: events_table.user_id, events_table.value_2
+Output: xxxxxx
-> HashAggregate
-Output: users_table_1.user_id, sum((sum(users_table_1.value_2) OVER (?)))
+Output: xxxxxx
Group Key: users_table_1.user_id
-> HashAggregate
-Output: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
+Output: xxxxxx
Group Key: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
-> Append
-> WindowAgg
-Output: users_table_1.user_id, sum(users_table_1.value_2) OVER (?)
+Output: xxxxxx
-> Sort
-Output: users_table_1.user_id, users_table_1.value_2
+Output: xxxxxx
Sort Key: users_table_1.user_id
-> Seq Scan on public.users_table_1400256 users_table_1
-Output: users_table_1.user_id, users_table_1.value_2
+Output: xxxxxx
-> WindowAgg
-Output: events_table_1.user_id, sum(events_table_1.value_2) OVER (?)
+Output: xxxxxx
-> Sort
-Output: events_table_1.user_id, events_table_1.value_2
+Output: xxxxxx
Sort Key: events_table_1.user_id
-> Seq Scan on public.events_table_1400260 events_table_1
-Output: events_table_1.user_id, events_table_1.value_2
+Output: xxxxxx
(63 rows)
-- test with window functions which aren't pushed down

@@ -554,14 +554,14 @@ $$);
coordinator_plan
---------------------------------------------------------------------
Sort
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, (any_value(remote_scan.v1)), (any_value(remote_scan.v2)), ((any_value(remote_scan.v3) || '_notgrouped'::text)), remote_scan.va1, remote_scan.va2, remote_scan.va3, (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint))
+Output: xxxxxx
Sort Key: remote_scan.k1
-> HashAggregate
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, any_value(remote_scan.v1), any_value(remote_scan.v2), (any_value(remote_scan.v3) || '_notgrouped'::text), remote_scan.va1, remote_scan.va2, remote_scan.va3, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
+Output: xxxxxx
Group Key: remote_scan.k1, remote_scan.va1
Filter: ((length(remote_scan.worker_column_11) + length(any_value(remote_scan.worker_column_12))) < length((any_value(remote_scan.worker_column_13) || '_append'::text)))
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, remote_scan.v1, remote_scan.v2, remote_scan.v3, remote_scan.va1, remote_scan.va2, remote_scan.va3, remote_scan.count, remote_scan.worker_column_11, remote_scan.worker_column_12, remote_scan.worker_column_13
+Output: xxxxxx
Task Count: 4
(10 rows)

@@ -42,18 +42,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest(remote_scan.tdigest)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
@@ -64,17 +64,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
@@ -85,20 +85,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
-Output: remote_scan.b, tdigest(remote_scan.tdigest)
+Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantile)
@@ -108,18 +108,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantile)
@@ -130,17 +130,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(latency, 100, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile(latency, 100, '0.99'::double precision)
+Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_precentile(value, compression, quantile)
@@ -151,20 +151,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
-Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -174,18 +174,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -196,17 +196,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -217,20 +217,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
-Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -240,18 +240,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -262,17 +262,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(latency, 100, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile_of(latency, 100, '9000'::double precision)
+Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -283,20 +283,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
-Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -306,18 +306,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(latency, 100)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -328,17 +328,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[])
+Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -349,20 +349,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
-Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.b, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: b, tdigest(latency, 100)
+Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
-Output: a, b, latency
+Output: xxxxxx
(15 rows)
-- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges
@@ -413,18 +413,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest(remote_scan.tdigest)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
@@ -435,17 +435,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest(tdigest)
+Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile(tdigest, quantile)
@@ -455,18 +455,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile(tdigest, quantile)
@@ -477,17 +477,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(tdigest, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile(tdigest, '0.99'::double precision)
+Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -497,18 +497,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile(value, compression, quantiles[])
@@ -519,17 +519,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[])
+Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -539,18 +539,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_value)
@@ -561,17 +561,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(tdigest, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile_of(tdigest, '9000'::double precision)
+Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -581,18 +581,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
-Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
+Output: xxxxxx
-> Custom Scan (Citus Adaptive)
-Output: remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-Output: tdigest(tdigest)
+Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_precentile_of(value, compression, hypotetical_values[])
@@ -603,17 +603,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
-Output: remote_scan.a, remote_scan.tdigest_percentile_of
+Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
-Output: a, tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[])
+Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
-Output: a, tdigest
+Output: xxxxxx
(12 rows)
-- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges

@@ -21,11 +21,6 @@ step "s1-begin"
BEGIN;
}
step "s1-commit"
{
COMMIT;
}
step "s1-rollback"
{
ROLLBACK;