--
-- MULTI_EXPLAIN
--
SET citus.next_shard_id TO 570000;
\a\t
SET citus.explain_distributed_queries TO on;
SET citus.enable_repartition_joins to ON;
-- Function that runs EXPLAIN and captures the output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb
AS $BODY$
DECLARE
result jsonb;
BEGIN
EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- Function that runs EXPLAIN and captures the output as XML
CREATE FUNCTION explain_xml(query text)
RETURNS xml
AS $BODY$
DECLARE
result xml;
BEGIN
EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
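-- Illustrative usage sketch (commented out; not part of the expected output):
-- because the helpers above return structured documents, single plan fields
-- can be pulled out with the ordinary jsonb operators and xpath(). Both of
-- the following would yield the top-level node type ('Result'):
--
-- SELECT explain_json('SELECT 1') -> 0 -> 'Plan' ->> 'Node Type';
-- SELECT (xpath('/e:explain/e:Query/e:Plan/e:Node-Type/text()',
--               explain_xml('SELECT 1'),
--               ARRAY[ARRAY['e', 'http://www.postgresql.org/2009/explain']]))[1];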
-- VACUUM related tables to ensure test outputs are stable
VACUUM ANALYZE lineitem;
VACUUM ANALYZE orders;
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: l_quantity
-> Seq Scan on lineitem_290000 lineitem
-- Test disable hash aggregate
SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> GroupAggregate
Group Key: remote_scan.l_quantity
-> Sort
Sort Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: l_quantity
-> Seq Scan on lineitem_290000 lineitem
SET enable_hashagg TO on;
-- Test JSON format
EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
[
{
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Distributed Query": {
"Job": {
"Task Count": 2,
"Tasks Shown": "One of 2",
"Tasks": [
{
"Node": "host=localhost port=xxxxx dbname=regression",
"Remote Plan": [
[
{
"Plan": {
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "lineitem_290000",
"Alias": "lineitem"
}
]
}
}
]
]
}
]
}
}
}
]
}
]
}
}
]
-- Validate JSON format
SELECT true AS valid FROM explain_json($$
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
-- Test XML format
EXPLAIN (COSTS FALSE, FORMAT XML)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Sort-Key>
<Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
<Item>remote_scan.l_quantity</Item>
</Sort-Key>
<Plans>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Distributed-Query>
<Job>
<Task-Count>2</Task-Count>
<Tasks-Shown>One of 2</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=xxxxx dbname=regression</Node>
<Remote-Plan>
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>lineitem_290000</Relation-Name>
<Alias>lineitem</Alias>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
</Remote-Plan>
</Task>
</Tasks>
</Job>
</Distributed-Query>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
-- Validate XML format
SELECT true AS valid FROM explain_xml($$
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
-- Test YAML format
EXPLAIN (COSTS FALSE, FORMAT YAML)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
- Plan:
Node Type: "Sort"
Parallel Aware: false
Sort Key:
- "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
- "remote_scan.l_quantity"
Plans:
- Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parent Relationship: "Outer"
Parallel Aware: false
Group Key:
- "remote_scan.l_quantity"
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Distributed Query:
Job:
Task Count: 2
Tasks Shown: "One of 2"
Tasks:
- Node: "host=localhost port=xxxxx dbname=regression"
Remote Plan:
- Plan:
Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parallel Aware: false
Group Key:
- "l_quantity"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "lineitem_290000"
Alias: "lineitem"
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: l_quantity
-> Seq Scan on lineitem_290000 lineitem
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Group Key: l_quantity
-> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1)
-- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
SET citus.shard_count TO 3;
CREATE TABLE t1(a int, b int);
CREATE TABLE t2(a int, b int);
SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
|
BEGIN;
SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
Task Count: 4
Tuple data received from nodes: 4 bytes
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 3
Merge Task Count: 4
-- Confirm repartition join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
WITH repartion AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
SELECT count(*) from repartion;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: 14 bytes
Result destination: Write locally
-> Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
Task Count: 4
Tuple data received from nodes: 4 bytes
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 3
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 3
Merge Task Count: 4
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
END;
DROP TABLE t1, t2;
-- Test query text output, with ANALYZE ON
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
Output: remote_scan.l_quantity, remote_scan.count_quantity
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Output: l_quantity, count(*)
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem (actual rows=6000 loops=1)
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test query text output, with ANALYZE OFF
EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_quantity, remote_scan.count_quantity
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_quantity, count(*)
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem
JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0
ORDER BY l_quantity LIMIT 10;
Limit
-> Sort
Sort Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: lineitem.l_quantity
-> Hash Join
Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
-> Seq Scan on lineitem_290000 lineitem
Filter: (l_quantity < 5.0)
-> Hash
-> Seq Scan on orders_290002 orders
-- Test insert
EXPLAIN (COSTS FALSE)
INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on lineitem_290000 citus_table_alias
-> Values Scan on "*VALUES*"
-- Test update
EXPLAIN (COSTS FALSE)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_290000 lineitem
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
BEGIN;
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_290000 lineitem (actual rows=0 loops=1)
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=0 loops=1)
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
Rows Removed by Filter: 6
ROLLBACK;
-- Test delete
EXPLAIN (COSTS FALSE)
DELETE FROM lineitem
WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_290000 lineitem
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
-- Test zero-shard update
EXPLAIN (COSTS FALSE)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_orderkey = 0;
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: All
-- Test zero-shard delete
EXPLAIN (COSTS FALSE)
DELETE FROM lineitem
WHERE l_orderkey = 1 AND l_orderkey = 0;
Custom Scan (Citus Adaptive)
Task Count: 0
Tasks Shown: All
-- Test single-shard SELECT
EXPLAIN (COSTS FALSE)
SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 5)
SELECT true AS valid FROM explain_xml($$
SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
t
SELECT true AS valid FROM explain_json($$
SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
t
-- Test CREATE TABLE ... AS
EXPLAIN (COSTS FALSE)
CREATE TABLE explain_result AS
SELECT * FROM lineitem;
Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on lineitem_290000 lineitem
-- Test having
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
HAVING sum(l_quantity) > 100;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test having without aggregate
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT l_quantity FROM lineitem
GROUP BY l_quantity
HAVING l_quantity > (100 * random());
HashAggregate
Output: remote_scan.l_quantity
Group Key: remote_scan.l_quantity
Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_quantity, remote_scan.worker_column_2
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_quantity, l_quantity
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Subquery pushdown tests with explain
EXPLAIN (COSTS OFF)
SELECT
avg(array_length(events, 1)) AS event_average
FROM
(SELECT
tenant_id,
user_id,
array_agg(event_type ORDER BY event_time) AS events
FROM
(SELECT
(users.composite_id).tenant_id,
(users.composite_id).user_id,
event_type,
events.event_time
FROM
users,
events
WHERE
(users.composite_id) = (events.composite_id) AND
users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type IN ('click', 'submit', 'pay')) AS subquery
GROUP BY
tenant_id,
user_id) AS subquery;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Hash
-> Seq Scan on events_1400285 events
Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
-- Union and left join subquery pushdown
EXPLAIN (COSTS OFF)
SELECT
avg(array_length(events, 1)) AS event_average,
hasdone
FROM
(SELECT
subquery_1.tenant_id,
subquery_1.user_id,
array_agg(event ORDER BY event_time) AS events,
COALESCE(hasdone, 'Has not done paying') AS hasdone
FROM
(
(SELECT
(users.composite_id).tenant_id,
(users.composite_id).user_id,
(users.composite_id) as composite_id,
'action=>1'AS event,
events.event_time
FROM
users,
events
WHERE
(users.composite_id) = (events.composite_id) AND
users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'click')
UNION
(SELECT
(users.composite_id).tenant_id,
(users.composite_id).user_id,
(users.composite_id) as composite_id,
'action=>2'AS event,
events.event_time
FROM
users,
events
WHERE
(users.composite_id) = (events.composite_id) AND
users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'submit')
) AS subquery_1
LEFT JOIN
(SELECT
DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id,
(composite_id).tenant_id,
(composite_id).user_id,
'Has done paying'::TEXT AS hasdone
FROM
events
WHERE
events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'pay') AS subquery_2
ON
subquery_1.composite_id = subquery_2.composite_id
GROUP BY
subquery_1.tenant_id,
subquery_1.user_id,
hasdone) AS subquery_top
GROUP BY
hasdone;
HashAggregate
Group Key: remote_scan.hasdone
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Group Key: subquery_top.hasdone
-> Sort
Sort Key: subquery_top.hasdone
-> Subquery Scan on subquery_top
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
-> Append
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Hash
-> Seq Scan on events_1400285 events
Filter: ((event_type)::text = 'click'::text)
-> Hash Join
Hash Cond: (users_1.composite_id = events_1.composite_id)
-> Seq Scan on users_1400289 users_1
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Hash
-> Seq Scan on events_1400285 events_1
Filter: ((event_type)::text = 'submit'::text)
-> Hash
-> Subquery Scan on subquery_2
-> Unique
-> Sort
Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
-> Seq Scan on events_1400285 events_2
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
-- Union, left join and having subquery pushdown
EXPLAIN (COSTS OFF)
SELECT
avg(array_length(events, 1)) AS event_average,
count_pay
FROM (
SELECT
subquery_1.tenant_id,
subquery_1.user_id,
array_agg(event ORDER BY event_time) AS events,
COALESCE(count_pay, 0) AS count_pay
FROM
(
(SELECT
(users.composite_id).tenant_id,
(users.composite_id).user_id,
(users.composite_id),
'action=>1'AS event,
events.event_time
FROM
users,
events
WHERE
(users.composite_id) = (events.composite_id) AND
users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'click')
UNION
(SELECT
(users.composite_id).tenant_id,
(users.composite_id).user_id,
(users.composite_id),
'action=>2'AS event,
events.event_time
FROM
users,
events
WHERE
(users.composite_id) = (events.composite_id) AND
users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'submit')
) AS subquery_1
LEFT JOIN
(SELECT
(composite_id).tenant_id,
(composite_id).user_id,
composite_id,
COUNT(*) AS count_pay
FROM
events
WHERE
events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
event_type = 'pay'
GROUP BY
composite_id
HAVING
COUNT(*) > 2) AS subquery_2
ON
subquery_1.composite_id = subquery_2.composite_id
GROUP BY
subquery_1.tenant_id,
subquery_1.user_id,
count_pay) AS subquery_top
WHERE
array_ndims(events) > 0
GROUP BY
count_pay
ORDER BY
count_pay;
Sort
Sort Key: remote_scan.count_pay
-> HashAggregate
Group Key: remote_scan.count_pay
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Group Key: subquery_top.count_pay
-> Sort
Sort Key: subquery_top.count_pay
-> Subquery Scan on subquery_top
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
-> Append
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Hash
-> Seq Scan on events_1400285 events
Filter: ((event_type)::text = 'click'::text)
-> Hash Join
Hash Cond: (users_1.composite_id = events_1.composite_id)
-> Seq Scan on users_1400289 users_1
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Hash
-> Seq Scan on events_1400285 events_1
Filter: ((event_type)::text = 'submit'::text)
-> Hash
-> Subquery Scan on subquery_2
-> GroupAggregate
Group Key: events_2.composite_id
Filter: (count(*) > 2)
-> Sort
Sort Key: events_2.composite_id
-> Seq Scan on events_1400285 events_2
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
-- Lateral join subquery pushdown
-- set citus.subquery_pushdown because of the LIMIT in the subquery
SET citus.subquery_pushdown to ON;
EXPLAIN (COSTS OFF)
SELECT
tenant_id,
user_id,
user_lastseen,
event_array
FROM
(SELECT
tenant_id,
user_id,
max(lastseen) as user_lastseen,
array_agg(event_type ORDER BY event_time) AS event_array
FROM
(SELECT
(composite_id).tenant_id,
(composite_id).user_id,
composite_id,
lastseen
FROM
users
WHERE
composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
composite_id <= '(1, 9223372036854775807)'::user_composite_type
ORDER BY
lastseen DESC
LIMIT
10
) AS subquery_top
LEFT JOIN LATERAL
(SELECT
event_type,
event_time
FROM
events
WHERE
(composite_id) = subquery_top.composite_id
ORDER BY
event_time DESC
LIMIT
99) AS subquery_lateral
ON
true
GROUP BY
tenant_id,
user_id
) AS shard_union
ORDER BY
user_lastseen DESC
LIMIT
10;
Limit
-> Sort
Sort Key: remote_scan.user_lastseen DESC
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: (max(users.lastseen)) DESC
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Nested Loop Left Join
-> Limit
-> Sort
Sort Key: users.lastseen DESC
-> Seq Scan on users_1400289 users
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-> Limit
-> Sort
Sort Key: events.event_time DESC
-> Seq Scan on events_1400285 events
Filter: (composite_id = users.composite_id)
RESET citus.subquery_pushdown;
-- Test all tasks output
SET citus.explain_all_tasks TO on;
EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290001 lineitem
Filter: (l_orderkey > 9030)
SELECT true AS valid FROM explain_xml($$
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
t
SELECT true AS valid FROM explain_json($$
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
t
-- Test multi-shard update
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12;
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360041 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360042 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360043 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360044 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12
WHERE l_orderkey = 1 OR l_orderkey = 3;
Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360041 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360042 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
-- Test multi-shard delete
EXPLAIN (COSTS FALSE)
DELETE FROM lineitem_hash_part;
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_hash_part_360041 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_hash_part_360042 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_hash_part_360043 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_hash_part_360044 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Group Key: l_quantity
-> Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1)
-> Task
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Group Key: l_quantity
-> Seq Scan on lineitem_290001 lineitem (actual rows=6000 loops=1)
SET citus.explain_all_tasks TO off;
-- Test update with join (UPDATE ... FROM)
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12
FROM orders_hash_part
WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_hash_part_360041 lineitem_hash_part
-> Hash Join
Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
-> Hash
-> Seq Scan on orders_hash_part_360045 orders_hash_part
-- Test delete with join (DELETE ... USING)
EXPLAIN (COSTS FALSE)
DELETE FROM lineitem_hash_part
USING orders_hash_part
WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on lineitem_hash_part_360041 lineitem_hash_part
-> Hash Join
Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
-> Hash
-> Seq Scan on orders_hash_part_360045 orders_hash_part
-- Test task tracker
EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290001 lineitem
Filter: (l_orderkey > 9030)
-- Test re-partition join
EXPLAIN (COSTS FALSE)
SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 1
Merge Task Count: 1
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 1
EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
[
{
"Plan": {
"Node Type": "Aggregate",
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Distributed Query": {
"Job": {
"Task Count": 1,
"Tasks Shown": "None, not supported for re-partition queries",
"Dependent Jobs": [
{
"Map Task Count": 1,
"Merge Task Count": 1,
"Dependent Jobs": [
{
"Map Task Count": 2,
"Merge Task Count": 1
}
]
}
]
}
}
}
]
}
}
]
SELECT true AS valid FROM explain_json($$
SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT XML)
SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Distributed-Query>
<Job>
<Task-Count>1</Task-Count>
<Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
<Dependent-Jobs>
<MapMergeJob>
<Map-Task-Count>1</Map-Task-Count>
<Merge-Task-Count>1</Merge-Task-Count>
<Dependent-Jobs>
<MapMergeJob>
<Map-Task-Count>2</Map-Task-Count>
<Merge-Task-Count>1</Merge-Task-Count>
</MapMergeJob>
</Dependent-Jobs>
</MapMergeJob>
</Dependent-Jobs>
</Job>
</Distributed-Query>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
SELECT true AS valid FROM explain_xml($$
SELECT count(*)
FROM lineitem, orders, customer_append, supplier
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
t
-- make sure that EXPLAIN works without
-- problems for queries that involve only
-- reference tables
SELECT true AS valid FROM explain_xml($$
SELECT count(*)
FROM nation
WHERE n_name = 'CHINA'$$);
t
SELECT true AS valid FROM explain_xml($$
SELECT count(*)
FROM nation, supplier
WHERE nation.n_nationkey = supplier.s_nationkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT YAML)
SELECT count(*)
FROM lineitem, orders, customer, supplier_single_shard
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
- Plan:
Node Type: "Aggregate"
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Distributed Query:
Job:
Task Count: 1
Tasks Shown: "None, not supported for re-partition queries"
Dependent Jobs:
- Map Task Count: 2
Merge Task Count: 1
-- ensure local plans display correctly
CREATE TABLE lineitem_clone (LIKE lineitem);
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
Aggregate
-> Seq Scan on lineitem_clone
-- ensure distributed plans don't break
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290000 lineitem
-- ensure EXPLAIN EXECUTE doesn't crash
PREPARE task_tracker_query AS
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290001 lineitem
Filter: (l_orderkey > 9030)
PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
EXPLAIN EXECUTE router_executor_query;
Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=100000 width=18)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5)
PREPARE real_time_executor_query AS
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Seq Scan on lineitem_290001 lineitem
Filter: (l_orderkey > 9030)
-- EXPLAIN EXECUTE of parameterized prepared statements is broken, but
-- at least make sure it fails without crashing
PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
EXPLAIN EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=100000 width=18)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5)
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
Task Count: 1
Tuple data received from nodes: 15 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 15 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=3 loops=1)
Index Cond: (l_orderkey = 5)
\set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
BEGIN;
EXPLAIN EXECUTE multi_shard_query_param(5);
WARNING: there is no parameter $1
WARNING: there is no parameter $1
Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Task Count: 2
Tasks Shown: One of 2
-> Task
Error: Could not get remote plan.
ROLLBACK;
BEGIN;
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5);
WARNING: there is no parameter $1
WARNING: there is no parameter $1
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Error: Could not get remote plan.
ROLLBACK;
\set VERBOSITY DEFAULT
-- test explain in a transaction with alter table to verify we use the right connections
BEGIN;
CREATE TABLE explain_table(id int);
SELECT create_distributed_table('explain_table', 'id');
ALTER TABLE explain_table ADD COLUMN value int;
ROLLBACK;
-- test explain with local INSERT ... SELECT
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part
SELECT o_orderkey FROM orders_hash_part LIMIT 3;
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Limit
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Seq Scan on orders_hash_part_360045 orders_hash_part
SELECT true AS valid FROM explain_json($$
INSERT INTO lineitem_hash_part (l_orderkey)
SELECT o_orderkey FROM orders_hash_part LIMIT 3;
$$);
t
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part (l_orderkey, l_quantity)
SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3;
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Limit
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Seq Scan on orders_hash_part_360045 orders_hash_part
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part (l_orderkey)
SELECT s FROM generate_series(1,5) s;
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Function Scan on generate_series s
-- WHERE EXISTS forces PG 12 to materialize the CTE
EXPLAIN (COSTS OFF)
WITH cte1 AS (SELECT s FROM generate_series(1,10) s)
INSERT INTO lineitem_hash_part
WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5)
SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Subquery Scan on citus_insert_select_subquery
CTE cte1
-> Function Scan on generate_series s
-> Result
One-Time Filter: $3
CTE cte1
-> Limit
InitPlan 2 (returns $1)
-> CTE Scan on cte1 cte1_1
-> Result
One-Time Filter: $1
-> CTE Scan on cte1 cte1_2
InitPlan 4 (returns $3)
-> CTE Scan on cte1 cte1_3
-> CTE Scan on cte1
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part
( SELECT s FROM generate_series(1,5) s) UNION
( SELECT s FROM generate_series(5,10) s);
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Subquery Scan on citus_insert_select_subquery
-> HashAggregate
Group Key: s.s
-> Append
-> Function Scan on generate_series s
-> Function Scan on generate_series s_1
-- explain with recursive planning
-- prevent PG 11 and PG 12 outputs from diverging
SET citus.enable_cte_inlining TO false;
EXPLAIN (COSTS OFF, VERBOSE true)
WITH keys AS (
SELECT DISTINCT l_orderkey FROM lineitem_hash_part
),
series AS (
SELECT s FROM generate_series(1,10) s
)
SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
ORDER BY s;
Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey
-> Distributed Subplan XXX_1
-> HashAggregate
Output: remote_scan.l_orderkey
Group Key: remote_scan.l_orderkey
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT DISTINCT l_orderkey FROM lineitem_hash_part_360041 lineitem_hash_part WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_orderkey
Group Key: lineitem_hash_part.l_orderkey
-> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-> Distributed Subplan XXX_2
-> Function Scan on pg_catalog.generate_series s
Output: s
Function Call: generate_series(1, 10)
Task Count: 1
Tasks Shown: All
-> Task
Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
Output: intermediate_result_1.l_orderkey, intermediate_result.s
Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
-> Sort
Output: intermediate_result.s
Sort Key: intermediate_result.s
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result
Output: intermediate_result.s
Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
-> Sort
Output: intermediate_result_1.l_orderkey
Sort Key: intermediate_result_1.l_orderkey
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
Output: intermediate_result_1.l_orderkey
Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
SET citus.enable_cte_inlining TO true;
SELECT true AS valid FROM explain_json($$
WITH result AS (
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity
),
series AS (
SELECT s FROM generate_series(1,10) s
)
SELECT * FROM result JOIN series ON (s = count_quantity) JOIN orders_hash_part ON (s = o_orderkey)
$$);
t
SELECT true AS valid FROM explain_xml($$
WITH result AS (
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity
),
series AS (
SELECT s FROM generate_series(1,10) s
)
SELECT * FROM result JOIN series ON (s = l_quantity) JOIN orders_hash_part ON (s = o_orderkey)
$$);
t
--
-- Test EXPLAIN ANALYZE UDFs
--
\a\t
\set default_opts '''{"costs": false, "timing": false, "summary": false}'''::jsonb
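-- Illustrative note (not executed): worker_save_query_explain_analyze() runs
-- its query argument under EXPLAIN ANALYZE with the given jsonb options,
-- returns the query's own rows, and saves the EXPLAIN output so that
-- worker_last_saved_explain_analyze() can read it back within the same
-- transaction. psql splices :default_opts in verbatim, so the first call
-- below is equivalent to:
--
-- SELECT * FROM worker_save_query_explain_analyze('SELECT 1',
--          '{"costs": false, "timing": false, "summary": false}'::jsonb) as (a int);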
CREATE TABLE explain_analyze_test(a int, b text);
INSERT INTO explain_analyze_test VALUES (1, 'value 1'), (2, 'value 2'), (3, 'value 3'), (4, 'value 4');
-- simple select
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', :default_opts) as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Result (actual rows=1 loops=1)+
(1 row)
END;
-- insert into select
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i $Q$,
:default_opts) as (a int);
a
---------------------------------------------------------------------
(0 rows)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Insert on explain_analyze_test (actual rows=0 loops=1) +
-> Function Scan on generate_series i (actual rows=5 loops=1)+
(1 row)
ROLLBACK;
-- select from table
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$SELECT * FROM explain_analyze_test$Q$,
:default_opts) as (a int, b text);
a | b
---------------------------------------------------------------------
1 | value 1
2 | value 2
3 | value 3
4 | value 4
(4 rows)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Seq Scan on explain_analyze_test (actual rows=4 loops=1)+
(1 row)
ROLLBACK;
-- insert into with returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i
RETURNING a, b$Q$,
:default_opts) as (a int, b text);
a | b
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 4
5 | 5
(5 rows)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Insert on explain_analyze_test (actual rows=5 loops=1) +
-> Function Scan on generate_series i (actual rows=5 loops=1)+
(1 row)
ROLLBACK;
-- delete with returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
DELETE FROM explain_analyze_test WHERE a % 2 = 0
RETURNING a, b$Q$,
:default_opts) as (a int, b text);
a | b
---------------------------------------------------------------------
2 | value 2
4 | value 4
(2 rows)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Delete on explain_analyze_test (actual rows=2 loops=1) +
-> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
Filter: ((a % 2) = 0) +
Rows Removed by Filter: 2 +
(1 row)
ROLLBACK;
-- delete without returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
DELETE FROM explain_analyze_test WHERE a % 2 = 0$Q$,
:default_opts) as (a int);
a
---------------------------------------------------------------------
(0 rows)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Delete on explain_analyze_test (actual rows=0 loops=1) +
-> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
Filter: ((a % 2) = 0) +
Rows Removed by Filter: 2 +
(1 row)
ROLLBACK;
-- multiple queries (should ERROR)
SELECT * FROM worker_save_query_explain_analyze('SELECT 1; SELECT 2', :default_opts) as (a int);
ERROR: cannot EXPLAIN ANALYZE multiple queries
-- error in query
SELECT * FROM worker_save_query_explain_analyze('SELECT x', :default_opts) as (a int);
ERROR: column "x" does not exist
-- error in format string
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "invlaid_format"}') as (a int);
ERROR: Invalid explain analyze format: "invlaid_format"
-- test formats
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "text", "costs": false}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
Result (actual rows=1 loops=1)+
(1 row)
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "json", "costs": false}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
[ +
{ +
"Plan": { +
"Node Type": "Result", +
"Parallel Aware": false,+
"Actual Rows": 1, +
"Actual Loops": 1 +
}, +
"Triggers": [ +
] +
} +
]
(1 row)
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "xml", "costs": false}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
<explain xmlns="http://www.postgresql.org/2009/explain">+
<Query> +
<Plan> +
<Node-Type>Result</Node-Type> +
<Parallel-Aware>false</Parallel-Aware> +
<Actual-Rows>1</Actual-Rows> +
<Actual-Loops>1</Actual-Loops> +
</Plan> +
<Triggers> +
</Triggers> +
</Query> +
</explain>
(1 row)
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "yaml", "costs": false}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
explain_analyze_output
---------------------------------------------------------------------
- Plan: +
Node Type: "Result" +
Parallel Aware: false+
Actual Rows: 1 +
Actual Loops: 1 +
Triggers:
(1 row)
END;
-- costs on, timing off
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": true}') as (a int);
a
---------------------------------------------------------------------
1
2
3
4
(4 rows)
SELECT explain_analyze_output ~ 'Seq Scan.*\(cost=0.00.*\) \(actual rows.*\)' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
-- costs off, timing on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": true, "costs": false}') as (a int);
a
---------------------------------------------------------------------
1
2
3
4
(4 rows)
SELECT explain_analyze_output ~ 'Seq Scan on explain_analyze_test \(actual time=.* rows=.* loops=1\)' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
-- summary on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"timing": false, "costs": false, "summary": true}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT explain_analyze_output ~ 'Planning Time:.*Execution Time:.*' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
-- buffers on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "buffers": true}') as (a int);
a
---------------------------------------------------------------------
1
2
3
4
(4 rows)
SELECT explain_analyze_output ~ 'Buffers:' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
-- verbose on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "verbose": true}') as (a int);
a
---------------------------------------------------------------------
1
2
3
4
(4 rows)
SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
-- make sure the saved output is deleted at transaction end
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{}') as (a int);
a
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM worker_last_saved_explain_analyze();
count
---------------------------------------------------------------------
0
(1 row)
-- should be deleted once the transaction is prepared (PREPARE TRANSACTION)
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('UPDATE explain_analyze_test SET a=6 WHERE a=4', '{}') as (a int);
a
---------------------------------------------------------------------
(0 rows)
SELECT count(*) FROM worker_last_saved_explain_analyze();
count
---------------------------------------------------------------------
1
(1 row)
PREPARE TRANSACTION 'citus_0_1496350_7_0';
SELECT count(*) FROM worker_last_saved_explain_analyze();
count
---------------------------------------------------------------------
0
(1 row)
COMMIT PREPARED 'citus_0_1496350_7_0';
-- verify execution time makes sense
BEGIN;
SELECT count(*) FROM worker_save_query_explain_analyze('SELECT pg_sleep(0.05)', :default_opts) as (a int);
count
---------------------------------------------------------------------
1
(1 row)
SELECT execution_duration BETWEEN 30 AND 200 FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t
(1 row)
END;
SELECT * FROM explain_analyze_test ORDER BY a;
a | b
---------------------------------------------------------------------
1 | value 1
2 | value 2
3 | value 3
6 | value 4
(4 rows)
\a\t
--
-- Test different cases of EXPLAIN ANALYZE
--
SET citus.shard_count TO 4;
SET client_min_messages TO WARNING;
SELECT create_distributed_table('explain_analyze_test', 'a');
\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)'
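-- psql substitutes :default_analyze_flags verbatim, so the statements below
-- run as, for example:
--
-- EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off)
-- SELECT * FROM explain_analyze_test WHERE a = 1;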
-- router SELECT
EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 8 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
Filter: (a = 1)
-- multi-shard SELECT
EXPLAIN :default_analyze_flags SELECT count(*) FROM explain_analyze_test;
Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
Task Count: 4
Tuple data received from nodes: 4 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
-- empty router SELECT
EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 10000;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tuple data received from nodes: 0 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 0 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
Filter: (a = 10000)
Rows Removed by Filter: 1
-- empty multi-shard SELECT
EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE b = 'does not exist';
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 4
Tuple data received from nodes: 0 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 0 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
Filter: (b = 'does not exist'::text)
Rows Removed by Filter: 1
-- router DML
BEGIN;
EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test WHERE a = 1;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
Filter: (a = 1)
EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a = 2;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=1 loops=1)
Filter: (a = 2)
SELECT * FROM explain_analyze_test ORDER BY a;
2|b
3|value 3
6|value 4
ROLLBACK;
-- multi-shard DML
BEGIN;
EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a IN (1, 2);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
Filter: (a = ANY ('{1,2}'::integer[]))
EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
SELECT * FROM explain_analyze_test ORDER BY a;
ROLLBACK;
-- router DML with RETURNING with empty result
EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE a = 10000 RETURNING *;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tuple data received from nodes: 0 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 0 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
Filter: (a = 10000)
Rows Removed by Filter: 1
-- multi-shard DML with RETURNING with empty result
EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE b = 'does not exist' RETURNING *;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 4
Tuple data received from nodes: 0 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 0 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
Filter: (b = 'does not exist'::text)
Rows Removed by Filter: 1
-- single-row insert
BEGIN;
EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5');
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on explain_analyze_test_570009 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
ROLLBACK;
-- multi-row insert
BEGIN;
EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5'), (6, 'value 6');
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
ROLLBACK;
-- distributed insert/select
BEGIN;
EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test SELECT * FROM explain_analyze_test;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
-> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
Filter: ((worker_hash(a) >= '-2147483648'::integer) AND (worker_hash(a) <= '-1073741825'::integer))
ROLLBACK;
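-- The worker_hash() filter above restricts the INSERT ... SELECT to rows that
-- hash into the target shard's range. A sketch for inspecting those ranges in
-- the Citus metadata (real catalog; output not reproduced here):
--
--   SELECT shardid, shardminvalue, shardmaxvalue
--   FROM pg_dist_shard
--   WHERE logicalrelid = 'explain_analyze_test'::regclass
--   ORDER BY shardid;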
DROP TABLE explain_analyze_test;
-- test that EXPLAIN ANALYZE works with primary keys
CREATE TABLE explain_pk(a int primary key, b int);
SELECT create_distributed_table('explain_pk', 'a');
BEGIN;
EXPLAIN :default_analyze_flags INSERT INTO explain_pk VALUES (1, 2), (2, 3);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Insert on explain_pk_570013 citus_table_alias (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
SELECT * FROM explain_pk ORDER BY 1;
1|2
2|3
ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats
BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
[
{
"Plan": {
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
"Job": {
"Task Count": 2,
"Tasks Shown": "One of 2",
"Tasks": [
{
"Node": "host=localhost port=xxxxx dbname=regression",
"Remote Plan": [
[
{
"Plan": {
"Node Type": "ModifyTable",
"Operation": "Insert",
"Parallel Aware": false,
"Relation Name": "explain_pk_570013",
"Alias": "citus_table_alias",
"Actual Rows": 0,
"Actual Loops": 1,
"Plans": [
{
"Node Type": "Result",
"Parent Relationship": "Member",
"Parallel Aware": false,
"Actual Rows": 1,
"Actual Loops": 1
}
]
},
"Triggers": [
]
}
]
]
}
]
}
}
},
"Triggers": [
]
}
]
ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk;
[
{
"Plan": {
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
"Job": {
"Task Count": 4,
"Tuple data received from nodes": "0 bytes",
"Tasks Shown": "One of 4",
"Tasks": [
{
"Tuple data received from node": "0 bytes",
"Node": "host=localhost port=xxxxx dbname=regression",
"Remote Plan": [
[
{
"Plan": {
"Node Type": "Seq Scan",
"Parallel Aware": false,
"Relation Name": "explain_pk_570013",
"Alias": "explain_pk",
"Actual Rows": 0,
"Actual Loops": 1
},
"Triggers": [
]
}
]
]
}
]
}
}
},
"Triggers": [
]
}
]
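-- JSON-format EXPLAIN ANALYZE output can also be checked mechanically; a
-- minimal sketch extracting the top node type, with a literal standing in for
-- real captured output:
--
--   SELECT '[{"Plan": {"Node Type": "Custom Scan"}}]'::jsonb -> 0 -> 'Plan' ->> 'Node Type';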
BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
<Job>
<Task-Count>2</Task-Count>
<Tasks-Shown>One of 2</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=xxxxx dbname=regression</Node>
<Remote-Plan>
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>ModifyTable</Node-Type>
<Operation>Insert</Operation>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>citus_table_alias</Alias>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Plans>
<Plan>
<Node-Type>Result</Node-Type>
<Parent-Relationship>Member</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Actual-Rows>1</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
</Plan>
</Plans>
</Plan>
<Triggers>
</Triggers>
</Query>
</explain>
</Remote-Plan>
</Task>
</Tasks>
</Job>
</Distributed-Query>
</Plan>
<Triggers>
</Triggers>
</Query>
</explain>
ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk;
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
<Job>
<Task-Count>4</Task-Count>
<Tuple-data-received-from-nodes>0 bytes</Tuple-data-received-from-nodes>
<Tasks-Shown>One of 4</Tasks-Shown>
<Tasks>
<Task>
<Tuple-data-received-from-node>0 bytes</Tuple-data-received-from-node>
<Node>host=localhost port=xxxxx dbname=regression</Node>
<Remote-Plan>
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>explain_pk</Alias>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
</Plan>
<Triggers>
</Triggers>
</Query>
</explain>
</Remote-Plan>
</Task>
</Tasks>
</Job>
</Distributed-Query>
</Plan>
<Triggers>
</Triggers>
</Query>
</explain>
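-- Likewise, the XML output can be checked with PostgreSQL's xpath(); a minimal
-- sketch with a literal standing in for real captured output:
--
--   SELECT xpath('/e:explain/e:Query/e:Plan/e:Node-Type/text()',
--     xmlparse(document '<explain xmlns="http://www.postgresql.org/2009/explain"><Query><Plan><Node-Type>Seq Scan</Node-Type></Plan></Query></explain>'),
--     ARRAY[ARRAY['e', 'http://www.postgresql.org/2009/explain']]);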
DROP TABLE explain_pk;
-- test EXPLAIN ANALYZE with CTEs and subqueries
CREATE TABLE dist_table(a int, b int);
SELECT create_distributed_table('dist_table', 'a');
CREATE TABLE ref_table(a int);
SELECT create_reference_table('ref_table');
INSERT INTO dist_table SELECT i, i*i FROM generate_series(1, 10) i;
INSERT INTO ref_table SELECT i FROM generate_series(1, 10) i;
EXPLAIN :default_analyze_flags
WITH r AS (
SELECT GREATEST(random(), 2) r, a FROM dist_table
)
SELECT count(distinct a) from r NATURAL JOIN ref_table;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: 220 bytes
Result destination: Send to 2 nodes
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 4
Tuple data received from nodes: 21 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 9 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
Task Count: 1
Tuple data received from nodes: 2 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 2 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Hash Join (actual rows=10 loops=1)
Hash Cond: (ref_table.a = intermediate_result.a)
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
-> Hash (actual rows=10 loops=1)
Buckets: 1024 Batches: 1 Memory Usage: 9kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 4
Tuple data received from nodes: 11 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Group (actual rows=4 loops=1)
Group Key: t.a
-> Merge Join (actual rows=4 loops=1)
Merge Cond: (t.a = ref_table.a)
-> Sort (actual rows=4 loops=1)
Sort Key: t.a
Sort Method: quicksort Memory: 25kB
-> Subquery Scan on t (actual rows=4 loops=1)
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: ref_table.a
Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM dist_table
WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: 70 bytes
Result destination: Send to 2 nodes
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 4
Tuple data received from nodes: 10 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 4 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join (actual rows=4 loops=1)
Merge Cond: (dist_table.a = ref_table.a)
-> Sort (actual rows=4 loops=1)
Sort Key: dist_table.a
Sort Method: quicksort Memory: 25kB
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: ref_table.a
Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
Task Count: 4
Tuple data received from nodes: 11 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=4 loops=1)
Group Key: dist_table.a
InitPlan 1 (returns $0)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Result (actual rows=4 loops=1)
One-Time Filter: $0
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
BEGIN;
EXPLAIN :default_analyze_flags
WITH r AS (
INSERT INTO dist_table SELECT a, a * a FROM dist_table
RETURNING a
), s AS (
SELECT random() < 2, a * a a2 FROM r
)
SELECT count(distinct a2) FROM s;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: 100 bytes
Result destination: Write locally
-> Custom Scan (Citus Adaptive) (actual rows=20 loops=1)
Task Count: 4
Tuple data received from nodes: 44 bytes
Tasks Shown: One of 4
-> Task
Tuple data received from node: 20 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Insert on dist_table_570017 citus_table_alias (actual rows=8 loops=1)
-> Seq Scan on dist_table_570017 dist_table (actual rows=8 loops=1)
Filter: ((worker_hash(a) >= '-2147483648'::integer) AND (worker_hash(a) <= '-1073741825'::integer))
-> Distributed Subplan XXX_2
Intermediate Data Size: 150 bytes
Result destination: Write locally
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 1
Tuple data received from nodes: 28 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 28 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
Task Count: 1
Tuple data received from nodes: 2 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 2 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
ROLLBACK;
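-- "Result destination" above shows where each subplan's intermediate result
-- goes: broadcast to worker nodes, or written locally when only later steps on
-- the coordinator read it. A sketch of the underlying mechanism using Citus'
-- intermediate-result UDFs (illustrative only; results live for the duration
-- of the transaction):
--
--   BEGIN;
--   SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,3) s');
--   SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int);
--   COMMIT;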
DROP TABLE ref_table, dist_table;
-- test EXPLAIN ANALYZE with different replication factors
SET citus.shard_count = 2;
SET citus.shard_replication_factor = 1;
CREATE TABLE dist_table_rep1(a int);
SELECT create_distributed_table('dist_table_rep1', 'a');
SET citus.shard_replication_factor = 2;
CREATE TABLE dist_table_rep2(a int);
SELECT create_distributed_table('dist_table_rep2', 'a');
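-- A sketch for confirming the placement counts these replication factors
-- produce, via the Citus metadata catalogs (output not reproduced here):
--
--   SELECT logicalrelid, shardid, count(*) AS placements
--   FROM pg_dist_shard JOIN pg_dist_placement USING (shardid)
--   WHERE logicalrelid IN ('dist_table_rep1'::regclass, 'dist_table_rep2'::regclass)
--   GROUP BY logicalrelid, shardid
--   ORDER BY 1, 2;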
EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep1 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Insert on dist_table_rep1_570022 citus_table_alias (actual rows=4 loops=1)
-> Values Scan on "*VALUES*" (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags SELECT * FROM dist_table_rep1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep2 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 18 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 10 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Insert on dist_table_rep2_570024 citus_table_alias (actual rows=4 loops=1)
-> Values Scan on "*VALUES*" (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags SELECT * FROM dist_table_rep2;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep2_570024 dist_table_rep2 (actual rows=4 loops=1)
PREPARE p1 AS SELECT * FROM dist_table_rep1;
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
EXPLAIN :default_analyze_flags EXECUTE p1;
Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 2
Tuple data received from nodes: 9 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: 5 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
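-- p1 is executed six times because PostgreSQL's plan cache, by default,
-- considers switching from custom to a generic plan after the fifth execution
-- of a prepared statement; repeating EXPLAIN ANALYZE exercises both sides of
-- that boundary. A sketch for forcing the choice explicitly (PostgreSQL 12+):
--
--   SET plan_cache_mode TO force_generic_plan;
--   EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) EXECUTE p1;
--   RESET plan_cache_mode;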
PREPARE p2 AS SELECT * FROM dist_table_rep1 WHERE a = $1;
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(1);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(10);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 2 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 2 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 10)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p2(100);
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 3 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 3 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570023 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 100)
Rows Removed by Filter: 1
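-- Note that p2(100) was routed to a different shard (dist_table_rep1_570023)
-- than p2(1) (dist_table_rep1_570022): router planning prunes shards per
-- parameter value. A sketch for mapping values to shards with a Citus UDF:
--
--   SELECT get_shard_id_for_distribution_column('dist_table_rep1', 1);
--   SELECT get_shard_id_for_distribution_column('dist_table_rep1', 100);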
PREPARE p3 AS SELECT * FROM dist_table_rep1 WHERE a = 1;
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
EXPLAIN :default_analyze_flags EXECUTE p3;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1
Tuple data received from nodes: 1 bytes
Tasks Shown: All
-> Task
Tuple data received from node: 1 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
Filter: (a = 1)
Rows Removed by Filter: 3
DROP TABLE dist_table_rep1, dist_table_rep2;