--
-- MULTI_EXPLAIN
--
SET citus.next_shard_id TO 570000;
\a\t
RESET citus.task_executor_type;
SET citus.explain_distributed_queries TO on;
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb
AS $BODY$
DECLARE
  result jsonb;
BEGIN
  EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
  RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- Function that parses explain output as XML
CREATE FUNCTION explain_xml(query text)
RETURNS xml
AS $BODY$
DECLARE
  result xml;
BEGIN
  EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
  RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- VACUUM related tables to ensure test outputs are stable
VACUUM ANALYZE lineitem;
VACUUM ANALYZE orders;
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
  Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
  ->  HashAggregate
        Group Key: remote_scan.l_quantity
        ->  Custom Scan (Citus Adaptive)
              Task Count: 2
              Tasks Shown: One of 2
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  HashAggregate
                          Group Key: l_quantity
                          ->  Seq Scan on lineitem_290000 lineitem
-- Test disable hash aggregate
SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE, FORMAT TEXT)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
  Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
  ->  GroupAggregate
        Group Key: remote_scan.l_quantity
        ->  Sort
              Sort Key: remote_scan.l_quantity
              ->  Custom Scan (Citus Adaptive)
                    Task Count: 2
                    Tasks Shown: One of 2
                    ->  Task
                          Node: host=localhost port=xxxxx dbname=regression
                          ->  HashAggregate
                                Group Key: l_quantity
                                ->  Seq Scan on lineitem_290000 lineitem
SET enable_hashagg TO on;
-- Test JSON format
EXPLAIN (COSTS FALSE, FORMAT JSON)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
[
  {
    "Plan": {
      "Node Type": "Sort",
      "Parallel Aware": false,
      "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
      "Plans": [
        {
          "Node Type": "Aggregate",
          "Strategy": "Hashed",
          "Partial Mode": "Simple",
          "Parent Relationship": "Outer",
          "Parallel Aware": false,
          "Group Key": ["remote_scan.l_quantity"],
          "Plans": [
            {
              "Node Type": "Custom Scan",
              "Parent Relationship": "Outer",
              "Custom Plan Provider": "Citus Adaptive",
              "Parallel Aware": false,
              "Distributed Query": {
                "Job": {
                  "Task Count": 2,
                  "Tasks Shown": "One of 2",
                  "Tasks": [
                    {
                      "Node": "host=localhost port=xxxxx dbname=regression",
                      "Remote Plan": [
                        [
                          {
                            "Plan": {
                              "Node Type": "Aggregate",
                              "Strategy": "Hashed",
                              "Partial Mode": "Simple",
                              "Parallel Aware": false,
                              "Group Key": ["l_quantity"],
                              "Plans": [
                                {
                                  "Node Type": "Seq Scan",
                                  "Parent Relationship": "Outer",
                                  "Parallel Aware": false,
                                  "Relation Name": "lineitem_290000",
                                  "Alias": "lineitem"
                                }
                              ]
                            }
                          }
                        ]
                      ]
                    }
                  ]
                }
              }
            }
          ]
        }
      ]
    }
  }
]
-- Validate JSON format
SELECT true AS valid FROM explain_json($$
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
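-- The helpers return the plan as jsonb/xml, so individual plan attributes can
-- also be inspected directly. A minimal sketch (illustrative only; not part of
-- the recorded test output):
-- SELECT explain_json($$SELECT 1$$)->0->'Plan'->>'Node Type';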
-- Test XML format
EXPLAIN (COSTS FALSE, FORMAT XML)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
<explain xmlns="http://www.postgresql.org/2009/explain">
  <Query>
    <Plan>
      <Node-Type>Sort</Node-Type>
      <Parallel-Aware>false</Parallel-Aware>
      <Sort-Key>
        <Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
        <Item>remote_scan.l_quantity</Item>
      </Sort-Key>
      <Plans>
        <Plan>
          <Node-Type>Aggregate</Node-Type>
          <Strategy>Hashed</Strategy>
          <Partial-Mode>Simple</Partial-Mode>
          <Parent-Relationship>Outer</Parent-Relationship>
          <Parallel-Aware>false</Parallel-Aware>
          <Group-Key>
            <Item>remote_scan.l_quantity</Item>
          </Group-Key>
          <Plans>
            <Plan>
              <Node-Type>Custom Scan</Node-Type>
              <Parent-Relationship>Outer</Parent-Relationship>
              <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
              <Parallel-Aware>false</Parallel-Aware>
              <Distributed-Query>
                <Job>
                  <Task-Count>2</Task-Count>
                  <Tasks-Shown>One of 2</Tasks-Shown>
                  <Tasks>
                    <Task>
                      <Node>host=localhost port=xxxxx dbname=regression</Node>
                      <Remote-Plan>
                        <explain xmlns="http://www.postgresql.org/2009/explain">
                          <Query>
                            <Plan>
                              <Node-Type>Aggregate</Node-Type>
                              <Strategy>Hashed</Strategy>
                              <Partial-Mode>Simple</Partial-Mode>
                              <Parallel-Aware>false</Parallel-Aware>
                              <Group-Key>
                                <Item>l_quantity</Item>
                              </Group-Key>
                              <Plans>
                                <Plan>
                                  <Node-Type>Seq Scan</Node-Type>
                                  <Parent-Relationship>Outer</Parent-Relationship>
                                  <Parallel-Aware>false</Parallel-Aware>
                                  <Relation-Name>lineitem_290000</Relation-Name>
                                  <Alias>lineitem</Alias>
                                </Plan>
                              </Plans>
                            </Plan>
                          </Query>
                        </explain>
                      </Remote-Plan>
                    </Task>
                  </Tasks>
                </Job>
              </Distributed-Query>
            </Plan>
          </Plans>
        </Plan>
      </Plans>
    </Plan>
  </Query>
</explain>
-- Validate XML format
SELECT true AS valid FROM explain_xml($$
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
-- Test YAML format
EXPLAIN (COSTS FALSE, FORMAT YAML)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
- Plan:
    Node Type: "Sort"
    Parallel Aware: false
    Sort Key:
      - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
      - "remote_scan.l_quantity"
    Plans:
      - Node Type: "Aggregate"
        Strategy: "Hashed"
        Partial Mode: "Simple"
        Parent Relationship: "Outer"
        Parallel Aware: false
        Group Key:
          - "remote_scan.l_quantity"
        Plans:
          - Node Type: "Custom Scan"
            Parent Relationship: "Outer"
            Custom Plan Provider: "Citus Adaptive"
            Parallel Aware: false
            Distributed Query:
              Job:
                Task Count: 2
                Tasks Shown: "One of 2"
                Tasks:
                  - Node: "host=localhost port=xxxxx dbname=regression"
                    Remote Plan:
                      - Plan:
                          Node Type: "Aggregate"
                          Strategy: "Hashed"
                          Partial Mode: "Simple"
                          Parallel Aware: false
                          Group Key:
                            - "l_quantity"
                          Plans:
                            - Node Type: "Seq Scan"
                              Parent Relationship: "Outer"
                              Parallel Aware: false
                              Relation Name: "lineitem_290000"
                              Alias: "lineitem"
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
  Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
  ->  HashAggregate
        Group Key: remote_scan.l_quantity
        ->  Custom Scan (Citus Adaptive)
              Task Count: 2
              Tasks Shown: One of 2
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  HashAggregate
                          Group Key: l_quantity
                          ->  Seq Scan on lineitem_290000 lineitem
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
  Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
  Sort Method: quicksort  Memory: 27kB
  ->  HashAggregate (actual rows=50 loops=1)
        Group Key: remote_scan.l_quantity
        ->  Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
              Task Count: 2
              Tasks Shown: One of 2
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  HashAggregate (actual rows=50 loops=1)
                          Group Key: l_quantity
                          ->  Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1)
-- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
SET citus.shard_count TO 3;
CREATE TABLE t1(a int, b int);
CREATE TABLE t2(a int, b int);
SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
|
BEGIN;
SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1)
  ->  Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
        Task Count: 4
        Tasks Shown: None, not supported for re-partition queries
        ->  MapMergeJob
              Map Task Count: 3
              Merge Task Count: 4
        ->  MapMergeJob
              Map Task Count: 3
              Merge Task Count: 4
END;
DROP TABLE t1, t2;
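-- The Map Task Count of 3 above follows citus.shard_count as set before t1
-- and t2 were created. The current value can be checked with (illustrative
-- only; not part of the recorded test output):
-- SHOW citus.shard_count;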
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
	SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Aggregate
  Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
  ->  Custom Scan (Citus Adaptive)
        Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
        Task Count: 2
        Tasks Shown: One of 2
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
                    ->  Seq Scan on public.lineitem_290000 lineitem
                          Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test join
EXPLAIN (COSTS FALSE)
	SELECT * FROM lineitem
	JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0
	ORDER BY l_quantity LIMIT 10;
Limit
  ->  Sort
        Sort Key: remote_scan.l_quantity
        ->  Custom Scan (Citus Adaptive)
              Task Count: 2
              Tasks Shown: One of 2
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  Limit
                          ->  Sort
                                Sort Key: lineitem.l_quantity
                                ->  Hash Join
                                      Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
                                      ->  Seq Scan on lineitem_290000 lineitem
                                            Filter: (l_quantity < 5.0)
                                      ->  Hash
                                            ->  Seq Scan on orders_290002 orders
-- Test insert
EXPLAIN (COSTS FALSE)
	INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
Custom Scan (Citus Adaptive)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Insert on lineitem_290000 citus_table_alias
              ->  Values Scan on "*VALUES*"
-- Test update
EXPLAIN (COSTS FALSE)
	UPDATE lineitem
	SET l_suppkey = 12
	WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_290000 lineitem
              ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
                    Index Cond: (l_orderkey = 1)
                    Filter: (l_partkey = 0)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
BEGIN;
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
	UPDATE lineitem
	SET l_suppkey = 12
	WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_290000 lineitem (actual rows=0 loops=1)
              ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=0 loops=1)
                    Index Cond: (l_orderkey = 1)
                    Filter: (l_partkey = 0)
                    Rows Removed by Filter: 6
ROLLBACK;
-- Test delete
EXPLAIN (COSTS FALSE)
	DELETE FROM lineitem
	WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Adaptive)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_290000 lineitem
              ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
                    Index Cond: (l_orderkey = 1)
                    Filter: (l_partkey = 0)
-- Test zero-shard update
EXPLAIN (COSTS FALSE)
	UPDATE lineitem
	SET l_suppkey = 12
	WHERE l_orderkey = 1 AND l_orderkey = 0;
Custom Scan (Citus Adaptive)
  Task Count: 0
  Tasks Shown: All
-- Test zero-shard delete
EXPLAIN (COSTS FALSE)
	DELETE FROM lineitem
	WHERE l_orderkey = 1 AND l_orderkey = 0;
Custom Scan (Citus Adaptive)
  Task Count: 0
  Tasks Shown: All
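-- Contradictory quals on the distribution column prune every shard, hence
-- Task Count: 0 above. Any equally contradictory filter should be planned the
-- same way (illustrative only; not part of the recorded test output):
-- EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 2 AND l_orderkey = 3;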
-- Test single-shard SELECT
EXPLAIN (COSTS FALSE)
	SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
Custom Scan (Citus Adaptive)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
              Index Cond: (l_orderkey = 5)
SELECT true AS valid FROM explain_xml($$
	SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
t
SELECT true AS valid FROM explain_json($$
	SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
t
-- Test CREATE TABLE ... AS
EXPLAIN (COSTS FALSE)
	CREATE TABLE explain_result AS
	SELECT * FROM lineitem;
Custom Scan (Citus Adaptive)
  Task Count: 2
  Tasks Shown: One of 2
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Seq Scan on lineitem_290000 lineitem
-- Test having
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
	SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
	HAVING sum(l_quantity) > 100;
Aggregate
  Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
  Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
  ->  Custom Scan (Citus Adaptive)
        Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
        Task Count: 2
        Tasks Shown: One of 2
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
                    ->  Seq Scan on public.lineitem_290000 lineitem
                          Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test having without aggregate
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
	SELECT l_quantity FROM lineitem
	GROUP BY l_quantity
	HAVING l_quantity > (100 * random());
HashAggregate
  Output: remote_scan.l_quantity
  Group Key: remote_scan.l_quantity
  Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
  ->  Custom Scan (Citus Adaptive)
        Output: remote_scan.l_quantity, remote_scan.worker_column_2
        Task Count: 2
        Tasks Shown: One of 2
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  HashAggregate
                    Output: l_quantity, l_quantity
                    Group Key: lineitem.l_quantity
                    ->  Seq Scan on public.lineitem_290000 lineitem
                          Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Subquery pushdown tests with explain
EXPLAIN (COSTS OFF)
SELECT
	avg(array_length(events, 1)) AS event_average
FROM
	(SELECT
		tenant_id,
		user_id,
		array_agg(event_type ORDER BY event_time) AS events
	FROM
		(SELECT
			(users.composite_id).tenant_id,
			(users.composite_id).user_id,
			event_type,
			events.event_time
		FROM
			users,
			events
		WHERE
			(users.composite_id) = (events.composite_id) AND
			users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
			event_type IN ('click', 'submit', 'pay')) AS subquery
	GROUP BY
		tenant_id,
		user_id) AS subquery;
Aggregate
  ->  Custom Scan (Citus Adaptive)
        Task Count: 4
        Tasks Shown: One of 4
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  GroupAggregate
                          Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
                          ->  Sort
                                Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
                                ->  Hash Join
                                      Hash Cond: (users.composite_id = events.composite_id)
                                      ->  Seq Scan on users_1400289 users
                                            Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                      ->  Hash
                                            ->  Seq Scan on events_1400285 events
                                                  Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
-- Union and left join subquery pushdown
EXPLAIN (COSTS OFF)
SELECT
	avg(array_length(events, 1)) AS event_average,
	hasdone
FROM
	(SELECT
		subquery_1.tenant_id,
		subquery_1.user_id,
		array_agg(event ORDER BY event_time) AS events,
		COALESCE(hasdone, 'Has not done paying') AS hasdone
	FROM
	(
		(SELECT
			(users.composite_id).tenant_id,
			(users.composite_id).user_id,
			(users.composite_id) as composite_id,
			'action=>1' AS event,
			events.event_time
		FROM
			users,
			events
		WHERE
			(users.composite_id) = (events.composite_id) AND
			users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
			event_type = 'click')
		UNION
		(SELECT
			(users.composite_id).tenant_id,
			(users.composite_id).user_id,
			(users.composite_id) as composite_id,
			'action=>2' AS event,
			events.event_time
		FROM
			users,
			events
		WHERE
			(users.composite_id) = (events.composite_id) AND
			users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
			event_type = 'submit')
	) AS subquery_1
	LEFT JOIN
	(SELECT
		DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id,
		(composite_id).tenant_id,
		(composite_id).user_id,
		'Has done paying'::TEXT AS hasdone
	FROM
		events
	WHERE
		events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
		events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
		event_type = 'pay') AS subquery_2
	ON
		subquery_1.composite_id = subquery_2.composite_id
	GROUP BY
		subquery_1.tenant_id,
		subquery_1.user_id,
		hasdone) AS subquery_top
GROUP BY
	hasdone;
HashAggregate
  Group Key: remote_scan.hasdone
  ->  Custom Scan (Citus Adaptive)
        Task Count: 4
        Tasks Shown: One of 4
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  GroupAggregate
                    Group Key: subquery_top.hasdone
                    ->  Sort
                          Sort Key: subquery_top.hasdone
                          ->  Subquery Scan on subquery_top
                                ->  GroupAggregate
                                      Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
                                      ->  Sort
                                            Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
                                            ->  Hash Left Join
                                                  Hash Cond: (users.composite_id = subquery_2.composite_id)
                                                  ->  HashAggregate
                                                        Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
                                                        ->  Append
                                                              ->  Hash Join
                                                                    Hash Cond: (users.composite_id = events.composite_id)
                                                                    ->  Seq Scan on users_1400289 users
                                                                          Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                                                    ->  Hash
                                                                          ->  Seq Scan on events_1400285 events
                                                                                Filter: ((event_type)::text = 'click'::text)
                                                              ->  Hash Join
                                                                    Hash Cond: (users_1.composite_id = events_1.composite_id)
                                                                    ->  Seq Scan on users_1400289 users_1
                                                                          Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                                                    ->  Hash
                                                                          ->  Seq Scan on events_1400285 events_1
                                                                                Filter: ((event_type)::text = 'submit'::text)
                                                  ->  Hash
                                                        ->  Subquery Scan on subquery_2
                                                              ->  Unique
                                                                    ->  Sort
                                                                          Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
                                                                          ->  Seq Scan on events_1400285 events_2
                                                                                Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
-- Union, left join and having subquery pushdown
EXPLAIN (COSTS OFF)
SELECT
	avg(array_length(events, 1)) AS event_average,
	count_pay
FROM (
	SELECT
		subquery_1.tenant_id,
		subquery_1.user_id,
		array_agg(event ORDER BY event_time) AS events,
		COALESCE(count_pay, 0) AS count_pay
	FROM
	(
		(SELECT
			(users.composite_id).tenant_id,
			(users.composite_id).user_id,
			(users.composite_id),
			'action=>1' AS event,
			events.event_time
		FROM
			users,
			events
		WHERE
			(users.composite_id) = (events.composite_id) AND
			users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
			event_type = 'click')
		UNION
		(SELECT
			(users.composite_id).tenant_id,
			(users.composite_id).user_id,
			(users.composite_id),
			'action=>2' AS event,
			events.event_time
		FROM
			users,
			events
		WHERE
			(users.composite_id) = (events.composite_id) AND
			users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
			event_type = 'submit')
	) AS subquery_1
	LEFT JOIN
	(SELECT
		(composite_id).tenant_id,
		(composite_id).user_id,
		composite_id,
		COUNT(*) AS count_pay
	FROM
		events
	WHERE
		events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
		events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
		event_type = 'pay'
	GROUP BY
		composite_id
	HAVING
		COUNT(*) > 2) AS subquery_2
	ON
		subquery_1.composite_id = subquery_2.composite_id
	GROUP BY
		subquery_1.tenant_id,
		subquery_1.user_id,
		count_pay) AS subquery_top
WHERE
	array_ndims(events) > 0
GROUP BY
	count_pay
ORDER BY
	count_pay;
Sort
  Sort Key: remote_scan.count_pay
  ->  HashAggregate
        Group Key: remote_scan.count_pay
        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
              Tasks Shown: One of 4
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  GroupAggregate
                          Group Key: subquery_top.count_pay
                          ->  Sort
                                Sort Key: subquery_top.count_pay
                                ->  Subquery Scan on subquery_top
                                      ->  GroupAggregate
                                            Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
                                            Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
                                            ->  Sort
                                                  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
                                                  ->  Hash Left Join
                                                        Hash Cond: (users.composite_id = subquery_2.composite_id)
                                                        ->  HashAggregate
                                                              Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
                                                              ->  Append
                                                                    ->  Hash Join
                                                                          Hash Cond: (users.composite_id = events.composite_id)
                                                                          ->  Seq Scan on users_1400289 users
                                                                                Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                                                          ->  Hash
                                                                                ->  Seq Scan on events_1400285 events
                                                                                      Filter: ((event_type)::text = 'click'::text)
                                                                    ->  Hash Join
                                                                          Hash Cond: (users_1.composite_id = events_1.composite_id)
                                                                          ->  Seq Scan on users_1400289 users_1
                                                                                Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                                                          ->  Hash
                                                                                ->  Seq Scan on events_1400285 events_1
                                                                                      Filter: ((event_type)::text = 'submit'::text)
                                                        ->  Hash
                                                              ->  Subquery Scan on subquery_2
                                                                    ->  GroupAggregate
                                                                          Group Key: events_2.composite_id
                                                                          Filter: (count(*) > 2)
                                                                          ->  Sort
                                                                                Sort Key: events_2.composite_id
                                                                                ->  Seq Scan on events_1400285 events_2
                                                                                      Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
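-- Note that subquery_2's HAVING clause (COUNT(*) > 2) is evaluated entirely
-- on the workers, visible above as the Filter on the worker-side
-- GroupAggregate. Any shard-local aggregate filter is pushed down the same
-- way (illustrative only; not part of the recorded test output):
-- EXPLAIN (COSTS OFF)
-- SELECT composite_id FROM events GROUP BY composite_id HAVING count(*) > 2;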
-- Lateral join subquery pushdown
-- set subquery_pushdown due to limit in the query
SET citus.subquery_pushdown to ON;
EXPLAIN (COSTS OFF)
SELECT
	tenant_id,
	user_id,
	user_lastseen,
	event_array
FROM
	(SELECT
		tenant_id,
		user_id,
		max(lastseen) as user_lastseen,
		array_agg(event_type ORDER BY event_time) AS event_array
	FROM
		(SELECT
			(composite_id).tenant_id,
			(composite_id).user_id,
			composite_id,
			lastseen
		FROM
			users
		WHERE
			composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
			composite_id <= '(1, 9223372036854775807)'::user_composite_type
		ORDER BY
			lastseen DESC
		LIMIT 10
		) AS subquery_top
		LEFT JOIN LATERAL
		(SELECT
			event_type,
			event_time
		FROM
			events
		WHERE
			(composite_id) = subquery_top.composite_id
		ORDER BY
			event_time DESC
		LIMIT 99) AS subquery_lateral
		ON true
	GROUP BY
		tenant_id,
		user_id
	) AS shard_union
ORDER BY
	user_lastseen DESC
LIMIT 10;
Limit
  ->  Sort
        Sort Key: remote_scan.user_lastseen DESC
        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
              Tasks Shown: One of 4
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  Limit
                          ->  Sort
                                Sort Key: (max(users.lastseen)) DESC
                                ->  GroupAggregate
                                      Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
                                      ->  Sort
                                            Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
                                            ->  Nested Loop Left Join
                                                  ->  Limit
                                                        ->  Sort
                                                              Sort Key: users.lastseen DESC
                                                              ->  Seq Scan on users_1400289 users
                                                                    Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
                                                  ->  Limit
                                                        ->  Sort
                                                              Sort Key: events.event_time DESC
                                                              ->  Seq Scan on events_1400285 events
                                                                    Filter: (composite_id = users.composite_id)
RESET citus.subquery_pushdown;
-- Test all tasks output
SET citus.explain_all_tasks TO on;
EXPLAIN (COSTS FALSE)
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Aggregate
  ->  Custom Scan (Citus Adaptive)
        Task Count: 1
        Tasks Shown: All
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  Seq Scan on lineitem_290001 lineitem
                          Filter: (l_orderkey > 9030)
SELECT true AS valid FROM explain_xml($$
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
t
SELECT true AS valid FROM explain_json($$
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
t
-- Test multi shard update
EXPLAIN (COSTS FALSE)
	UPDATE lineitem_hash_part
	SET l_suppkey = 12;
Custom Scan (Citus Adaptive)
  Task Count: 4
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360041 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360042 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360043 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360044 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
EXPLAIN (COSTS FALSE)
	UPDATE lineitem_hash_part
	SET l_suppkey = 12
	WHERE l_orderkey = 1 OR l_orderkey = 3;
Custom Scan (Citus Adaptive)
  Task Count: 2
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360041 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
                    Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360042 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
                    Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
-- Test multi shard delete
EXPLAIN (COSTS FALSE)
	DELETE FROM lineitem_hash_part;
Custom Scan (Citus Adaptive)
  Task Count: 4
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_hash_part_360041 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_hash_part_360042 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_hash_part_360043 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_hash_part_360044 lineitem_hash_part
              ->  Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
	SELECT l_quantity, count(*) count_quantity FROM lineitem
	GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
  Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
  Sort Method: quicksort  Memory: 27kB
  ->  HashAggregate (actual rows=50 loops=1)
        Group Key: remote_scan.l_quantity
        ->  Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
              Task Count: 2
              Tasks Shown: All
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  HashAggregate (actual rows=50 loops=1)
                          Group Key: l_quantity
                          ->  Seq Scan on lineitem_290000 lineitem (actual rows=6000 loops=1)
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  HashAggregate (actual rows=50 loops=1)
                          Group Key: l_quantity
                          ->  Seq Scan on lineitem_290001 lineitem (actual rows=6000 loops=1)
SET citus.explain_all_tasks TO off;
-- Test update with subquery
EXPLAIN (COSTS FALSE)
	UPDATE lineitem_hash_part
	SET l_suppkey = 12
	FROM orders_hash_part
	WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
Custom Scan (Citus Adaptive)
  Task Count: 4
  Tasks Shown: One of 4
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Update on lineitem_hash_part_360041 lineitem_hash_part
              ->  Hash Join
                    Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
                    ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
                    ->  Hash
                          ->  Seq Scan on orders_hash_part_360045 orders_hash_part
-- Test delete with subquery
EXPLAIN (COSTS FALSE)
	DELETE FROM lineitem_hash_part
	USING orders_hash_part
	WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
Custom Scan (Citus Adaptive)
  Task Count: 4
  Tasks Shown: One of 4
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Delete on lineitem_hash_part_360041 lineitem_hash_part
              ->  Hash Join
                    Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
                    ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
                    ->  Hash
                          ->  Seq Scan on orders_hash_part_360045 orders_hash_part
-- Test task tracker
SET citus.task_executor_type TO 'task-tracker';
EXPLAIN (COSTS FALSE)
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
Aggregate
  ->  Custom Scan (Citus Task-Tracker)
        Task Count: 1
        Tasks Shown: All
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  Seq Scan on lineitem_290001 lineitem
                          Filter: (l_orderkey > 9030)
-- Test re-partition join
EXPLAIN (COSTS FALSE)
	SELECT count(*)
	FROM lineitem, orders, customer_append, supplier_single_shard
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey;
Aggregate
  ->  Custom Scan (Citus Task-Tracker)
        Task Count: 1
        Tasks Shown: None, not supported for re-partition queries
        ->  MapMergeJob
              Map Task Count: 1
              Merge Task Count: 1
        ->  MapMergeJob
              Map Task Count: 2
              Merge Task Count: 1
EXPLAIN (COSTS FALSE, FORMAT JSON)
	SELECT count(*)
	FROM lineitem, orders, customer_append, supplier_single_shard
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey;
[
  {
    "Plan": {
      "Node Type": "Aggregate",
      "Strategy": "Plain",
      "Partial Mode": "Simple",
      "Parallel Aware": false,
      "Plans": [
        {
          "Node Type": "Custom Scan",
          "Parent Relationship": "Outer",
          "Custom Plan Provider": "Citus Task-Tracker",
          "Parallel Aware": false,
          "Distributed Query": {
            "Job": {
              "Task Count": 1,
              "Tasks Shown": "None, not supported for re-partition queries",
              "Dependent Jobs": [
                {
                  "Map Task Count": 1,
                  "Merge Task Count": 1,
                  "Dependent Jobs": [
                    {
                      "Map Task Count": 2,
                      "Merge Task Count": 1
                    }
                  ]
                }
              ]
            }
          }
        }
      ]
    }
  }
]
SELECT true AS valid FROM explain_json($$
	SELECT count(*)
	FROM lineitem, orders, customer_append, supplier_single_shard
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT XML)
	SELECT count(*)
	FROM lineitem, orders, customer_append, supplier_single_shard
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey;
<explain xmlns="http://www.postgresql.org/2009/explain">
  <Query>
    <Plan>
      <Node-Type>Aggregate</Node-Type>
      <Strategy>Plain</Strategy>
      <Partial-Mode>Simple</Partial-Mode>
      <Parallel-Aware>false</Parallel-Aware>
      <Plans>
        <Plan>
          <Node-Type>Custom Scan</Node-Type>
          <Parent-Relationship>Outer</Parent-Relationship>
          <Custom-Plan-Provider>Citus Task-Tracker</Custom-Plan-Provider>
          <Parallel-Aware>false</Parallel-Aware>
          <Distributed-Query>
            <Job>
              <Task-Count>1</Task-Count>
              <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
              <Dependent-Jobs>
                <MapMergeJob>
                  <Map-Task-Count>1</Map-Task-Count>
                  <Merge-Task-Count>1</Merge-Task-Count>
                  <Dependent-Jobs>
                    <MapMergeJob>
                      <Map-Task-Count>2</Map-Task-Count>
                      <Merge-Task-Count>1</Merge-Task-Count>
                    </MapMergeJob>
                  </Dependent-Jobs>
                </MapMergeJob>
              </Dependent-Jobs>
            </Job>
          </Distributed-Query>
        </Plan>
      </Plans>
    </Plan>
  </Query>
</explain>
SELECT true AS valid FROM explain_xml($$
	SELECT count(*)
	FROM lineitem, orders, customer_append, supplier
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey$$);
t
-- make sure that EXPLAIN works without
-- problems for queries that involve only
-- reference tables
SELECT true AS valid FROM explain_xml($$
	SELECT count(*)
	FROM nation
	WHERE n_name = 'CHINA'$$);
t
SELECT true AS valid FROM explain_xml($$
	SELECT count(*)
	FROM nation, supplier
	WHERE nation.n_nationkey = supplier.s_nationkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT YAML)
	SELECT count(*)
	FROM lineitem, orders, customer, supplier_single_shard
	WHERE l_orderkey = o_orderkey
	AND o_custkey = c_custkey
	AND l_suppkey = s_suppkey;
- Plan:
    Node Type: "Aggregate"
    Strategy: "Plain"
    Partial Mode: "Simple"
    Parallel Aware: false
    Plans:
      - Node Type: "Custom Scan"
        Parent Relationship: "Outer"
        Custom Plan Provider: "Citus Task-Tracker"
        Parallel Aware: false
        Distributed Query:
          Job:
            Task Count: 1
            Tasks Shown: "None, not supported for re-partition queries"
            Dependent Jobs:
              - Map Task Count: 2
                Merge Task Count: 1
-- ensure local plans display correctly
CREATE TABLE lineitem_clone (LIKE lineitem);
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
Aggregate
  ->  Seq Scan on lineitem_clone
-- ensure distributed plans don't break
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
Aggregate
  ->  Custom Scan (Citus Task-Tracker)
        Task Count: 2
        Tasks Shown: One of 2
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  Seq Scan on lineitem_290000 lineitem
-- ensure EXPLAIN EXECUTE doesn't crash
PREPARE task_tracker_query AS
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
Aggregate
  ->  Custom Scan (Citus Task-Tracker)
        Task Count: 1
        Tasks Shown: All
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  Seq Scan on lineitem_290001 lineitem
                          Filter: (l_orderkey > 9030)
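-- EXPLAIN EXECUTE accepts the same options as plain EXPLAIN, so the prepared
-- statement above could equally be explained in another format (illustrative
-- only; not part of the recorded test output):
-- EXPLAIN (COSTS FALSE, FORMAT JSON) EXECUTE task_tracker_query;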
RESET citus.task_executor_type;
PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
EXPLAIN EXECUTE router_executor_query;
Custom Scan (Citus Adaptive)  (cost=0.00..0.00 rows=100000 width=18)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem  (cost=0.28..13.60 rows=4 width=5)
              Index Cond: (l_orderkey = 5)
PREPARE real_time_executor_query AS
	SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
Aggregate
  ->  Custom Scan (Citus Adaptive)
        Task Count: 1
        Tasks Shown: All
        ->  Task
              Node: host=localhost port=xxxxx dbname=regression
              ->  Aggregate
                    ->  Seq Scan on lineitem_290001 lineitem
                          Filter: (l_orderkey > 9030)
-- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
-- at least make sure to fail without crashing
PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
EXPLAIN EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive)  (cost=0.00..0.00 rows=100000 width=18)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem  (cost=0.28..13.60 rows=4 width=5)
              Index Cond: (l_orderkey = 5)
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (actual rows=3 loops=1)
              Index Cond: (l_orderkey = 5)
\set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
BEGIN;
EXPLAIN EXECUTE multi_shard_query_param(5);
WARNING:  there is no parameter $1
WARNING:  there is no parameter $1
Custom Scan (Citus Adaptive)  (cost=0.00..0.00 rows=0 width=0)
  Task Count: 2
  Tasks Shown: One of 2
  ->  Task
        Error: Could not get remote plan.
ROLLBACK;
BEGIN;
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5);
WARNING:  there is no parameter $1
WARNING:  there is no parameter $1
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
  Task Count: 2
  Tasks Shown: One of 2
  ->  Task
        Error: Could not get remote plan.
ROLLBACK;
\set VERBOSITY DEFAULT
-- test explain in a transaction with alter table to test we use right connections
BEGIN;
CREATE TABLE explain_table(id int);
SELECT create_distributed_table('explain_table', 'id');
ALTER TABLE explain_table ADD COLUMN value int;
ROLLBACK;
-- test explain with local INSERT ... SELECT
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part
SELECT o_orderkey FROM orders_hash_part LIMIT 3;
Custom Scan (Citus INSERT ... SELECT)
  INSERT/SELECT method: pull to coordinator
  ->  Limit
        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
              Tasks Shown: One of 4
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  Limit
                          ->  Seq Scan on orders_hash_part_360045 orders_hash_part
SELECT true AS valid FROM explain_json($$
	INSERT INTO lineitem_hash_part (l_orderkey)
	SELECT o_orderkey FROM orders_hash_part LIMIT 3;
$$);
t
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part (l_orderkey, l_quantity)
SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3;
Custom Scan (Citus INSERT ... SELECT)
  INSERT/SELECT method: pull to coordinator
  ->  Limit
        ->  Custom Scan (Citus Adaptive)
              Task Count: 4
              Tasks Shown: One of 4
              ->  Task
                    Node: host=localhost port=xxxxx dbname=regression
                    ->  Limit
                          ->  Seq Scan on orders_hash_part_360045 orders_hash_part
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part (l_orderkey)
SELECT s FROM generate_series(1,5) s;
Custom Scan (Citus INSERT ... SELECT)
  INSERT/SELECT method: pull to coordinator
  ->  Function Scan on generate_series s
-- WHERE EXISTS forces pg12 to materialize cte
EXPLAIN (COSTS OFF)
WITH cte1 AS (SELECT s FROM generate_series(1,10) s)
INSERT INTO lineitem_hash_part
WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5)
SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1);
Custom Scan (Citus INSERT ... SELECT)
  INSERT/SELECT method: pull to coordinator
  ->  Result
        One-Time Filter: $3
        CTE cte1
          ->  Function Scan on generate_series s
        CTE cte1
          ->  Limit
                InitPlan 2 (returns $1)
                  ->  CTE Scan on cte1 cte1_1
                ->  Result
                      One-Time Filter: $1
                      ->  CTE Scan on cte1 cte1_2
        InitPlan 4 (returns $3)
          ->  CTE Scan on cte1 cte1_3
        ->  CTE Scan on cte1
EXPLAIN (COSTS OFF)
INSERT INTO lineitem_hash_part
( SELECT s FROM generate_series(1,5) s) UNION
( SELECT s FROM generate_series(5,10) s);
Custom Scan (Citus INSERT ... SELECT)
  INSERT/SELECT method: pull to coordinator
  ->  HashAggregate
        Group Key: s.s
        ->  Append
              ->  Function Scan on generate_series s
              ->  Function Scan on generate_series s_1
-- explain with recursive planning
-- prevent PG 11 - PG 12 outputs to diverge
SET citus.enable_cte_inlining TO false;
EXPLAIN (COSTS OFF, VERBOSE true)
WITH keys AS (
  SELECT DISTINCT l_orderkey FROM lineitem_hash_part
),
series AS (
  SELECT s FROM generate_series(1,10) s
)
SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
ORDER BY s;
Custom Scan (Citus Adaptive)
  Output: remote_scan.l_orderkey
  ->  Distributed Subplan XXX_1
        ->  HashAggregate
              Output: remote_scan.l_orderkey
              Group Key: remote_scan.l_orderkey
              ->  Custom Scan (Citus Adaptive)
                    Output: remote_scan.l_orderkey
                    Task Count: 4
                    Tasks Shown: One of 4
                    ->  Task
                          Node: host=localhost port=xxxxx dbname=regression
                          ->  HashAggregate
                                Output: l_orderkey
                                Group Key: lineitem_hash_part.l_orderkey
                                ->  Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
                                      Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
  ->  Distributed Subplan XXX_2
        ->  Function Scan on pg_catalog.generate_series s
              Output: s
              Function Call: generate_series(1, 10)
  Task Count: 1
  Tasks Shown: All
  ->  Task
        Node: host=localhost port=xxxxx dbname=regression
        ->  Merge Join
              Output: intermediate_result_1.l_orderkey, intermediate_result.s
              Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
              ->  Sort
                    Output: intermediate_result.s
                    Sort Key: intermediate_result.s
                    ->  Function Scan on pg_catalog.read_intermediate_result intermediate_result
                          Output: intermediate_result.s
                          Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
              ->  Sort
                    Output: intermediate_result_1.l_orderkey
                    Sort Key: intermediate_result_1.l_orderkey
                    ->  Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
                          Output: intermediate_result_1.l_orderkey
                          Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
SET citus.enable_cte_inlining TO true;
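-- On PG 12+ the same materialization can be requested per CTE with the
-- MATERIALIZED keyword instead of the session GUC (illustrative only; not
-- part of the recorded test output):
-- EXPLAIN (COSTS OFF)
-- WITH keys AS MATERIALIZED (SELECT DISTINCT l_orderkey FROM lineitem_hash_part)
-- SELECT count(*) FROM keys;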
SELECT true AS valid FROM explain_json($$
	WITH result AS (
		SELECT l_quantity, count(*) count_quantity FROM lineitem
		GROUP BY l_quantity ORDER BY count_quantity, l_quantity
	),
	series AS (
		SELECT s FROM generate_series(1,10) s
	)
	SELECT * FROM result JOIN series ON (s = count_quantity)
	JOIN orders_hash_part ON (s = o_orderkey)
$$);
t
SELECT true AS valid FROM explain_xml($$
	WITH result AS (
		SELECT l_quantity, count(*) count_quantity FROM lineitem
		GROUP BY l_quantity ORDER BY count_quantity, l_quantity
	),
	series AS (
		SELECT s FROM generate_series(1,10) s
	)
	SELECT * FROM result JOIN series ON (s = l_quantity)
	JOIN orders_hash_part ON (s = o_orderkey)
$$);
t
--
-- Test EXPLAIN ANALYZE udfs
--
\a\t
\set default_opts '''{"costs": false, "timing": false, "summary": false}'''::jsonb
CREATE TABLE explain_analyze_test(a int, b text);
INSERT INTO explain_analyze_test VALUES (1, 'value 1'), (2, 'value 2'), (3, 'value 3'), (4, 'value 4');
-- simple select
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', :default_opts) as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Result (actual rows=1 loops=1)+
 
(1 row)

END;
-- insert into select
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
	INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i
$Q$, :default_opts) as (a int);
 a
---------------------------------------------------------------------
(0 rows)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Insert on explain_analyze_test (actual rows=0 loops=1)         +
   ->  Function Scan on generate_series i (actual rows=5 loops=1)+
 
(1 row)

ROLLBACK;
-- select from table
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$SELECT * FROM explain_analyze_test$Q$, :default_opts) as (a int, b text);
 a |    b
---------------------------------------------------------------------
 1 | value 1
 2 | value 2
 3 | value 3
 4 | value 4
(4 rows)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Seq Scan on explain_analyze_test (actual rows=4 loops=1)+
 
(1 row)

ROLLBACK;
-- insert into with returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
	INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i
	RETURNING a, b$Q$, :default_opts) as (a int, b text);
 a | b
---------------------------------------------------------------------
 1 | 1
 2 | 2
 3 | 3
 4 | 4
 5 | 5
(5 rows)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Insert on explain_analyze_test (actual rows=5 loops=1)         +
   ->  Function Scan on generate_series i (actual rows=5 loops=1)+
 
(1 row)

ROLLBACK;
-- delete with returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
	DELETE FROM explain_analyze_test WHERE a % 2 = 0
	RETURNING a, b$Q$, :default_opts) as (a int, b text);
 a |    b
---------------------------------------------------------------------
 2 | value 2
 4 | value 4
(2 rows)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Delete on explain_analyze_test (actual rows=2 loops=1)        +
   ->  Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
         Filter: ((a % 2) = 0)                                 +
         Rows Removed by Filter: 2                             +
 
(1 row)

ROLLBACK;
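-- The second argument is a jsonb map of EXPLAIN options, so any combination
-- accepted by EXPLAIN can be passed (illustrative only; not part of the
-- recorded test output):
-- SELECT * FROM worker_save_query_explain_analyze('SELECT 1',
--   '{"verbose": true, "costs": false}') as (a int);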
-- delete without returning
BEGIN;
SELECT * FROM worker_save_query_explain_analyze($Q$
	DELETE FROM explain_analyze_test WHERE a % 2 = 0$Q$, :default_opts) as (a int);
 a
---------------------------------------------------------------------
(0 rows)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Delete on explain_analyze_test (actual rows=0 loops=1)        +
   ->  Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
         Filter: ((a % 2) = 0)                                 +
         Rows Removed by Filter: 2                             +
 
(1 row)

ROLLBACK;
-- multiple queries (should ERROR)
SELECT * FROM worker_save_query_explain_analyze('SELECT 1; SELECT 2', :default_opts) as (a int);
ERROR:  cannot EXPLAIN ANALYZE multiple queries
-- error in query
SELECT * FROM worker_save_query_explain_analyze('SELECT x', :default_opts) as (a int);
ERROR:  column "x" does not exist
-- error in format string
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "invlaid_format"}') as (a int);
ERROR:  Invalid explain analyze format: "invlaid_format"
-- test formats
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "text", "costs": false}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 Result (actual rows=1 loops=1)+
 
(1 row)

SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "json", "costs": false}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 [                             +
   {                           +
     "Plan": {                 +
       "Node Type": "Result",  +
       "Parallel Aware": false,+
       "Actual Rows": 1,       +
       "Actual Loops": 1       +
     },                        +
     "Triggers": [             +
     ]                         +
   }                           +
 ]
(1 row)

SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "xml", "costs": false}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 <explain xmlns="http://www.postgresql.org/2009/explain">+
   <Query>                                               +
     <Plan>                                              +
       <Node-Type>Result</Node-Type>                     +
       <Parallel-Aware>false</Parallel-Aware>            +
       <Actual-Rows>1</Actual-Rows>                      +
       <Actual-Loops>1</Actual-Loops>                    +
     </Plan>                                             +
     <Triggers>                                          +
     </Triggers>                                         +
   </Query>                                              +
 </explain>
(1 row)

SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "yaml", "costs": false}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze();
 worker_last_saved_explain_analyze
---------------------------------------------------------------------
 - Plan:                   +
     Node Type: "Result"   +
     Parallel Aware: false +
     Actual Rows: 1        +
     Actual Loops: 1       +
   Triggers:
(1 row)

END;
-- costs on, timing off
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": true}') as (a int);
 a
---------------------------------------------------------------------
 1
 2
 3
 4
(4 rows)

SELECT worker_last_saved_explain_analyze() ~ 'Seq Scan.*\(cost=0.00.*\) \(actual rows.*\)';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

END;
-- costs off, timing on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": true, "costs": false}') as (a int);
 a
---------------------------------------------------------------------
 1
 2
 3
 4
(4 rows)

SELECT worker_last_saved_explain_analyze() ~ 'Seq Scan on explain_analyze_test \(actual time=.* rows=.* loops=1\)';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

END;
-- summary on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"timing": false, "costs": false, "summary": true}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze() ~ 'Planning Time:.*Execution Time:.*';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

END;
-- buffers on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "buffers": true}') as (a int);
 a
---------------------------------------------------------------------
 1
 2
 3
 4
(4 rows)

SELECT worker_last_saved_explain_analyze() ~ 'Buffers:';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

END;
-- verbose on
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "verbose": true}') as (a int);
 a
---------------------------------------------------------------------
 1
 2
 3
 4
(4 rows)

SELECT worker_last_saved_explain_analyze() ~ 'Output: a, b';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

END;
-- make sure deleted at transaction end
SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{}') as (a int);
 a
---------------------------------------------------------------------
 1
(1 row)

SELECT worker_last_saved_explain_analyze() IS NULL;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- should be deleted at the end of prepare commit
BEGIN;
SELECT * FROM worker_save_query_explain_analyze('UPDATE explain_analyze_test SET a=1', '{}') as (a int);
 a
---------------------------------------------------------------------
(0 rows)

SELECT worker_last_saved_explain_analyze() IS NOT NULL;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

PREPARE TRANSACTION 'citus_0_1496350_7_0';
SELECT worker_last_saved_explain_analyze() IS NULL;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

COMMIT PREPARED 'citus_0_1496350_7_0';
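-- While the two-phase transaction above is still pending, it can be inspected
-- through the pg_prepared_xacts system view (illustrative only; not part of
-- the recorded test output, and empty once the COMMIT PREPARED has run):
-- SELECT gid FROM pg_prepared_xacts;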