code review

pull/8344/head
Mehmet Yilmaz 2025-11-19 14:51:02 +00:00
parent a5c68ed720
commit 73ca66e944
16 changed files with 252 additions and 227 deletions


@ -268,6 +268,12 @@ DEPS = {
"subquery_in_targetlist": TestDeps(
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
),
"window_functions": TestDeps(
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
),
"multi_subquery_window_functions": TestDeps(
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
),
}


@ -638,7 +638,7 @@ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT a, COUNT(*) OVER (PARTITION BY a+1) FROM partitioned_distributed_table ORDER BY 1,2;
$Q$)
');
', true);
explain_filter
---------------------------------------------------------------------
Sort
@ -647,7 +647,7 @@ $Q$)
-> Sort
Sort Key: remote_scan.worker_column_2
-> Custom Scan (Citus Adaptive)
Task Count: N
Task Count: 4
(7 rows)
-- FOR UPDATE


@ -524,17 +524,17 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0
');
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
', true);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=N dbname=regression
-> Update on lineitem_360000 lineitem (actual rows=N loops=N)
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
Index Cond: (l_orderkey = N)
Filter: (l_partkey = N)
Rows Removed by Filter: N
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=0 loops=1)
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
Rows Removed by Filter: 6
ROLLBACk;
-- Test delete
EXPLAIN (COSTS FALSE)
@ -1389,16 +1389,16 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5)
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)');
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tuple data received from nodes: N bytes
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)', true);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
Task Count: 1
Tuple data received from nodes: 30 bytes
Tasks Shown: All
-> Task
Tuple data received from node: N bytes
Node: host=localhost port=N dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
Index Cond: (l_orderkey = N)
Tuple data received from node: 30 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=3 loops=1)
Index Cond: (l_orderkey = 5)
\set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
BEGIN;
@ -3120,14 +3120,14 @@ SELECT create_distributed_table('distributed_table_1','a');
INSERT INTO distributed_table_1 values (1,1);
select public.explain_filter('
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off) SELECT row_number() OVER() AS r FROM distributed_table_1
');
WindowAgg (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tasks Shown: One of N
', true);
WindowAgg (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=N dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=N loops=N)
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
CREATE TABLE distributed_table_2(a int, b int);
SELECT create_distributed_table('distributed_table_2','a');
@ -3138,30 +3138,30 @@ WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
SELECT * FROM distributed_table_2
JOIN r ON (r = distributed_table_2.b)
LIMIT 3
');
Limit (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
', true);
Limit (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: N bytes
Result destination: Send to N nodes
-> WindowAgg (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tasks Shown: One of N
Intermediate Data Size: 14 bytes
Result destination: Send to 2 nodes
-> WindowAgg (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=N dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=N loops=N)
Task Count: N
Tuple data received from nodes: N bytes
Tasks Shown: One of N
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
Task Count: 2
Tuple data received from nodes: 16 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: N bytes
Node: host=localhost port=N dbname=regression
-> Limit (actual rows=N loops=N)
-> Nested Loop (actual rows=N loops=N)
Tuple data received from node: 16 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Limit (actual rows=1 loops=1)
-> Nested Loop (actual rows=1 loops=1)
Join Filter: (distributed_table_2.b = intermediate_result.r)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=N loops=N)
-> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=N loops=N)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1


@ -524,17 +524,17 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0
');
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
', true);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=N dbname=regression
-> Update on lineitem_360000 lineitem (actual rows=N loops=N)
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
Index Cond: (l_orderkey = N)
Filter: (l_partkey = N)
Rows Removed by Filter: N
Node: host=localhost port=xxxxx dbname=regression
-> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=0 loops=1)
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
Rows Removed by Filter: 6
ROLLBACk;
-- Test delete
EXPLAIN (COSTS FALSE)
@ -1389,16 +1389,16 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5)
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)');
Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tuple data received from nodes: N bytes
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)', true);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
Task Count: 1
Tuple data received from nodes: 30 bytes
Tasks Shown: All
-> Task
Tuple data received from node: N bytes
Node: host=localhost port=N dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
Index Cond: (l_orderkey = N)
Tuple data received from node: 30 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=3 loops=1)
Index Cond: (l_orderkey = 5)
\set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
BEGIN;
@ -3109,14 +3109,14 @@ SELECT create_distributed_table('distributed_table_1','a');
INSERT INTO distributed_table_1 values (1,1);
select public.explain_filter('
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off) SELECT row_number() OVER() AS r FROM distributed_table_1
');
WindowAgg (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tasks Shown: One of N
', true);
WindowAgg (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=N dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=N loops=N)
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
CREATE TABLE distributed_table_2(a int, b int);
SELECT create_distributed_table('distributed_table_2','a');
@ -3127,30 +3127,30 @@ WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
SELECT * FROM distributed_table_2
JOIN r ON (r = distributed_table_2.b)
LIMIT 3
');
Limit (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
', true);
Limit (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1
Intermediate Data Size: N bytes
Result destination: Send to N nodes
-> WindowAgg (actual rows=N loops=N)
-> Custom Scan (Citus Adaptive) (actual rows=N loops=N)
Task Count: N
Tasks Shown: One of N
Intermediate Data Size: 14 bytes
Result destination: Send to 2 nodes
-> WindowAgg (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2
Tasks Shown: One of 2
-> Task
Node: host=localhost port=N dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=N loops=N)
Task Count: N
Tuple data received from nodes: N bytes
Tasks Shown: One of N
Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
Task Count: 2
Tuple data received from nodes: 16 bytes
Tasks Shown: One of 2
-> Task
Tuple data received from node: N bytes
Node: host=localhost port=N dbname=regression
-> Limit (actual rows=N loops=N)
-> Nested Loop (actual rows=N loops=N)
Tuple data received from node: 16 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Limit (actual rows=1 loops=1)
-> Nested Loop (actual rows=1 loops=1)
Join Filter: (distributed_table_2.b = intermediate_result.r)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=N loops=N)
-> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=N loops=N)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1


@ -59,7 +59,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
@ -69,11 +69,11 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -99,7 +99,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
@ -109,11 +109,11 @@ FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -139,7 +139,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
HashAggregate
@ -152,11 +152,11 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -313,24 +313,24 @@ EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT 1
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT a2) > 1;
');
', true);
explain_filter
---------------------------------------------------------------------
Aggregate
Output: remote_scan."?column?"
Filter: (count(DISTINCT remote_scan.worker_column_2) > N)
Filter: (count(DISTINCT remote_scan.worker_column_2) > 1)
-> Sort
Output: remote_scan."?column?", remote_scan.worker_column_2
Sort Key: remote_scan.worker_column_2
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan.worker_column_2
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT N, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=N dbname=regression
Query: SELECT 1, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: N, t2.a2
Output: 1, t2.a2
Group Key: t2.a2
-> Hash Right Join
Output: t2.a2


@ -59,7 +59,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
@ -69,11 +69,11 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -99,7 +99,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
@ -109,11 +109,11 @@ FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -139,7 +139,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
explain_filter
---------------------------------------------------------------------
HashAggregate
@ -152,11 +152,11 @@ FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
@ -310,21 +310,21 @@ EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT 1
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT a2) > 1;
');
explain_filter
', true);
explain_filter
---------------------------------------------------------------------
Aggregate
Output: remote_scan."?column?"
Filter: (count(DISTINCT remote_scan.worker_column_2) > N)
Filter: (count(DISTINCT remote_scan.worker_column_2) > 1)
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan.worker_column_2
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT N, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=N dbname=regression
Query: SELECT 1, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: N, t2.a2
Output: 1, t2.a2
Group Key: t2.a2
-> Hash Right Join
Output: t2.a2


@ -710,17 +710,17 @@ EXPLAIN (COSTS FALSE)
user_id)) AS ftop
ORDER BY 2 DESC, 1 DESC
LIMIT 5
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.sum DESC, remote_scan.user_id DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: (sum((sum(users_table.value_2) OVER (?)))) DESC, users_table.user_id DESC


@ -724,33 +724,42 @@ END; $$ language plpgsql;
-- ignore details such as exact costs or row counts. These filter
-- functions replace changeable output details with fixed strings.
-- Copied from PG explain.sql
create function explain_filter(text) returns setof text
language plpgsql as
CREATE OR REPLACE FUNCTION explain_filter(cmd text, keep_numbers boolean DEFAULT false)
RETURNS SETOF text
LANGUAGE plpgsql AS
$$
declare
DECLARE
ln text;
begin
for ln in execute $1
loop
BEGIN
FOR ln IN EXECUTE cmd LOOP
-- PG18 extra line "Index Searches: N" — remove entirely
IF ln ~ '^[[:space:]]*Index[[:space:]]+Searches:[[:space:]]*[0-9]+[[:space:]]*$' THEN
CONTINUE;
CONTINUE;
END IF;
-- PG18 extra Window line — remove entirely
IF ln ~ '^[[:space:]]*Window:[[:space:]].*$' THEN -- e.g., "Window: w1 AS (...)"
CONTINUE;
END IF;
-- Replace any numeric word with just 'N'
ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g');
-- In sort output, the above won't match units-suffixed numbers
ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g');
-- Optional numeric normalization
IF NOT keep_numbers THEN
-- Replace any numeric word with just 'N'
ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g');
-- In sort output, the above won't match units-suffixed numbers
ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g');
END IF;
-- Ignore text-mode buffers output because it varies depending
-- on the system state
CONTINUE WHEN (ln ~ ' +Buffers: .*');
-- Ignore text-mode "Planning:" line because whether it's output
-- varies depending on the system state
CONTINUE WHEN (ln = 'Planning:');
return next ln;
end loop;
end;
RETURN NEXT ln;
END LOOP;
END;
$$;
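
A minimal usage sketch of the revised helper, for reading the updated expected files (users_table stands in for any distributed test table; the helper is assumed to live in public, as the tests call it). The default call keeps the old behavior of masking volatile numbers as N, while passing true for keep_numbers preserves literal values such as task counts; the PG18-only "Index Searches" and "Window" lines are filtered out either way:

-- default: numeric details are masked, e.g. "Task Count: N"
select public.explain_filter('EXPLAIN (COSTS OFF) SELECT * FROM users_table');
-- keep_numbers => true: literal values retained, e.g. "Task Count: 4"
select public.explain_filter('EXPLAIN (COSTS OFF) SELECT * FROM users_table', true);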


@ -2069,7 +2069,7 @@ SET citus.grep_remote_commands TO '%12242024%';
select public.explain_filter('explain (memory) select * from int8_tbl i8');
NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, TIMING FALSE, SUMMARY FALSE, MEMORY TRUE, SERIALIZE none, FORMAT TEXT) SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N)
@ -2086,8 +2086,8 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (memory, analyze, buffers false) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": true, "serialize": "none", "timing": true, "summary": true, "format": "TEXT"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
explain_filter
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N)
Task Count: N
@ -2109,7 +2109,7 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (memory, summary, format yaml) select * from int8_tbl i8');
NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, TIMING FALSE, SUMMARY TRUE, MEMORY TRUE, SERIALIZE none, FORMAT YAML) SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
- Plan: +
@ -2152,7 +2152,7 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (memory, analyze, buffers false, format json) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": true, "serialize": "none", "timing": true, "summary": true, "format": "JSON"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
[ +
@ -2230,7 +2230,7 @@ prepare int8_query as select * from int8_tbl i8;
select public.explain_filter('explain (memory) execute int8_query');
NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, TIMING FALSE, SUMMARY FALSE, MEMORY TRUE, SERIALIZE none, FORMAT TEXT) SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N)
@ -2248,7 +2248,7 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (analyze, serialize, buffers, format yaml) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": true, "wal": false, "memory": false, "serialize": "text", "timing": true, "summary": true, "format": "YAML"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
- Plan: +
@ -2369,8 +2369,8 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (analyze, buffers false, serialize) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": false, "serialize": "text", "timing": true, "summary": true, "format": "TEXT"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
explain_filter
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N)
Task Count: N
@ -2391,8 +2391,8 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (analyze,serialize text,buffers,timing off) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": true, "wal": false, "memory": false, "serialize": "text", "timing": false, "summary": true, "format": "TEXT"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
explain_filter
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N) (actual rows=N loops=N)
Task Count: N
@ -2413,8 +2413,8 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (analyze,serialize binary,buffers,timing) select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": true, "wal": false, "memory": false, "serialize": "binary", "timing": true, "summary": true, "format": "TEXT"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
explain_filter
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N)
Task Count: N
@ -2436,8 +2436,8 @@ CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXEC
select public.explain_filter('explain (analyze, buffers false, serialize) create temp table explain_temp as select * from int8_tbl i8');
NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": false, "serialize": "text", "timing": true, "summary": true, "format": "TEXT"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text) line XX at FOR over EXECUTE statement
explain_filter
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N)
Task Count: N


@ -1302,19 +1302,19 @@ FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
');
', true);
explain_filter
---------------------------------------------------------------------
Sort
Sort Key: remote_scan.user_id, remote_scan.avg DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('N'::numeric / ('N'::numeric + avg(users_table.value_1))))
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
@ -1377,23 +1377,23 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, remote_scan.avg DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('N'::numeric / ('N'::numeric + avg(users_table.value_1))))
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
@ -1410,23 +1410,23 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, remote_scan.avg DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('N'::numeric / ('N'::numeric + avg(users_table.value_1))))
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
@ -1443,23 +1443,23 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, remote_scan.avg DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, ((N / (N + sum(users_table.value_2))))
Sort Key: users_table.user_id, ((1 / (1 + sum(users_table.value_2))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
@ -1476,17 +1476,17 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, remote_scan.avg DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC
@ -1503,7 +1503,7 @@ select public.explain_filter('
EXPLAIN (COSTS FALSE)
SELECT user_id, count(value_1), stddev(value_1), count(user_id) OVER (PARTITION BY random())
FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
@ -1511,13 +1511,13 @@ FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1
-> Sort
Sort Key: remote_scan.worker_column_5
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: user_id
Filter: (avg(value_1) > 'N'::numeric)
Filter: (avg(value_1) > '2'::numeric)
-> Seq Scan on users_table_1400256 users_table
(13 rows)
@ -1561,17 +1561,17 @@ HAVING
sum(value_2) > 0
ORDER BY commits DESC
LIMIT 10
');
', true);
explain_filter
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.commits DESC
-> Custom Scan (Citus Adaptive)
Task Count: N
Tasks Shown: One of N
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=N dbname=regression
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: (sum(daily_uniques.value_2)) DESC
@ -1580,7 +1580,7 @@ LIMIT 10
Sort Key: daily_uniques.user_id, (sum(daily_uniques.value_2)) DESC
-> HashAggregate
Group Key: daily_uniques.user_id
Filter: (sum(daily_uniques.value_2) > 'N'::double precision)
Filter: (sum(daily_uniques.value_2) > '0'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(18 rows)


@ -237,7 +237,7 @@ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT a, COUNT(*) OVER (PARTITION BY a+1) FROM partitioned_distributed_table ORDER BY 1,2;
$Q$)
');
', true);
-- FOR UPDATE
SELECT * FROM partitioned_distributed_table WHERE a = 1 ORDER BY 1,2 FOR UPDATE;


@ -189,7 +189,7 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0
');
', true);
ROLLBACk;
-- Test delete
@ -599,7 +599,7 @@ EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
-- at least make sure to fail without crashing
PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
EXPLAIN EXECUTE router_executor_query_param(5);
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)');
select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)', true);
\set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
@ -1139,7 +1139,7 @@ INSERT INTO distributed_table_1 values (1,1);
select public.explain_filter('
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off) SELECT row_number() OVER() AS r FROM distributed_table_1
');
', true);
CREATE TABLE distributed_table_2(a int, b int);
SELECT create_distributed_table('distributed_table_2','a');
@ -1151,7 +1151,7 @@ WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
SELECT * FROM distributed_table_2
JOIN r ON (r = distributed_table_2.b)
LIMIT 3
');
', true);
EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;


@ -36,7 +36,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
@ -44,7 +44,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
');
', true);
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
@ -52,7 +52,7 @@ select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
');
', true);
CREATE SEQUENCE test_seq START 101;
CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ;
@ -97,7 +97,7 @@ EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT 1
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT a2) > 1;
');
', true);
-- Check right outer join
SELECT COUNT(DISTINCT a2)


@ -476,7 +476,7 @@ EXPLAIN (COSTS FALSE)
user_id)) AS ftop
ORDER BY 2 DESC, 1 DESC
LIMIT 5
');
', true);
-- test with window functions which aren't pushed down
SELECT


@ -755,33 +755,43 @@ END; $$ language plpgsql;
-- functions replace changeable output details with fixed strings.
-- Copied from PG explain.sql
create function explain_filter(text) returns setof text
language plpgsql as
CREATE OR REPLACE FUNCTION explain_filter(cmd text, keep_numbers boolean DEFAULT false)
RETURNS SETOF text
LANGUAGE plpgsql AS
$$
declare
DECLARE
ln text;
begin
for ln in execute $1
loop
BEGIN
FOR ln IN EXECUTE cmd LOOP
-- PG18 extra line "Index Searches: N" — remove entirely
IF ln ~ '^[[:space:]]*Index[[:space:]]+Searches:[[:space:]]*[0-9]+[[:space:]]*$' THEN
CONTINUE;
CONTINUE;
END IF;
-- PG18 extra Window line — remove entirely
IF ln ~ '^[[:space:]]*Window:[[:space:]].*$' THEN -- e.g., "Window: w1 AS (...)"
CONTINUE;
END IF;
-- Replace any numeric word with just 'N'
ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g');
-- In sort output, the above won't match units-suffixed numbers
ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g');
-- Optional numeric normalization
IF NOT keep_numbers THEN
-- Replace any numeric word with just 'N'
ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g');
-- In sort output, the above won't match units-suffixed numbers
ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g');
END IF;
-- Ignore text-mode buffers output because it varies depending
-- on the system state
CONTINUE WHEN (ln ~ ' +Buffers: .*');
-- Ignore text-mode "Planning:" line because whether it's output
-- varies depending on the system state
CONTINUE WHEN (ln = 'Planning:');
return next ln;
end loop;
end;
RETURN NEXT ln;
END LOOP;
END;
$$;


@ -518,7 +518,7 @@ FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
');
', true);
-- order by in the window function is same as avg(value_1) DESC
SELECT
@ -542,7 +542,7 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
select public.explain_filter('
EXPLAIN (COSTS FALSE)
@ -555,7 +555,7 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
select public.explain_filter('
EXPLAIN (COSTS FALSE)
@ -568,7 +568,7 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
select public.explain_filter('
EXPLAIN (COSTS FALSE)
@ -581,14 +581,14 @@ FROM
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5
');
', true);
-- Grouping can be pushed down with aggregates even when window function can't
select public.explain_filter('
EXPLAIN (COSTS FALSE)
SELECT user_id, count(value_1), stddev(value_1), count(user_id) OVER (PARTITION BY random())
FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1
');
', true);
-- Window function with inlined CTE
WITH cte as (
@ -621,7 +621,7 @@ HAVING
sum(value_2) > 0
ORDER BY commits DESC
LIMIT 10
');
', true);
DROP TABLE daily_uniques;