From b2356f1c856036caadfc443a902c62f7d9b34ca4 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Mon, 10 Nov 2025 10:43:11 +0300 Subject: [PATCH 1/4] PG18: Make EXPLAIN ANALYZE output stable by routing through explain_filter and hiding footers (#8325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PostgreSQL 18 adds a new line to text EXPLAIN with ANALYZE (`Index Searches: N`). That extra line both creates noise and bumps psql’s `(N rows)` footer. This PR keeps ANALYZE (so statements still execute) while removing the version-specific churn in our regress outputs. ### What changed * **Use `explain_filter(...)` instead of raw text EXPLAIN** * In `local_shard_execution.sql` and `local_shard_execution_replicated.sql`, replace direct: ```sql EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) ; ``` with: ```sql \pset footer off SELECT public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) '); \pset footer on ``` * Expected files updated accordingly to show the `explain_filter` output block instead of raw EXPLAIN text. * **Extend `explain_filter` to drop the PG18 line** * Filter now removes any `Index Searches: ` line before normalizing numeric fields, preventing the “N” version of the same line from sneaking in. * **Keep suite-wide normalizer intact** --- src/test/regress/bin/normalize.sed | 3 -- .../expected/local_shard_execution.out | 46 ++++++++++--------- .../local_shard_execution_replicated.out | 44 +++++++++--------- .../regress/expected/multi_test_helpers.out | 5 ++ .../regress/sql/local_shard_execution.sql | 9 +++- .../sql/local_shard_execution_replicated.sql | 8 +++- src/test/regress/sql/multi_test_helpers.sql | 5 ++ 7 files changed, 70 insertions(+), 50 deletions(-) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index e3df83fa4..f209a2dc8 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -376,9 +376,6 @@ s/\/is still referenced from table/g # ignore any "find_in_path:" lines in test output /DEBUG: find_in_path: trying .*/d -# PG18: EXPLAIN ANALYZE prints "Index Searches: N" for index scans — remove it -/^\s*Index Searches:\s*\d+\s*$/d - # EXPLAIN (PG18+): hide Materialize storage instrumentation # this rule can be removed when PG18 is the minimum supported version /^[ \t]*Storage:[ \t].*$/d diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 3348db63a..09a88c6c0 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -312,21 +312,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; Filter: (age = 20) (8 rows) -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20'); + explain_filter --------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - Task Count: 1 - Tuple data received from nodes: 14 bytes + Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N + Tuple data received from nodes: N bytes Tasks Shown: All -> Task - Tuple data received from node: 14 bytes - Node: host=localhost port=xxxxx 
dbname=regression - -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1) - Index Cond: (key = 1) - Filter: (age = 20) -(10 rows) + Tuple data received from node: N bytes + Node: host=localhost port=N dbname=regression + -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=N loops=N) + Index Cond: (key = N) + Filter: (age = N) +\pset footer on EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; @@ -368,21 +369,22 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; Filter: (age = 20) (9 rows) -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20'); + explain_filter --------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 + Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Delete on distributed_table_1470001 distributed_table (actual rows=0 loops=1) - -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=1 loops=1) - Index Cond: (key = 1) - Filter: (age = 20) - Trigger for constraint second_distributed_table_key_fkey_1470005: calls=1 -(10 rows) + Node: host=localhost port=N dbname=regression + -> Delete on distributed_table_1470001 distributed_table (actual rows=N loops=N) + -> Index Scan using distributed_table_pkey_1470001 on distributed_table_1470001 distributed_table (actual rows=N loops=N) + Index Cond: (key = N) + Filter: (age = N) + Trigger for constraint second_distributed_table_key_fkey_1470005: calls=N +\pset footer on -- show that EXPLAIN ANALYZE deleted the row and cascades deletes SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index 835df717d..ca85fdb4e 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -250,21 +250,22 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; Filter: (age = 20) (8 rows) -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20'); + explain_filter --------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - Task Count: 1 - Tuple data received from nodes: 14 bytes + Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N + Tuple data received from 
nodes: N bytes Tasks Shown: All -> Task - Tuple data received from node: 14 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=1 loops=1) - Index Cond: (key = 1) - Filter: (age = 20) -(10 rows) + Tuple data received from node: N bytes + Node: host=localhost port=N dbname=regression + -> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=N loops=N) + Index Cond: (key = N) + Filter: (age = N) +\pset footer on EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; @@ -306,20 +307,21 @@ EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; Filter: (age = 20) (9 rows) -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; - QUERY PLAN +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20'); + explain_filter --------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 + Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Delete on distributed_table_1500001 distributed_table (actual rows=0 loops=1) - -> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=1 loops=1) - Index Cond: (key = 1) - Filter: (age = 20) -(9 rows) + Node: host=localhost port=N dbname=regression + -> Delete on distributed_table_1500001 distributed_table (actual rows=N loops=N) + -> Index Scan using distributed_table_pkey_1500001 on distributed_table_1500001 distributed_table (actual rows=N loops=N) + Index Cond: (key = N) + Filter: (age = N) +\pset footer on -- show that EXPLAIN ANALYZE deleted the row SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((key OPERATOR(pg_catalog.=) 1) AND (age OPERATOR(pg_catalog.=) 20)) ORDER BY key, value, age diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 957a3d11b..00c4a61d7 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -732,6 +732,11 @@ declare begin for ln in execute $1 loop + -- PG18 extra line "Index Searches: N" — remove entirely + IF ln ~ '^[[:space:]]*Index[[:space:]]+Searches:[[:space:]]*[0-9]+[[:space:]]*$' THEN + CONTINUE; + END IF; + -- Replace any numeric word with just 'N' ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g'); -- In sort output, the above won't match units-suffixed numbers diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql index 0ba2f9e38..688896f56 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -218,7 +218,9 @@ SET citus.enable_binary_protocol = TRUE; -- though going through distributed execution EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE, COSTS 
OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20'); +\pset footer on EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) @@ -226,7 +228,10 @@ SELECT 1 FROM r WHERE z < 3; EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20'); +\pset footer on + -- show that EXPLAIN ANALYZE deleted the row and cascades deletes SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index 0740d58da..45ed426ce 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -183,7 +183,9 @@ SET citus.enable_binary_protocol = TRUE; -- though going through distributed execution EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20'); +\pset footer on EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) @@ -191,7 +193,9 @@ SELECT 1 FROM r WHERE z < 3; EXPLAIN (COSTS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20; +\pset footer off +select public.explain_filter('EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) DELETE FROM distributed_table WHERE key = 1 AND age = 20'); +\pset footer on -- show that EXPLAIN ANALYZE deleted the row SELECT * FROM distributed_table WHERE key = 1 AND age = 20 ORDER BY 1,2,3; SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index 10242692c..e605e7e90 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -763,6 +763,11 @@ declare begin for ln in execute $1 loop + -- PG18 extra line "Index Searches: N" — remove entirely + IF ln ~ '^[[:space:]]*Index[[:space:]]+Searches:[[:space:]]*[0-9]+[[:space:]]*$' THEN + CONTINUE; + END IF; + -- Replace any numeric word with just 'N' ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g'); -- In sort output, the above won't match units-suffixed numbers From 4244bc85169119b6a64078869c29b1b040eab2da Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Mon, 10 Nov 2025 11:01:47 +0300 Subject: [PATCH 2/4] PG18: Normalize verbose CREATE SUBSCRIPTION connect errors (#8326) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit fixes #8317 https://github.com/postgres/postgres/commit/0d8bd0a72ea284ffb1d1154efbe799241cc5edc6 PG18 changed the wording of connection failures during `CREATE SUBSCRIPTION` to include a subscription prefix and a verbose “connection to server … failed:” preamble. This breaks one regression output (`multi_move_mx`). This PR adds normalization rules to map PG18 output back to the prior form so results are stable across PG15–PG18. **What changes** Add two rules in `src/test/regress/bin/normalize.sed`: ```sed # PG18: drop 'subscription ""' prefix # remove when PG18 is the minimum supported version s/^[[:space:]]*ERROR:[[:space:]]+subscription "[^"]+" could not connect to the publisher:[[:space:]]*/ERROR: could not connect to the publisher: /I # PG18: drop verbose 'connection to server … failed:' preamble s/^[[:space:]]*ERROR:[[:space:]]+could not connect to the publisher:[[:space:]]*connection to server .* failed:[[:space:]]*/ERROR: could not connect to the publisher: /I ``` **Before (PG18)** ``` ERROR: subscription "subs_01" could not connect to the publisher: connection to server at "localhost" (::1), port 57637 failed: root certificate file "/non/existing/certificate.crt" does not exist ``` **After normalization** ``` ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist ``` **Why** Maintain identical regression outputs across supported PG versions while Citus still supports PG<18. --- src/test/regress/bin/normalize.sed | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index f209a2dc8..0fc47e2fb 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -379,3 +379,9 @@ s/\/is still referenced from table/g # EXPLAIN (PG18+): hide Materialize storage instrumentation # this rule can be removed when PG18 is the minimum supported version /^[ \t]*Storage:[ \t].*$/d + +# PG18: drop 'subscription ""' prefix +# this rule can be removed when PG18 is the minimum supported version +s/^[[:space:]]*ERROR:[[:space:]]+subscription "[^"]+" could not connect to the publisher:[[:space:]]*/ERROR: could not connect to the publisher: /I +# PG18: drop verbose 'connection to server … failed:' preamble +s/^[[:space:]]*ERROR:[[:space:]]+could not connect to the publisher:[[:space:]]*connection to server .* failed:[[:space:]]*/ERROR: could not connect to the publisher: /I From f80fa1c83beae9bd2671779d802a96d0091f9b51 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Thu, 13 Nov 2025 09:32:21 +0300 Subject: [PATCH 3/4] PG18 - Adjust columnar path tests for PG18 OR clause optimization (#8337) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fixes #8264 PostgreSQL 18 introduced a planner improvement (commit `ae4569161`) that rewrites simple `OR` equality clauses into `= ANY(...)` forms, allowing the use of a single index scan instead of multiple scans or a custom scan. This change affects the columnar path tests where queries like `a=0 OR a=5` previously chose a Columnar or Seq Scan plan. In this PR: * Updated test expectations for `uses_custom_scan` and `uses_seq_scan` to reflect the new index scan plan. This keeps the test output consistent with PostgreSQL 18’s updated planner behavior. 
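For illustration, a minimal sketch of the planner behavior the tests now have to account for. The table `t` and index `t_a_idx` below are hypothetical (not part of this patch); the `SET LOCAL enable_indexscan` / `enable_bitmapscan` pattern is the same one the updated columnar tests use to keep exercising the Columnar custom scan and seq scan paths:

```sql
-- Hypothetical table, only to illustrate the PG18 OR -> ANY rewrite.
CREATE TABLE t (a int);
CREATE INDEX t_a_idx ON t (a);
INSERT INTO t SELECT generate_series(1, 100000);
ANALYZE t;

-- On PG18 the planner may rewrite "a = 0 OR a = 5" into "a = ANY ('{0,5}')"
-- and satisfy it with a single index scan. The columnar tests therefore pin
-- the plan they want by disabling the competing scan types for the duration
-- of the transaction:
BEGIN;
  SET LOCAL enable_indexscan TO 'OFF';
  SET LOCAL enable_bitmapscan TO 'OFF';
  EXPLAIN (COSTS OFF) SELECT a FROM t WHERE a = 0 OR a = 5;
ROLLBACK;
```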
--- src/test/regress/expected/columnar_paths.out | 16 +++++++++++----- src/test/regress/expected/columnar_paths_0.out | 16 +++++++++++----- src/test/regress/sql/columnar_paths.sql | 17 ++++++++++++----- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/src/test/regress/expected/columnar_paths.out b/src/test/regress/expected/columnar_paths.out index 1fdef4dd5..75fbc0fcd 100644 --- a/src/test/regress/expected/columnar_paths.out +++ b/src/test/regress/expected/columnar_paths.out @@ -204,18 +204,24 @@ $$ t (1 row) -SELECT columnar_test_helpers.uses_custom_scan ( -$$ -SELECT a FROM full_correlated WHERE a=0 OR a=5; -$$ -); +BEGIN; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; + SELECT columnar_test_helpers.uses_custom_scan ( + $$ + SELECT a FROM full_correlated WHERE a=0 OR a=5; + $$ + ); uses_custom_scan --------------------------------------------------------------------- t (1 row) +ROLLBACK; BEGIN; SET LOCAL columnar.enable_custom_scan TO 'OFF'; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; SELECT columnar_test_helpers.uses_seq_scan ( $$ SELECT a FROM full_correlated WHERE a=0 OR a=5; diff --git a/src/test/regress/expected/columnar_paths_0.out b/src/test/regress/expected/columnar_paths_0.out index 4fd8c4535..172a56a49 100644 --- a/src/test/regress/expected/columnar_paths_0.out +++ b/src/test/regress/expected/columnar_paths_0.out @@ -204,18 +204,24 @@ $$ t (1 row) -SELECT columnar_test_helpers.uses_custom_scan ( -$$ -SELECT a FROM full_correlated WHERE a=0 OR a=5; -$$ -); +BEGIN; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; + SELECT columnar_test_helpers.uses_custom_scan ( + $$ + SELECT a FROM full_correlated WHERE a=0 OR a=5; + $$ + ); uses_custom_scan --------------------------------------------------------------------- t (1 row) +ROLLBACK; BEGIN; SET LOCAL columnar.enable_custom_scan TO 'OFF'; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; SELECT columnar_test_helpers.uses_seq_scan ( $$ SELECT a FROM full_correlated WHERE a=0 OR a=5; diff --git a/src/test/regress/sql/columnar_paths.sql b/src/test/regress/sql/columnar_paths.sql index c9c1c2026..b9f6bf047 100644 --- a/src/test/regress/sql/columnar_paths.sql +++ b/src/test/regress/sql/columnar_paths.sql @@ -141,14 +141,21 @@ SELECT a FROM full_correlated WHERE a>200; $$ ); -SELECT columnar_test_helpers.uses_custom_scan ( -$$ -SELECT a FROM full_correlated WHERE a=0 OR a=5; -$$ -); + +BEGIN; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; + SELECT columnar_test_helpers.uses_custom_scan ( + $$ + SELECT a FROM full_correlated WHERE a=0 OR a=5; + $$ + ); +ROLLBACK; BEGIN; SET LOCAL columnar.enable_custom_scan TO 'OFF'; + SET LOCAL enable_indexscan TO 'OFF'; + SET LOCAL enable_bitmapscan TO 'OFF'; SELECT columnar_test_helpers.uses_seq_scan ( $$ SELECT a FROM full_correlated WHERE a=0 OR a=5; From 8bba66f20768792f08d717cdd5cedb0bcfe00558 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Thu, 13 Nov 2025 11:16:33 +0300 Subject: [PATCH 4/4] Fix EXPLAIN output in regression tests for consistency (#8332) --- .../expected/columnar_chunk_filtering.out | 3 +- .../expected/columnar_chunk_filtering_0.out | 3 +- src/test/regress/expected/columnar_paths.out | 4 +-- .../regress/expected/columnar_paths_0.out | 4 +-- src/test/regress/expected/multi_explain.out | 36 ++++++++++--------- src/test/regress/expected/multi_explain_0.out | 36 ++++++++++--------- 
.../regress/sql/columnar_chunk_filtering.sql | 2 ++ src/test/regress/sql/columnar_paths.sql | 2 +- src/test/regress/sql/multi_explain.sql | 6 ++-- 9 files changed, 53 insertions(+), 43 deletions(-) diff --git a/src/test/regress/expected/columnar_chunk_filtering.out b/src/test/regress/expected/columnar_chunk_filtering.out index 5f7b816a9..a4dd0e0e5 100644 --- a/src/test/regress/expected/columnar_chunk_filtering.out +++ b/src/test/regress/expected/columnar_chunk_filtering.out @@ -979,6 +979,7 @@ DETAIL: unparameterized; 1 clauses pushed down (1 row) SET hash_mem_multiplier = 1.0; +\pset footer off SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where @@ -1017,8 +1018,8 @@ CONTEXT: PL/pgSQL function columnar_test_helpers.explain_with_pg16_subplan_form -> Materialize (actual rows=100 loops=199) -> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1) Columnar Projected Columns: a -(11 rows) +\pset footer on RESET hash_mem_multiplier; SELECT sum(a) FROM pushdown_test where ( diff --git a/src/test/regress/expected/columnar_chunk_filtering_0.out b/src/test/regress/expected/columnar_chunk_filtering_0.out index 9ce0ff008..4d6cfef2f 100644 --- a/src/test/regress/expected/columnar_chunk_filtering_0.out +++ b/src/test/regress/expected/columnar_chunk_filtering_0.out @@ -979,6 +979,7 @@ DETAIL: unparameterized; 1 clauses pushed down (1 row) SET hash_mem_multiplier = 1.0; +\pset footer off SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where @@ -1017,8 +1018,8 @@ CONTEXT: PL/pgSQL function columnar_test_helpers.explain_with_pg16_subplan_form -> Materialize (actual rows=100 loops=199) -> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1) Columnar Projected Columns: a -(11 rows) +\pset footer on RESET hash_mem_multiplier; SELECT sum(a) FROM pushdown_test where ( diff --git a/src/test/regress/expected/columnar_paths.out b/src/test/regress/expected/columnar_paths.out index 75fbc0fcd..e6da9a94f 100644 --- a/src/test/regress/expected/columnar_paths.out +++ b/src/test/regress/expected/columnar_paths.out @@ -604,11 +604,11 @@ SELECT * FROM correlated WHERE x = 78910; (1 row) -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) +EXPLAIN (analyze off, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- - Index Scan using uncorrelated_idx on uncorrelated (actual rows=1 loops=1) + Index Scan using uncorrelated_idx on uncorrelated Index Cond: (x = 78910) (2 rows) diff --git a/src/test/regress/expected/columnar_paths_0.out b/src/test/regress/expected/columnar_paths_0.out index 172a56a49..a9af87f35 100644 --- a/src/test/regress/expected/columnar_paths_0.out +++ b/src/test/regress/expected/columnar_paths_0.out @@ -608,11 +608,11 @@ SELECT * FROM correlated WHERE x = 78910; (1 row) -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) +EXPLAIN (analyze off, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- - Index Scan using 
uncorrelated_idx on uncorrelated (actual rows=1 loops=1) + Index Scan using uncorrelated_idx on uncorrelated Index Cond: (x = 78910) (2 rows) diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 6fc2e36a3..5d80f4ce4 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -519,20 +519,22 @@ Custom Scan (Citus Adaptive) Filter: (l_partkey = 0) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; +select public.explain_filter(' EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 + WHERE l_orderkey = 1 AND l_partkey = 0 + '); +Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Update on lineitem_360000 lineitem (actual rows=0 loops=1) - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=0 loops=1) - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) - Rows Removed by Filter: 6 + Node: host=localhost port=N dbname=regression + -> Update on lineitem_360000 lineitem (actual rows=N loops=N) + -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N) + Index Cond: (l_orderkey = N) + Filter: (l_partkey = N) + Rows Removed by Filter: N ROLLBACk; -- Test delete EXPLAIN (COSTS FALSE) @@ -1387,16 +1389,16 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); -Custom Scan (Citus Adaptive) (actual rows=3 loops=1) - Task Count: 1 - Tuple data received from nodes: 30 bytes +select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)'); +Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N + Tuple data received from nodes: N bytes Tasks Shown: All -> Task - Tuple data received from node: 30 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=3 loops=1) - Index Cond: (l_orderkey = 5) + Tuple data received from node: N bytes + Node: host=localhost port=N dbname=regression + -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N) + Index Cond: (l_orderkey = N) \set VERBOSITY TERSE PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1; BEGIN; diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 5589f3d69..b7cdc0e11 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -519,20 +519,22 @@ Custom Scan (Citus Adaptive) Filter: (l_partkey = 0) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; +select public.explain_filter(' EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 + WHERE 
l_orderkey = 1 AND l_partkey = 0 + '); +Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N Tasks Shown: All -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Update on lineitem_360000 lineitem (actual rows=0 loops=1) - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=0 loops=1) - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) - Rows Removed by Filter: 6 + Node: host=localhost port=N dbname=regression + -> Update on lineitem_360000 lineitem (actual rows=N loops=N) + -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N) + Index Cond: (l_orderkey = N) + Filter: (l_partkey = N) + Rows Removed by Filter: N ROLLBACk; -- Test delete EXPLAIN (COSTS FALSE) @@ -1387,16 +1389,16 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); -Custom Scan (Citus Adaptive) (actual rows=3 loops=1) - Task Count: 1 - Tuple data received from nodes: 30 bytes +select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)'); +Custom Scan (Citus Adaptive) (actual rows=N loops=N) + Task Count: N + Tuple data received from nodes: N bytes Tasks Shown: All -> Task - Tuple data received from node: 30 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=3 loops=1) - Index Cond: (l_orderkey = 5) + Tuple data received from node: N bytes + Node: host=localhost port=N dbname=regression + -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N) + Index Cond: (l_orderkey = N) \set VERBOSITY TERSE PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1; BEGIN; diff --git a/src/test/regress/sql/columnar_chunk_filtering.sql b/src/test/regress/sql/columnar_chunk_filtering.sql index 7265f889b..5f2cf9bb9 100644 --- a/src/test/regress/sql/columnar_chunk_filtering.sql +++ b/src/test/regress/sql/columnar_chunk_filtering.sql @@ -419,6 +419,7 @@ SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 2000 SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SET hash_mem_multiplier = 1.0; +\pset footer off SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where @@ -433,6 +434,7 @@ SELECT sum(a) FROM pushdown_test where or (a > 200000-2010); $Q$) as "QUERY PLAN"; +\pset footer on RESET hash_mem_multiplier; SELECT sum(a) FROM pushdown_test where ( diff --git a/src/test/regress/sql/columnar_paths.sql b/src/test/regress/sql/columnar_paths.sql index b9f6bf047..a08ae8662 100644 --- a/src/test/regress/sql/columnar_paths.sql +++ b/src/test/regress/sql/columnar_paths.sql @@ -373,7 +373,7 @@ SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910; -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) +EXPLAIN (analyze off, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 
78910; diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 365fda970..437c54218 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -184,10 +184,12 @@ EXPLAIN (COSTS FALSE) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; +select public.explain_filter(' EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_partkey = 0; + WHERE l_orderkey = 1 AND l_partkey = 0 + '); ROLLBACk; -- Test delete @@ -597,7 +599,7 @@ EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; -- at least make sure to fail without crashing PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; EXPLAIN EXECUTE router_executor_query_param(5); -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); +select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)'); \set VERBOSITY TERSE PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;