From f1f0b09f739c2b13802942aaebe0cd462f9741f4 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Thu, 21 Aug 2025 13:48:50 +0300 Subject: [PATCH] PG18 - Add BUFFERS OFF to EXPLAIN ANALYZE calls (#8101) Relevant PG18 commit: https://github.com/postgres/postgres/commit/c2a4078ebad71999dd451ae7d4358be3c9290b07 - Enable buffer-usage reporting by default in `EXPLAIN ANALYZE` on PostgreSQL 18 and above. Solution: - Introduce the explicit `BUFFERS OFF` option in every existing regression test to maintain pre-PG18 output consistency. - This appends `BUFFERS OFF` to all `EXPLAIN ANALYZE(...)` calls in src/test/regress/sql and the corresponding .out files. fixes #8093 --- src/test/regress/expected/binary_protocol.out | 4 +- .../expected/columnar_chunk_filtering.out | 52 +++++++++---------- .../expected/columnar_chunk_filtering_0.out | 52 +++++++++---------- src/test/regress/expected/columnar_cursor.out | 4 +- src/test/regress/expected/columnar_paths.out | 4 +- .../regress/expected/columnar_paths_0.out | 4 +- .../expected/columnar_transactions.out | 12 ++--- .../expected/coordinator_shouldhaveshards.out | 2 +- .../expected/local_shard_execution.out | 2 +- .../local_shard_execution_replicated.out | 2 +- src/test/regress/expected/merge.out | 8 +-- .../regress/expected/multi_data_types.out | 4 +- src/test/regress/expected/multi_explain.out | 38 +++++++------- src/test/regress/expected/multi_explain_0.out | 38 +++++++------- .../regress/expected/multi_insert_select.out | 2 +- src/test/regress/expected/pg13.out | 8 +-- src/test/regress/expected/pgmerge.out | 2 +- .../expected/query_single_shard_table.out | 4 +- src/test/regress/expected/single_node.out | 2 +- src/test/regress/expected/single_node_0.out | 2 +- .../expected/upgrade_columnar_after.out | 2 +- .../expected/upgrade_columnar_before.out | 2 +- src/test/regress/sql/binary_protocol.sql | 4 +- .../regress/sql/columnar_chunk_filtering.sql | 52 +++++++++---------- src/test/regress/sql/columnar_cursor.sql | 4 +-
src/test/regress/sql/columnar_paths.sql | 4 +- .../regress/sql/columnar_transactions.sql | 12 ++--- .../sql/coordinator_shouldhaveshards.sql | 2 +- .../regress/sql/local_shard_execution.sql | 2 +- .../sql/local_shard_execution_replicated.sql | 2 +- src/test/regress/sql/merge.sql | 8 +-- src/test/regress/sql/multi_data_types.sql | 4 +- src/test/regress/sql/multi_explain.sql | 38 +++++++------- src/test/regress/sql/multi_insert_select.sql | 2 +- src/test/regress/sql/pg13.sql | 8 +-- src/test/regress/sql/pgmerge.sql | 2 +- .../regress/sql/query_single_shard_table.sql | 4 +- src/test/regress/sql/single_node.sql | 2 +- .../regress/sql/upgrade_columnar_after.sql | 2 +- .../regress/sql/upgrade_columnar_before.sql | 2 +- 40 files changed, 202 insertions(+), 202 deletions(-) diff --git a/src/test/regress/expected/binary_protocol.out b/src/test/regress/expected/binary_protocol.out index 4b9a0eb18..abc9809db 100644 --- a/src/test/regress/expected/binary_protocol.out +++ b/src/test/regress/expected/binary_protocol.out @@ -49,7 +49,7 @@ SELECT id, id, id, id, id, 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 (10 rows) -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1; QUERY PLAN --------------------------------------------------------------------- Sort (actual rows=10 loops=1) @@ -66,7 +66,7 @@ EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM (11 rows) SET citus.explain_all_tasks TO ON; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1; QUERY PLAN --------------------------------------------------------------------- Sort (actual rows=10 loops=1) 
diff --git a/src/test/regress/expected/columnar_chunk_filtering.out b/src/test/regress/expected/columnar_chunk_filtering.out index d3c403eeb..69ffa7b5a 100644 --- a/src/test/regress/expected/columnar_chunk_filtering.out +++ b/src/test/regress/expected/columnar_chunk_filtering.out @@ -125,7 +125,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B'; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; QUERY PLAN --------------------------------------------------------------------- @@ -138,7 +138,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) (6 rows) SET columnar.enable_qual_pushdown = false; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; QUERY PLAN --------------------------------------------------------------------- @@ -153,7 +153,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT; TRUNCATE simple_chunk_filtering; INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 180000; QUERY PLAN --------------------------------------------------------------------- @@ -168,7 +168,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) DROP TABLE simple_chunk_filtering; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN 
(analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -181,7 +181,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 5 (7 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -197,7 +197,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) -- make next tests faster TRUNCATE multi_column_chunk_filtering; INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -208,7 +208,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 1 (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -220,7 +220,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 0 (6 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering WHERE a > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -231,7 +231,7 @@ EXPLAIN (analyze on, costs off, timing off, summary 
off) Columnar Chunk Groups Removed by Filter: 1 (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering; QUERY PLAN --------------------------------------------------------------------- @@ -242,7 +242,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) BEGIN; ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; - EXPLAIN (analyze on, costs off, timing off, summary off) + EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM multi_column_chunk_filtering; QUERY PLAN --------------------------------------------------------------------- @@ -253,7 +253,7 @@ BEGIN; ROLLBACK; CREATE TABLE another_columnar_table(x int, y int) USING columnar; INSERT INTO another_columnar_table SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; QUERY PLAN --------------------------------------------------------------------- @@ -364,7 +364,7 @@ set enable_mergejoin=false; set enable_hashjoin=false; set enable_material=false; -- test different kinds of expressions -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest WHERE id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; QUERY PLAN @@ -391,7 +391,7 @@ SELECT * FROM r1, coltest WHERE (3 rows) -- test equivalence classes -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id4 = id5 AND id5 = id6 AND id6 = id7; @@ -561,7 +561,7 @@ set 
columnar.max_custom_scan_paths to default; set columnar.planner_debug_level to default; -- test more complex parameterization set columnar.planner_debug_level = 'notice'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, coltest WHERE id1 = id2 AND id2 = id3 AND id3 = id AND n1 > x1 AND n2 > x2 AND n3 > x3; @@ -613,7 +613,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE (3 rows) -- test partitioning parameterization -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest_part WHERE id1 = id AND n1 > x1; QUERY PLAN @@ -680,7 +680,7 @@ DETAIL: unparameterized; 0 clauses pushed down --------------------------------------------------------------------- (0 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -832,7 +832,7 @@ BEGIN; COMMIT; SET columnar.max_custom_scan_paths TO 50; SET columnar.qual_pushdown_correlation_threshold TO 0.0; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; NOTICE: columnar planner: adding CustomScan path for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -855,7 +855,7 @@ DETAIL: unparameterized; 1 clauses pushed down 180912 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; NOTICE: columnar planner: adding CustomScan path 
for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -878,7 +878,7 @@ DETAIL: unparameterized; 1 clauses pushed down 375268 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -894,7 +894,7 @@ DETAIL: unparameterized; 0 clauses pushed down Columnar Projected Columns: a, b (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); NOTICE: columnar planner: adding CustomScan path for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -917,7 +917,7 @@ DETAIL: unparameterized; 1 clauses pushed down 1099459500 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -949,7 +949,7 @@ DETAIL: unparameterized; 0 clauses pushed down 20000100000 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -978,7 +978,7 @@ DETAIL: unparameterized; 1 clauses pushed down SET hash_mem_multiplier = 1.0; SELECT 
columnar_test_helpers.explain_with_pg16_subplan_format($Q$ -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where ( a > random() @@ -1043,7 +1043,7 @@ DETAIL: unparameterized; 1 clauses pushed down create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as $$ BEGIN RETURN 1+arg; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -1096,7 +1096,7 @@ BEGIN; INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(8, 'ZW'); END; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); QUERY PLAN --------------------------------------------------------------------- @@ -1123,7 +1123,7 @@ BEGIN return 'AL'; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_chunk_filtering_0.out b/src/test/regress/expected/columnar_chunk_filtering_0.out index 83fee1c24..560cab484 100644 --- a/src/test/regress/expected/columnar_chunk_filtering_0.out +++ b/src/test/regress/expected/columnar_chunk_filtering_0.out @@ -125,7 +125,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B'; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; INSERT INTO 
simple_chunk_filtering SELECT generate_series(0,234567); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; QUERY PLAN --------------------------------------------------------------------- @@ -138,7 +138,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) (6 rows) SET columnar.enable_qual_pushdown = false; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; QUERY PLAN --------------------------------------------------------------------- @@ -153,7 +153,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT; TRUNCATE simple_chunk_filtering; INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 180000; QUERY PLAN --------------------------------------------------------------------- @@ -168,7 +168,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) DROP TABLE simple_chunk_filtering; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -181,7 +181,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 5 (7 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, 
timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -197,7 +197,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) -- make next tests faster TRUNCATE multi_column_chunk_filtering; INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -208,7 +208,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 1 (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -220,7 +220,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 0 (6 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering WHERE a > 50000; QUERY PLAN --------------------------------------------------------------------- @@ -231,7 +231,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) Columnar Chunk Groups Removed by Filter: 1 (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering; QUERY PLAN --------------------------------------------------------------------- @@ -242,7 +242,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off) BEGIN; ALTER TABLE 
multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; - EXPLAIN (analyze on, costs off, timing off, summary off) + EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM multi_column_chunk_filtering; QUERY PLAN --------------------------------------------------------------------- @@ -253,7 +253,7 @@ BEGIN; ROLLBACK; CREATE TABLE another_columnar_table(x int, y int) USING columnar; INSERT INTO another_columnar_table SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; QUERY PLAN --------------------------------------------------------------------- @@ -364,7 +364,7 @@ set enable_mergejoin=false; set enable_hashjoin=false; set enable_material=false; -- test different kinds of expressions -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest WHERE id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; QUERY PLAN @@ -391,7 +391,7 @@ SELECT * FROM r1, coltest WHERE (3 rows) -- test equivalence classes -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id4 = id5 AND id5 = id6 AND id6 = id7; @@ -561,7 +561,7 @@ set columnar.max_custom_scan_paths to default; set columnar.planner_debug_level to default; -- test more complex parameterization set columnar.planner_debug_level = 'notice'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, coltest WHERE id1 = id2 AND id2 = id3 AND id3 = id AND n1 > x1 AND n2 > x2 AND n3 > x3; 
@@ -613,7 +613,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE (3 rows) -- test partitioning parameterization -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest_part WHERE id1 = id AND n1 > x1; QUERY PLAN @@ -680,7 +680,7 @@ DETAIL: unparameterized; 0 clauses pushed down --------------------------------------------------------------------- (0 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -832,7 +832,7 @@ BEGIN; COMMIT; SET columnar.max_custom_scan_paths TO 50; SET columnar.qual_pushdown_correlation_threshold TO 0.0; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; NOTICE: columnar planner: adding CustomScan path for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -855,7 +855,7 @@ DETAIL: unparameterized; 1 clauses pushed down 180912 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; NOTICE: columnar planner: adding CustomScan path for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -878,7 +878,7 @@ DETAIL: unparameterized; 1 clauses pushed down 375268 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; NOTICE: columnar planner: cannot push down clause: must match 'Var 
Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -894,7 +894,7 @@ DETAIL: unparameterized; 0 clauses pushed down Columnar Projected Columns: a, b (5 rows) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); NOTICE: columnar planner: adding CustomScan path for pushdown_test DETAIL: unparameterized; 1 clauses pushed down @@ -917,7 +917,7 @@ DETAIL: unparameterized; 1 clauses pushed down 1099459500 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -949,7 +949,7 @@ DETAIL: unparameterized; 0 clauses pushed down 20000100000 (1 row) -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -978,7 +978,7 @@ DETAIL: unparameterized; 1 clauses pushed down SET hash_mem_multiplier = 1.0; SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where ( a > random() @@ -1043,7 +1043,7 @@ DETAIL: unparameterized; 1 clauses pushed down create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as $$ BEGIN RETURN 1+arg; END; 
$$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' HINT: Var must only reference this rel, and Expr must not reference this rel @@ -1096,7 +1096,7 @@ BEGIN; INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(8, 'ZW'); END; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); QUERY PLAN --------------------------------------------------------------------- @@ -1123,7 +1123,7 @@ BEGIN return 'AL'; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_cursor.out b/src/test/regress/expected/columnar_cursor.out index 4e821cdea..e3d6d5151 100644 --- a/src/test/regress/expected/columnar_cursor.out +++ b/src/test/regress/expected/columnar_cursor.out @@ -4,7 +4,7 @@ CREATE TABLE test_cursor (a int, b int) USING columnar; INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j; -- A case where the WHERE clause might filter out some chunks -EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a = 25; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a = 25; QUERY PLAN --------------------------------------------------------------------- Custom Scan (ColumnarScan) on test_cursor (actual rows=101 loops=1) @@ -107,7 +107,7 @@ UPDATE 
test_cursor SET a = 8000 WHERE CURRENT OF a_25; ERROR: UPDATE and CTID scans not supported for ColumnarScan COMMIT; -- A case where the WHERE clause doesn't filter out any chunks -EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a > 25; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a > 25; QUERY PLAN --------------------------------------------------------------------- Custom Scan (ColumnarScan) on test_cursor (actual rows=7575 loops=1) diff --git a/src/test/regress/expected/columnar_paths.out b/src/test/regress/expected/columnar_paths.out index 1c4bfc608..1fdef4dd5 100644 --- a/src/test/regress/expected/columnar_paths.out +++ b/src/test/regress/expected/columnar_paths.out @@ -579,7 +579,7 @@ CREATE INDEX correlated_idx ON correlated(x); CREATE INDEX uncorrelated_idx ON uncorrelated(x); ANALYZE correlated, uncorrelated; -- should choose chunk group filtering; selective and correlated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM correlated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- @@ -598,7 +598,7 @@ SELECT * FROM correlated WHERE x = 78910; (1 row) -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_paths_0.out b/src/test/regress/expected/columnar_paths_0.out index 2b7349e42..4fd8c4535 100644 --- a/src/test/regress/expected/columnar_paths_0.out +++ b/src/test/regress/expected/columnar_paths_0.out @@ -583,7 +583,7 @@ CREATE INDEX correlated_idx ON correlated(x); CREATE INDEX uncorrelated_idx ON uncorrelated(x); ANALYZE 
correlated, uncorrelated; -- should choose chunk group filtering; selective and correlated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM correlated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- @@ -602,7 +602,7 @@ SELECT * FROM correlated WHERE x = 78910; (1 row) -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_transactions.out b/src/test/regress/expected/columnar_transactions.out index afe277151..cee45c7d7 100644 --- a/src/test/regress/expected/columnar_transactions.out +++ b/src/test/regress/expected/columnar_transactions.out @@ -219,7 +219,7 @@ EXPLAIN (COSTS OFF) EXECUTE p0; (2 rows) EXECUTE p0; -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p0; +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p0; QUERY PLAN --------------------------------------------------------------------- Insert on t (actual rows=0 loops=1) @@ -252,7 +252,7 @@ EXPLAIN (COSTS OFF) EXECUTE p1(16); (2 rows) EXECUTE p1(16); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p1(20); QUERY PLAN --------------------------------------------------------------------- Insert on t (actual rows=0 loops=1) @@ -289,7 +289,7 @@ EXPLAIN (COSTS OFF) EXECUTE p2(30, 40); (2 rows) EXECUTE p2(30, 40); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p2(50, 60); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p2(50, 60); QUERY PLAN 
--------------------------------------------------------------------- Insert on t (actual rows=0 loops=1) @@ -342,7 +342,7 @@ EXECUTE p3; 8 | 8 (2 rows) -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p3; +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p3; QUERY PLAN --------------------------------------------------------------------- Custom Scan (ColumnarScan) on t (actual rows=2 loops=1) @@ -397,7 +397,7 @@ EXECUTE p5(16); --------------------------------------------------------------------- (0 rows) -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p5(9); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p5(9); QUERY PLAN --------------------------------------------------------------------- Custom Scan (ColumnarScan) on t (actual rows=2 loops=1) @@ -453,7 +453,7 @@ EXECUTE p6(30, 40); 31 | 41 (1 row) -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p6(50, 60); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p6(50, 60); QUERY PLAN --------------------------------------------------------------------- Custom Scan (ColumnarScan) on t (actual rows=1 loops=1) diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index 6f24614ba..9ae64fe67 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -140,7 +140,7 @@ ROLLBACK; -- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution BEGIN; INSERT INTO test VALUES (0,1000); -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) INSERT INTO test (x, y) SELECT y, x FROM test; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO test (x, y) SELECT y, x FROM test; ERROR: EXPLAIN ANALYZE is currently not supported for INSERT ... 
SELECT commands with repartitioning ROLLBACK; -- DDL connects to locahost diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index ddfc18baf..0f0c7a14e 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -327,7 +327,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute Filter: (age = 20) (10 rows) -EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; QUERY PLAN diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index b086d7a84..bf49cc2ee 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -265,7 +265,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute Filter: (age = 20) (10 rows) -EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; QUERY PLAN diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out index 8bee2e524..e95e20474 100644 --- a/src/test/regress/expected/merge.out +++ b/src/test/regress/expected/merge.out @@ -2019,7 +2019,7 @@ DEBUG: DEBUG: Execute MERGE task list -- let's verify if data inserted to second shard of target. 
-EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM target_table; QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) (actual rows=1 loops=1) @@ -2535,7 +2535,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut -- single shard query given source_json is filtered and Postgres is smart to pushdown -- filter to the target_json as well SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING (SELECT * FROM source_json WHERE id = 1) sdn ON sda.id = sdn.id WHEN NOT matched THEN @@ -2564,7 +2564,7 @@ SELECT * FROM target_json ORDER BY 1; --SELECT * FROM target_json ORDER BY 1; -- join for source_json is happening at a different place SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z) ON sda.id = s1.id AND s1.id = s2.id WHEN NOT matched THEN @@ -2589,7 +2589,7 @@ SELECT * FROM target_json ORDER BY 1; -- update JSON column SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING source_json sdn ON sda.id = sdn.id WHEN matched THEN diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out index a88f9e1de..7b6a367ee 100644 --- a/src/test/regress/expected/multi_data_types.out +++ b/src/test/regress/expected/multi_data_types.out @@ -170,7 +170,7 @@ SELECT * FROM composite_type_partitioned_table WHERE id = 123; 123 | (123,456) (1 row) -EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, 
SUMMARY FALSE) +EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); QUERY PLAN --------------------------------------------------------------------- @@ -212,7 +212,7 @@ $cf$); (1 row) INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type); -EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type); QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 49027b217..e67b2d6c6 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -338,7 +338,7 @@ Sort -> Seq Scan on lineitem_360000 lineitem -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -365,7 +365,7 @@ SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); | BEGIN; SET LOCAL citus.enable_repartition_joins TO true; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; Aggregate (actual rows=1 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) Task Count: 6 @@ -378,7 +378,7 @@ Aggregate (actual rows=1 loops=1) Map Task 
Count: 3 Merge Task Count: 6 -- Confirm repartiton join in distributed subplan works -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) SELECT count(*) from repartition; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) @@ -408,7 +408,7 @@ END; DROP TABLE t1, t2; -- Test query text output, with ANALYZE ON SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -519,7 +519,7 @@ Custom Scan (Citus Adaptive) Filter: (l_partkey = 0) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; @@ -1074,7 +1074,7 @@ Custom Scan (Citus Adaptive) -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -1387,7 +1387,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE 
router_executor_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); Custom Scan (Citus Adaptive) (actual rows=3 loops=1) Task Count: 1 Tuple data received from nodes: 30 bytes @@ -1410,7 +1410,7 @@ Custom Scan (Citus Adaptive) -> Seq Scan on lineitem_360000 lineitem ROLLBACK; BEGIN; -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5); Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 2 Tasks Shown: One of 2 @@ -1993,7 +1993,7 @@ SET citus.shard_count TO 4; SET client_min_messages TO WARNING; SELECT create_distributed_table('explain_analyze_test', 'a'); -\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' +\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' -- router SELECT EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1; @@ -2168,7 +2168,7 @@ SELECT * FROM explain_pk ORDER BY 1; ROLLBACK; -- test EXPLAIN ANALYZE with non-text output formats BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); [ { "Plan": { @@ -2224,7 +2224,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT } ] ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk; [ { "Plan": { @@ -2271,7 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F } ] BEGIN; -EXPLAIN 
(COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); @@ -2326,7 +2326,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk; @@ -2438,7 +2438,7 @@ Aggregate (actual rows=1 loops=1) Sort Method: quicksort Memory: 25kB -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) SELECT public.explain_with_pg17_initplan_format($Q$ -EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) +EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(distinct a) FROM dist_table WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); $Q$); @@ -3076,7 +3076,7 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a'); -- sleep for the shard that has the single row, so that -- will definitely be slower set citus.explain_analyze_sort_method to "taskId"; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 2 Tuple data received from nodes: 4 bytes @@ -3086,7 +3086,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1) set citus.explain_analyze_sort_method to "execution-time"; -EXPLAIN (COSTS FALSE, 
ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 2 Tuple data received from nodes: 4 bytes @@ -3233,7 +3233,7 @@ SET search_path TO multi_explain; CREATE TABLE test_subplans (x int primary key, y int); SELECT create_distributed_table('test_subplans','x'); -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) @@ -3260,7 +3260,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1) SELECT * FROM test_subplans; 1|2 -- Will fail with duplicate pk -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038" @@ -3268,7 +3268,7 @@ DETAIL: Key (x)=(1) already exists. 
CONTEXT: while executing command on localhost:xxxxx -- Test JSON format TRUNCATE test_subplans; -EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; [ diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 00a8309a9..8f2b412eb 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -338,7 +338,7 @@ Sort -> Seq Scan on lineitem_360000 lineitem -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -365,7 +365,7 @@ SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); | BEGIN; SET LOCAL citus.enable_repartition_joins TO true; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; Aggregate (actual rows=1 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) Task Count: 6 @@ -378,7 +378,7 @@ Aggregate (actual rows=1 loops=1) Map Task Count: 3 Merge Task Count: 6 -- Confirm repartiton join in distributed subplan works -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) SELECT count(*) from repartition; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) @@ -408,7 +408,7 @@ END; DROP TABLE t1, t2; -- Test query text 
output, with ANALYZE ON SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -519,7 +519,7 @@ Custom Scan (Citus Adaptive) Filter: (l_partkey = 0) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; @@ -1074,7 +1074,7 @@ Custom Scan (Citus Adaptive) -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -1387,7 +1387,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) Node: host=localhost port=xxxxx dbname=regression -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) Index Cond: (l_orderkey = 5) -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); Custom Scan (Citus Adaptive) (actual rows=3 loops=1) Task Count: 1 Tuple data received from nodes: 30 bytes @@ -1410,7 +1410,7 @@ Custom Scan (Citus Adaptive) -> Seq Scan on lineitem_360000 lineitem ROLLBACK; BEGIN; -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE 
multi_shard_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5); Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 2 Tasks Shown: One of 2 @@ -1993,7 +1993,7 @@ SET citus.shard_count TO 4; SET client_min_messages TO WARNING; SELECT create_distributed_table('explain_analyze_test', 'a'); -\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' +\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' -- router SELECT EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1; @@ -2168,7 +2168,7 @@ SELECT * FROM explain_pk ORDER BY 1; ROLLBACK; -- test EXPLAIN ANALYZE with non-text output formats BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); [ { "Plan": { @@ -2224,7 +2224,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT } ] ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk; [ { "Plan": { @@ -2271,7 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F } ] BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); @@ -2326,7 +2326,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * 
FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk; @@ -2433,7 +2433,7 @@ Aggregate (actual rows=1 loops=1) Sort Method: quicksort Memory: 25kB -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) SELECT public.explain_with_pg17_initplan_format($Q$ -EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) +EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(distinct a) FROM dist_table WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); $Q$); @@ -3065,7 +3065,7 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a'); -- sleep for the shard that has the single row, so that -- will definitely be slower set citus.explain_analyze_sort_method to "taskId"; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 2 Tuple data received from nodes: 4 bytes @@ -3075,7 +3075,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1) set citus.explain_analyze_sort_method to "execution-time"; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Task Count: 2 Tuple data received from nodes: 4 bytes @@ -3222,7 +3222,7 
@@ SET search_path TO multi_explain; CREATE TABLE test_subplans (x int primary key, y int); SELECT create_distributed_table('test_subplans','x'); -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; Custom Scan (Citus Adaptive) (actual rows=1 loops=1) @@ -3249,7 +3249,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1) SELECT * FROM test_subplans; 1|2 -- Will fail with duplicate pk -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038" @@ -3257,7 +3257,7 @@ DETAIL: Key (x)=(1) already exists. CONTEXT: while executing command on localhost:xxxxx -- Test JSON format TRUNCATE test_subplans; -EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; [ diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index 1d854704e..f5caedc69 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -905,7 +905,7 @@ $Q$); (4 rows) -- EXPLAIN ANALYZE is not supported for INSERT ... 
SELECT via coordinator -EXPLAIN (costs off, analyze on) +EXPLAIN (costs off, analyze on, BUFFERS OFF) INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id diff --git a/src/test/regress/expected/pg13.out b/src/test/regress/expected/pg13.out index a7549dac1..2e1816a6b 100644 --- a/src/test/regress/expected/pg13.out +++ b/src/test/regress/expected/pg13.out @@ -173,7 +173,7 @@ CREATE TABLE test_wal(a int, b int); EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11); ERROR: EXPLAIN option WAL requires ANALYZE -- test WAL working properly for router queries -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, TIMING FALSE, BUFFERS OFF) INSERT INTO test_wal VALUES(1,11); QUERY PLAN --------------------------------------------------------------------- @@ -192,7 +192,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut (1 row) -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, TIMING FALSE, BUFFERS OFF) INSERT INTO test_wal VALUES(2,22); QUERY PLAN --------------------------------------------------------------------- @@ -208,7 +208,7 @@ INSERT INTO test_wal VALUES(2,22); -- Test WAL working for multi-shard query SET citus.explain_all_tasks TO on; -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, TIMING FALSE, BUFFERS OFF) INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; QUERY PLAN --------------------------------------------------------------------- @@ -229,7 +229,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; -- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan( $$ -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, TIMING FALSE, BUFFERS OFF) WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *) SELECT * FROM cte_1; $$ diff --git a/src/test/regress/expected/pgmerge.out b/src/test/regress/expected/pgmerge.out index 9057cac6b..aeb67fd59 100644 --- a/src/test/regress/expected/pgmerge.out +++ b/src/test/regress/expected/pgmerge.out @@ -1372,7 +1372,7 @@ $$ DECLARE ln text; BEGIN FOR ln IN - EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + EXECUTE 'explain (analyze, timing off, summary off, costs off, buffers off) ' || query LOOP ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out index 5475e0c63..74e499066 100644 --- a/src/test/regress/expected/query_single_shard_table.out +++ b/src/test/regress/expected/query_single_shard_table.out @@ -1396,7 +1396,7 @@ DEBUG: Creating router plan -- -- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b > SET client_min_messages TO WARNING; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF) INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2; QUERY PLAN --------------------------------------------------------------------- @@ -1419,7 +1419,7 @@ DEBUG: Creating router plan DEBUG: Collecting INSERT ...
SELECT results on coordinator -- between a single-shard table and a table of different type SET client_min_messages TO WARNING; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF) INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index d3251de49..2d1a26a44 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -983,7 +983,7 @@ BEGIN; ROLLBACK; -- explain analyze should work on a single node -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT * FROM test; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out index 12b385e96..a94c02951 100644 --- a/src/test/regress/expected/single_node_0.out +++ b/src/test/regress/expected/single_node_0.out @@ -994,7 +994,7 @@ BEGIN; ROLLBACK; -- explain analyze should work on a single node -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT * FROM test; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/upgrade_columnar_after.out b/src/test/regress/expected/upgrade_columnar_after.out index 768a057f9..62f9414a7 100644 --- a/src/test/regress/expected/upgrade_columnar_after.out +++ b/src/test/regress/expected/upgrade_columnar_after.out @@ -184,7 +184,7 @@ SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = set columnar.enable_custom_scan to 'off'; set 
enable_seqscan to off; set seq_page_cost TO 10000000; -EXPLAIN (costs off, timing off, summary off, analyze on) +EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF) SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/expected/upgrade_columnar_before.out b/src/test/regress/expected/upgrade_columnar_before.out index fd0e7993e..500407cf7 100644 --- a/src/test/regress/expected/upgrade_columnar_before.out +++ b/src/test/regress/expected/upgrade_columnar_before.out @@ -337,7 +337,7 @@ SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = set columnar.enable_custom_scan to 'off'; set enable_seqscan to off; set seq_page_cost TO 10000000; -EXPLAIN (costs off, timing off, summary off, analyze on) +EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF) SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; QUERY PLAN --------------------------------------------------------------------- diff --git a/src/test/regress/sql/binary_protocol.sql b/src/test/regress/sql/binary_protocol.sql index a6eefc14e..a0770fef5 100644 --- a/src/test/regress/sql/binary_protocol.sql +++ b/src/test/regress/sql/binary_protocol.sql @@ -20,9 +20,9 @@ SELECT id, id, id, id, id, id, id, id, id, id FROM t ORDER BY id; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1; SET citus.explain_all_tasks TO ON; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1; INSERT INTO t SELECT count(*) from t; diff --git a/src/test/regress/sql/columnar_chunk_filtering.sql 
b/src/test/regress/sql/columnar_chunk_filtering.sql index 08ac2b627..ce93b43fa 100644 --- a/src/test/regress/sql/columnar_chunk_filtering.sql +++ b/src/test/regress/sql/columnar_chunk_filtering.sql @@ -79,10 +79,10 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B'; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; SET columnar.enable_qual_pushdown = false; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 123456; SET columnar.enable_qual_pushdown TO DEFAULT; @@ -90,7 +90,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT; TRUNCATE simple_chunk_filtering; INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM simple_chunk_filtering WHERE i > 180000; DROP TABLE simple_chunk_filtering; @@ -99,39 +99,39 @@ DROP TABLE simple_chunk_filtering; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; -- make next tests faster TRUNCATE 
multi_column_chunk_filtering; INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering WHERE a > 50000; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT FROM multi_column_chunk_filtering; BEGIN; ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; - EXPLAIN (analyze on, costs off, timing off, summary off) + EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM multi_column_chunk_filtering; ROLLBACK; CREATE TABLE another_columnar_table(x int, y int) USING columnar; INSERT INTO another_columnar_table SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; EXPLAIN (costs off, timing off, summary off) @@ -219,7 +219,7 @@ set enable_hashjoin=false; set enable_material=false; -- test different kinds of expressions -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest WHERE id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; SELECT * FROM r1, coltest WHERE @@ -227,7 +227,7 @@ SELECT * FROM r1, coltest WHERE -- test equivalence classes 
-EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id4 = id5 AND id5 = id6 AND id6 = id7; @@ -258,7 +258,7 @@ set columnar.planner_debug_level to default; set columnar.planner_debug_level = 'notice'; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, r2, r3, coltest WHERE id1 = id2 AND id2 = id3 AND id3 = id AND n1 > x1 AND n2 > x2 AND n3 > x3; @@ -270,7 +270,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE n1 > x1 AND n2 > x2 AND n3 > x3; -- test partitioning parameterization -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM r1, coltest_part WHERE id1 = id AND n1 > x1; SELECT * FROM r1, coltest_part WHERE @@ -300,7 +300,7 @@ END; $$; select * from coltest where x3 = vol(); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; set columnar.planner_debug_level to default; @@ -391,32 +391,32 @@ COMMIT; SET columnar.max_custom_scan_paths TO 50; SET columnar.qual_pushdown_correlation_threshold TO 0.0; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; -EXPLAIN (analyze on, 
costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SET hash_mem_multiplier = 1.0; SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where ( a > random() @@ -445,7 +445,7 @@ or create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as $$ BEGIN RETURN 1+arg; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); @@ -476,7 +476,7 @@ BEGIN; INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(8, 'ZW'); END; -EXPLAIN (analyze on, costs off, timing off, 
summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); @@ -488,7 +488,7 @@ BEGIN return 'AL'; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); diff --git a/src/test/regress/sql/columnar_cursor.sql b/src/test/regress/sql/columnar_cursor.sql index 125ed2fed..2c83fa5f3 100644 --- a/src/test/regress/sql/columnar_cursor.sql +++ b/src/test/regress/sql/columnar_cursor.sql @@ -6,7 +6,7 @@ CREATE TABLE test_cursor (a int, b int) USING columnar; INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j; -- A case where the WHERE clause might filter out some chunks -EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a = 25; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a = 25; BEGIN; DECLARE a_25 SCROLL CURSOR FOR SELECT * FROM test_cursor WHERE a = 25 ORDER BY 2; @@ -33,7 +33,7 @@ UPDATE test_cursor SET a = 8000 WHERE CURRENT OF a_25; COMMIT; -- A case where the WHERE clause doesn't filter out any chunks -EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a > 25; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a > 25; BEGIN; DECLARE a_25 SCROLL CURSOR FOR SELECT * FROM test_cursor WHERE a > 25 ORDER BY 1, 2; diff --git a/src/test/regress/sql/columnar_paths.sql b/src/test/regress/sql/columnar_paths.sql index 748b9006a..c9c1c2026 100644 --- a/src/test/regress/sql/columnar_paths.sql +++ b/src/test/regress/sql/columnar_paths.sql @@ -361,12 +361,12 @@ 
CREATE INDEX uncorrelated_idx ON uncorrelated(x); ANALYZE correlated, uncorrelated; -- should choose chunk group filtering; selective and correlated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910; -- should choose index scan; selective but uncorrelated -EXPLAIN (analyze on, costs off, timing off, summary off) +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 78910; diff --git a/src/test/regress/sql/columnar_transactions.sql b/src/test/regress/sql/columnar_transactions.sql index ece4e9ac0..96ec43e5e 100644 --- a/src/test/regress/sql/columnar_transactions.sql +++ b/src/test/regress/sql/columnar_transactions.sql @@ -102,42 +102,42 @@ SELECT * FROM t ORDER BY a; PREPARE p0 AS INSERT INTO t VALUES (8, 8), (9, 9); EXPLAIN (COSTS OFF) EXECUTE p0; EXECUTE p0; -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p0; +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p0; SELECT * FROM t ORDER BY a; -- INSERT INTO with 1 param PREPARE p1(int) AS INSERT INTO t VALUES (10, $1), (11, $1+2); EXPLAIN (COSTS OFF) EXECUTE p1(16); EXECUTE p1(16); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p1(20); SELECT * FROM t ORDER BY a; -- INSERT INTO with >1 params PREPARE p2(int, int) AS INSERT INTO t VALUES (12, $1), (13, $1+2), (14, $2), ($1+1, $2+1); EXPLAIN (COSTS OFF) EXECUTE p2(30, 40); EXECUTE p2(30, 40); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p2(50, 60); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p2(50, 60); SELECT * FROM t ORDER BY a; -- SELECT with 0 params PREPARE p3 AS SELECT * FROM t WHERE a = 8; 
EXPLAIN (COSTS OFF) EXECUTE p3; EXECUTE p3; -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p3; +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p3; SELECT * FROM t ORDER BY a; -- SELECT with 1 param PREPARE p5(int) AS SELECT * FROM t WHERE a = $1; EXPLAIN (COSTS OFF) EXECUTE p5(16); EXECUTE p5(16); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p5(9); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p5(9); SELECT * FROM t ORDER BY a; -- SELECT with >1 params PREPARE p6(int, int) AS SELECT * FROM t WHERE a = $1+1 AND b = $2+1; EXPLAIN (COSTS OFF) EXECUTE p6(30, 40); EXECUTE p6(30, 40); -EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p6(50, 60); +EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p6(50, 60); SELECT * FROM t ORDER BY a; DROP TABLE t; diff --git a/src/test/regress/sql/coordinator_shouldhaveshards.sql b/src/test/regress/sql/coordinator_shouldhaveshards.sql index 6194d3a59..fdff7a31d 100644 --- a/src/test/regress/sql/coordinator_shouldhaveshards.sql +++ b/src/test/regress/sql/coordinator_shouldhaveshards.sql @@ -66,7 +66,7 @@ ROLLBACK; -- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution BEGIN; INSERT INTO test VALUES (0,1000); -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) INSERT INTO test (x, y) SELECT y, x FROM test; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO test (x, y) SELECT y, x FROM test; ROLLBACK; -- DDL connects to locahost diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql index 2845693c9..095b9a7bf 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -220,7 +220,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, 
SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index 1c3d264e0..5b4343bdc 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -185,7 +185,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; -EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) SELECT 1 FROM r WHERE z < 3; diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql index 7bafa1da5..3f4a8a666 100644 --- a/src/test/regress/sql/merge.sql +++ b/src/test/regress/sql/merge.sql @@ -1282,7 +1282,7 @@ WHEN NOT MATCHED THEN VALUES (s.some_number, 'parag'); -- let's verify if data inserted to second shard of target. -EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; +EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM target_table; -- let's verify target data too. 
SELECT * FROM target_table; @@ -1577,7 +1577,7 @@ SELECT create_distributed_table('target_json','id'), create_distributed_table('s -- single shard query given source_json is filtered and Postgres is smart to pushdown -- filter to the target_json as well SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING (SELECT * FROM source_json WHERE id = 1) sdn ON sda.id = sdn.id WHEN NOT matched THEN @@ -1597,7 +1597,7 @@ SELECT * FROM target_json ORDER BY 1; -- join for source_json is happening at a different place SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z) ON sda.id = s1.id AND s1.id = s2.id WHEN NOT matched THEN @@ -1607,7 +1607,7 @@ SELECT * FROM target_json ORDER BY 1; -- update JSON column SELECT public.coordinator_plan($Q$ -EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda +EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda USING source_json sdn ON sda.id = sdn.id WHEN matched THEN diff --git a/src/test/regress/sql/multi_data_types.sql b/src/test/regress/sql/multi_data_types.sql index d307c4c6f..2b0c3af9b 100644 --- a/src/test/regress/sql/multi_data_types.sql +++ b/src/test/regress/sql/multi_data_types.sql @@ -125,7 +125,7 @@ $cf$); INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); SELECT * FROM composite_type_partitioned_table WHERE id = 123; -EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); SELECT run_command_on_coordinator_and_workers($cf$ @@ -144,7 +144,7 @@ 
SELECT run_command_on_coordinator_and_workers($cf$ $cf$); INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type); -EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type); diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index c6502fec8..9ebc3e4e7 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -130,7 +130,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -142,9 +142,9 @@ CREATE TABLE t2(a int, b int); SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); BEGIN; SET LOCAL citus.enable_repartition_joins TO true; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; -- Confirm repartiton join in distributed subplan works -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) SELECT count(*) from repartition; END; @@ -152,7 +152,7 @@ DROP TABLE t1, t2; -- Test query text output, with ANALYZE ON SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) +EXPLAIN (COSTS FALSE, 
ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -184,7 +184,7 @@ EXPLAIN (COSTS FALSE) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) BEGIN; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; @@ -488,7 +488,7 @@ EXPLAIN (COSTS FALSE) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; $Q$); @@ -597,7 +597,7 @@ EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; -- at least make sure to fail without crashing PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; EXPLAIN EXECUTE router_executor_query_param(5); -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5); \set VERBOSITY TERSE PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1; @@ -605,7 +605,7 @@ BEGIN; EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5); ROLLBACK; BEGIN; -EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5); +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5); ROLLBACK; \set VERBOSITY DEFAULT @@ -865,7 +865,7 @@ SET citus.shard_count TO 4; SET client_min_messages TO WARNING; SELECT 
create_distributed_table('explain_analyze_test', 'a'); -\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' +\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' -- router SELECT @@ -928,16 +928,16 @@ ROLLBACK; -- test EXPLAIN ANALYZE with non-text output formats BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk; BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk; +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk; DROP TABLE explain_pk; @@ -960,7 +960,7 @@ EXPLAIN :default_analyze_flags SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table; SELECT public.explain_with_pg17_initplan_format($Q$ -EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) +EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(distinct a) FROM dist_table WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); $Q$); @@ -1114,9 +1114,9 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a'); -- sleep for the shard that has the single row, so that -- will definitely 
be slower set citus.explain_analyze_sort_method to "taskId"; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; set citus.explain_analyze_sort_method to "execution-time"; -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; -- reset back reset citus.explain_analyze_sort_method; DROP TABLE explain_analyze_execution_time; @@ -1171,7 +1171,7 @@ SET search_path TO multi_explain; CREATE TABLE test_subplans (x int primary key, y int); SELECT create_distributed_table('test_subplans','x'); -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; @@ -1179,13 +1179,13 @@ SELECT * FROM a; SELECT * FROM test_subplans; -- Will fail with duplicate pk -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; -- Test JSON format TRUNCATE test_subplans; -EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) +EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) SELECT * FROM a; diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql index 1f0679e34..a9f468c19 100644 --- 
a/src/test/regress/sql/multi_insert_select.sql +++ b/src/test/regress/sql/multi_insert_select.sql @@ -682,7 +682,7 @@ SET client_min_messages TO WARNING; $Q$); -- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator -EXPLAIN (costs off, analyze on) +EXPLAIN (costs off, analyze on, BUFFERS OFF) INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id diff --git a/src/test/regress/sql/pg13.sql b/src/test/regress/sql/pg13.sql index 11c1145d7..9ef6f9505 100644 --- a/src/test/regress/sql/pg13.sql +++ b/src/test/regress/sql/pg13.sql @@ -93,15 +93,15 @@ CREATE TABLE test_wal(a int, b int); -- test WAL without ANALYZE, this should raise an error EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11); -- test WAL working properly for router queries -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) INSERT INTO test_wal VALUES(1,11); SELECT create_distributed_table('test_wal', 'a'); -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) INSERT INTO test_wal VALUES(2,22); -- Test WAL working for multi-shard query SET citus.explain_all_tasks TO on; -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; -- make sure WAL works in distributed subplans @@ -109,7 +109,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; -- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan( $$ -EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) +EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *) SELECT * FROM cte_1; $$ diff --git a/src/test/regress/sql/pgmerge.sql b/src/test/regress/sql/pgmerge.sql index eeeb881d3..d3bc7e3b3 100644 --- a/src/test/regress/sql/pgmerge.sql +++ b/src/test/regress/sql/pgmerge.sql @@ -894,7 +894,7 @@ $$ DECLARE ln text; BEGIN FOR ln IN - EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + EXECUTE 'explain (analyze, timing off, summary off, costs off, buffers off) ' || query LOOP ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); diff --git a/src/test/regress/sql/query_single_shard_table.sql b/src/test/regress/sql/query_single_shard_table.sql index 4abda0bea..0d4c31df5 100644 --- a/src/test/regress/sql/query_single_shard_table.sql +++ b/src/test/regress/sql/query_single_shard_table.sql @@ -618,7 +618,7 @@ JOIN LATERAL ( -- -- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b > SET client_min_messages TO WARNING; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF) INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2; SET client_min_messages TO DEBUG2; @@ -627,7 +627,7 @@ INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c2_t1; -- between a single-shard table and a table of different type SET client_min_messages TO WARNING; -EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) +EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF) INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table; SET client_min_messages TO DEBUG2; diff --git a/src/test/regress/sql/single_node.sql
b/src/test/regress/sql/single_node.sql index 962f59f79..b8838ac66 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -591,7 +591,7 @@ BEGIN; ROLLBACK; -- explain analyze should work on a single node -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) +EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT * FROM test; -- common utility command diff --git a/src/test/regress/sql/upgrade_columnar_after.sql b/src/test/regress/sql/upgrade_columnar_after.sql index 133fcfde0..cd1be9443 100644 --- a/src/test/regress/sql/upgrade_columnar_after.sql +++ b/src/test/regress/sql/upgrade_columnar_after.sql @@ -65,7 +65,7 @@ set columnar.enable_custom_scan to 'off'; set enable_seqscan to off; set seq_page_cost TO 10000000; -EXPLAIN (costs off, timing off, summary off, analyze on) +EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF) SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; -- make sure that we re-enable columnar scan diff --git a/src/test/regress/sql/upgrade_columnar_before.sql b/src/test/regress/sql/upgrade_columnar_before.sql index c2570aa55..9c943ef16 100644 --- a/src/test/regress/sql/upgrade_columnar_before.sql +++ b/src/test/regress/sql/upgrade_columnar_before.sql @@ -253,7 +253,7 @@ set columnar.enable_custom_scan to 'off'; set enable_seqscan to off; set seq_page_cost TO 10000000; -EXPLAIN (costs off, timing off, summary off, analyze on) +EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF) SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; -- make sure that we re-enable columnar scan