PG18 - Add BUFFERS OFF to EXPLAIN ANALYZE calls (#8101)

Relevant PG18 commit:
c2a4078eba
- Enable buffer-usage reporting by default in `EXPLAIN ANALYZE` on
PostgreSQL 18 and above.

Solution:
- Introduce the explicit `BUFFERS OFF` option in every existing
regression test to maintain pre-PG18 output consistency.
- This appends `BUFFERS OFF` to all `EXPLAIN (ANALYZE ...)` calls in
src/test/regress/sql and the corresponding .out files.

fixes #8093
pull/8141/head
Mehmet YILMAZ 2025-08-21 13:48:50 +03:00 committed by GitHub
parent 683ead9607
commit f1f0b09f73
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
40 changed files with 202 additions and 202 deletions

View File

@ -49,7 +49,7 @@ SELECT id, id, id, id, id,
10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10
(10 rows) (10 rows)
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Sort (actual rows=10 loops=1) Sort (actual rows=10 loops=1)
@ -66,7 +66,7 @@ EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM
(11 rows) (11 rows)
SET citus.explain_all_tasks TO ON; SET citus.explain_all_tasks TO ON;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Sort (actual rows=10 loops=1) Sort (actual rows=10 loops=1)

View File

@ -125,7 +125,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';
CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -138,7 +138,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
(6 rows) (6 rows)
SET columnar.enable_qual_pushdown = false; SET columnar.enable_qual_pushdown = false;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -153,7 +153,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT;
TRUNCATE simple_chunk_filtering; TRUNCATE simple_chunk_filtering;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 180000; SELECT * FROM simple_chunk_filtering WHERE i > 180000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -168,7 +168,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
DROP TABLE simple_chunk_filtering; DROP TABLE simple_chunk_filtering;
CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -181,7 +181,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 5 Columnar Chunk Groups Removed by Filter: 5
(7 rows) (7 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -197,7 +197,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
-- make next tests faster -- make next tests faster
TRUNCATE multi_column_chunk_filtering; TRUNCATE multi_column_chunk_filtering;
INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -208,7 +208,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1 Columnar Chunk Groups Removed by Filter: 1
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -220,7 +220,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 0 Columnar Chunk Groups Removed by Filter: 0
(6 rows) (6 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering WHERE a > 50000; SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -231,7 +231,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1 Columnar Chunk Groups Removed by Filter: 1
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering; SELECT FROM multi_column_chunk_filtering;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -242,7 +242,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
BEGIN; BEGIN;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM multi_column_chunk_filtering; SELECT * FROM multi_column_chunk_filtering;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -253,7 +253,7 @@ BEGIN;
ROLLBACK; ROLLBACK;
CREATE TABLE another_columnar_table(x int, y int) USING columnar; CREATE TABLE another_columnar_table(x int, y int) USING columnar;
INSERT INTO another_columnar_table SELECT generate_series(0,5); INSERT INTO another_columnar_table SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -364,7 +364,7 @@ set enable_mergejoin=false;
set enable_hashjoin=false; set enable_hashjoin=false;
set enable_material=false; set enable_material=false;
-- test different kinds of expressions -- test different kinds of expressions
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest WHERE SELECT * FROM r1, coltest WHERE
id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
QUERY PLAN QUERY PLAN
@ -391,7 +391,7 @@ SELECT * FROM r1, coltest WHERE
(3 rows) (3 rows)
-- test equivalence classes -- test equivalence classes
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
id4 = id5 AND id5 = id6 AND id6 = id7; id4 = id5 AND id5 = id6 AND id6 = id7;
@ -561,7 +561,7 @@ set columnar.max_custom_scan_paths to default;
set columnar.planner_debug_level to default; set columnar.planner_debug_level to default;
-- test more complex parameterization -- test more complex parameterization
set columnar.planner_debug_level = 'notice'; set columnar.planner_debug_level = 'notice';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, coltest WHERE SELECT * FROM r1, r2, r3, coltest WHERE
id1 = id2 AND id2 = id3 AND id3 = id AND id1 = id2 AND id2 = id3 AND id3 = id AND
n1 > x1 AND n2 > x2 AND n3 > x3; n1 > x1 AND n2 > x2 AND n3 > x3;
@ -613,7 +613,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE
(3 rows) (3 rows)
-- test partitioning parameterization -- test partitioning parameterization
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest_part WHERE SELECT * FROM r1, coltest_part WHERE
id1 = id AND n1 > x1; id1 = id AND n1 > x1;
QUERY PLAN QUERY PLAN
@ -680,7 +680,7 @@ DETAIL: unparameterized; 0 clauses pushed down
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -832,7 +832,7 @@ BEGIN;
COMMIT; COMMIT;
SET columnar.max_custom_scan_paths TO 50; SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0; SET columnar.qual_pushdown_correlation_threshold TO 0.0;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -855,7 +855,7 @@ DETAIL: unparameterized; 1 clauses pushed down
180912 180912
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -878,7 +878,7 @@ DETAIL: unparameterized; 1 clauses pushed down
375268 375268
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -894,7 +894,7 @@ DETAIL: unparameterized; 0 clauses pushed down
Columnar Projected Columns: a, b Columnar Projected Columns: a, b
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -917,7 +917,7 @@ DETAIL: unparameterized; 1 clauses pushed down
1099459500 1099459500
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -949,7 +949,7 @@ DETAIL: unparameterized; 0 clauses pushed down
20000100000 20000100000
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -978,7 +978,7 @@ DETAIL: unparameterized; 1 clauses pushed down
SET hash_mem_multiplier = 1.0; SET hash_mem_multiplier = 1.0;
SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where SELECT sum(a) FROM pushdown_test where
( (
a > random() a > random()
@ -1043,7 +1043,7 @@ DETAIL: unparameterized; 1 clauses pushed down
create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
$$ BEGIN RETURN 1+arg; END; $$; $$ BEGIN RETURN 1+arg; END; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -1096,7 +1096,7 @@ BEGIN;
INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(7, 'USA');
INSERT INTO pushdown_test VALUES(8, 'ZW'); INSERT INTO pushdown_test VALUES(8, 'ZW');
END; END;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1123,7 +1123,7 @@ BEGIN
return 'AL'; return 'AL';
END; END;
$$; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -125,7 +125,7 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';
CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -138,7 +138,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
(6 rows) (6 rows)
SET columnar.enable_qual_pushdown = false; SET columnar.enable_qual_pushdown = false;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -153,7 +153,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT;
TRUNCATE simple_chunk_filtering; TRUNCATE simple_chunk_filtering;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 180000; SELECT * FROM simple_chunk_filtering WHERE i > 180000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -168,7 +168,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
DROP TABLE simple_chunk_filtering; DROP TABLE simple_chunk_filtering;
CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -181,7 +181,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 5 Columnar Chunk Groups Removed by Filter: 5
(7 rows) (7 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -197,7 +197,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
-- make next tests faster -- make next tests faster
TRUNCATE multi_column_chunk_filtering; TRUNCATE multi_column_chunk_filtering;
INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -208,7 +208,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1 Columnar Chunk Groups Removed by Filter: 1
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -220,7 +220,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 0 Columnar Chunk Groups Removed by Filter: 0
(6 rows) (6 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering WHERE a > 50000; SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -231,7 +231,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
Columnar Chunk Groups Removed by Filter: 1 Columnar Chunk Groups Removed by Filter: 1
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering; SELECT FROM multi_column_chunk_filtering;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -242,7 +242,7 @@ EXPLAIN (analyze on, costs off, timing off, summary off)
BEGIN; BEGIN;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM multi_column_chunk_filtering; SELECT * FROM multi_column_chunk_filtering;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -253,7 +253,7 @@ BEGIN;
ROLLBACK; ROLLBACK;
CREATE TABLE another_columnar_table(x int, y int) USING columnar; CREATE TABLE another_columnar_table(x int, y int) USING columnar;
INSERT INTO another_columnar_table SELECT generate_series(0,5); INSERT INTO another_columnar_table SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -364,7 +364,7 @@ set enable_mergejoin=false;
set enable_hashjoin=false; set enable_hashjoin=false;
set enable_material=false; set enable_material=false;
-- test different kinds of expressions -- test different kinds of expressions
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest WHERE SELECT * FROM r1, coltest WHERE
id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
QUERY PLAN QUERY PLAN
@ -391,7 +391,7 @@ SELECT * FROM r1, coltest WHERE
(3 rows) (3 rows)
-- test equivalence classes -- test equivalence classes
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
id4 = id5 AND id5 = id6 AND id6 = id7; id4 = id5 AND id5 = id6 AND id6 = id7;
@ -561,7 +561,7 @@ set columnar.max_custom_scan_paths to default;
set columnar.planner_debug_level to default; set columnar.planner_debug_level to default;
-- test more complex parameterization -- test more complex parameterization
set columnar.planner_debug_level = 'notice'; set columnar.planner_debug_level = 'notice';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, coltest WHERE SELECT * FROM r1, r2, r3, coltest WHERE
id1 = id2 AND id2 = id3 AND id3 = id AND id1 = id2 AND id2 = id3 AND id3 = id AND
n1 > x1 AND n2 > x2 AND n3 > x3; n1 > x1 AND n2 > x2 AND n3 > x3;
@ -613,7 +613,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE
(3 rows) (3 rows)
-- test partitioning parameterization -- test partitioning parameterization
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest_part WHERE SELECT * FROM r1, coltest_part WHERE
id1 = id AND n1 > x1; id1 = id AND n1 > x1;
QUERY PLAN QUERY PLAN
@ -680,7 +680,7 @@ DETAIL: unparameterized; 0 clauses pushed down
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -832,7 +832,7 @@ BEGIN;
COMMIT; COMMIT;
SET columnar.max_custom_scan_paths TO 50; SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0; SET columnar.qual_pushdown_correlation_threshold TO 0.0;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -855,7 +855,7 @@ DETAIL: unparameterized; 1 clauses pushed down
180912 180912
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -878,7 +878,7 @@ DETAIL: unparameterized; 1 clauses pushed down
375268 375268
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -894,7 +894,7 @@ DETAIL: unparameterized; 0 clauses pushed down
Columnar Projected Columns: a, b Columnar Projected Columns: a, b
(5 rows) (5 rows)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
NOTICE: columnar planner: adding CustomScan path for pushdown_test NOTICE: columnar planner: adding CustomScan path for pushdown_test
DETAIL: unparameterized; 1 clauses pushed down DETAIL: unparameterized; 1 clauses pushed down
@ -917,7 +917,7 @@ DETAIL: unparameterized; 1 clauses pushed down
1099459500 1099459500
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -949,7 +949,7 @@ DETAIL: unparameterized; 0 clauses pushed down
20000100000 20000100000
(1 row) (1 row)
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -978,7 +978,7 @@ DETAIL: unparameterized; 1 clauses pushed down
SET hash_mem_multiplier = 1.0; SET hash_mem_multiplier = 1.0;
SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where SELECT sum(a) FROM pushdown_test where
( (
a > random() a > random()
@ -1043,7 +1043,7 @@ DETAIL: unparameterized; 1 clauses pushed down
create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
$$ BEGIN RETURN 1+arg; END; $$; $$ BEGIN RETURN 1+arg; END; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var' NOTICE: columnar planner: cannot push down clause: must match 'Var <op> Expr' or 'Expr <op> Var'
HINT: Var must only reference this rel, and Expr must not reference this rel HINT: Var must only reference this rel, and Expr must not reference this rel
@ -1096,7 +1096,7 @@ BEGIN;
INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(7, 'USA');
INSERT INTO pushdown_test VALUES(8, 'ZW'); INSERT INTO pushdown_test VALUES(8, 'ZW');
END; END;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1123,7 +1123,7 @@ BEGIN
return 'AL'; return 'AL';
END; END;
$$; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -4,7 +4,7 @@
CREATE TABLE test_cursor (a int, b int) USING columnar; CREATE TABLE test_cursor (a int, b int) USING columnar;
INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j; INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j;
-- A case where the WHERE clause might filter out some chunks -- A case where the WHERE clause might filter out some chunks
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a = 25; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a = 25;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (ColumnarScan) on test_cursor (actual rows=101 loops=1) Custom Scan (ColumnarScan) on test_cursor (actual rows=101 loops=1)
@ -107,7 +107,7 @@ UPDATE test_cursor SET a = 8000 WHERE CURRENT OF a_25;
ERROR: UPDATE and CTID scans not supported for ColumnarScan ERROR: UPDATE and CTID scans not supported for ColumnarScan
COMMIT; COMMIT;
-- A case where the WHERE clause doesn't filter out any chunks -- A case where the WHERE clause doesn't filter out any chunks
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a > 25; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a > 25;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (ColumnarScan) on test_cursor (actual rows=7575 loops=1) Custom Scan (ColumnarScan) on test_cursor (actual rows=7575 loops=1)

View File

@ -579,7 +579,7 @@ CREATE INDEX correlated_idx ON correlated(x);
CREATE INDEX uncorrelated_idx ON uncorrelated(x); CREATE INDEX uncorrelated_idx ON uncorrelated(x);
ANALYZE correlated, uncorrelated; ANALYZE correlated, uncorrelated;
-- should choose chunk group filtering; selective and correlated -- should choose chunk group filtering; selective and correlated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -598,7 +598,7 @@ SELECT * FROM correlated WHERE x = 78910;
(1 row) (1 row)
-- should choose index scan; selective but uncorrelated -- should choose index scan; selective but uncorrelated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 78910;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -583,7 +583,7 @@ CREATE INDEX correlated_idx ON correlated(x);
CREATE INDEX uncorrelated_idx ON uncorrelated(x); CREATE INDEX uncorrelated_idx ON uncorrelated(x);
ANALYZE correlated, uncorrelated; ANALYZE correlated, uncorrelated;
-- should choose chunk group filtering; selective and correlated -- should choose chunk group filtering; selective and correlated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -602,7 +602,7 @@ SELECT * FROM correlated WHERE x = 78910;
(1 row) (1 row)
-- should choose index scan; selective but uncorrelated -- should choose index scan; selective but uncorrelated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 78910;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -219,7 +219,7 @@ EXPLAIN (COSTS OFF) EXECUTE p0;
(2 rows) (2 rows)
EXECUTE p0; EXECUTE p0;
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p0; EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p0;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Insert on t (actual rows=0 loops=1) Insert on t (actual rows=0 loops=1)
@ -252,7 +252,7 @@ EXPLAIN (COSTS OFF) EXECUTE p1(16);
(2 rows) (2 rows)
EXECUTE p1(16); EXECUTE p1(16);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p1(20);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Insert on t (actual rows=0 loops=1) Insert on t (actual rows=0 loops=1)
@ -289,7 +289,7 @@ EXPLAIN (COSTS OFF) EXECUTE p2(30, 40);
(2 rows) (2 rows)
EXECUTE p2(30, 40); EXECUTE p2(30, 40);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p2(50, 60); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p2(50, 60);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Insert on t (actual rows=0 loops=1) Insert on t (actual rows=0 loops=1)
@ -342,7 +342,7 @@ EXECUTE p3;
8 | 8 8 | 8
(2 rows) (2 rows)
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p3; EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p3;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (ColumnarScan) on t (actual rows=2 loops=1) Custom Scan (ColumnarScan) on t (actual rows=2 loops=1)
@ -397,7 +397,7 @@ EXECUTE p5(16);
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p5(9); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p5(9);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (ColumnarScan) on t (actual rows=2 loops=1) Custom Scan (ColumnarScan) on t (actual rows=2 loops=1)
@ -453,7 +453,7 @@ EXECUTE p6(30, 40);
31 | 41 31 | 41
(1 row) (1 row)
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p6(50, 60); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p6(50, 60);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (ColumnarScan) on t (actual rows=1 loops=1) Custom Scan (ColumnarScan) on t (actual rows=1 loops=1)

View File

@ -140,7 +140,7 @@ ROLLBACK;
-- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution -- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution
BEGIN; BEGIN;
INSERT INTO test VALUES (0,1000); INSERT INTO test VALUES (0,1000);
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) INSERT INTO test (x, y) SELECT y, x FROM test; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO test (x, y) SELECT y, x FROM test;
ERROR: EXPLAIN ANALYZE is currently not supported for INSERT ... SELECT commands with repartitioning ERROR: EXPLAIN ANALYZE is currently not supported for INSERT ... SELECT commands with repartitioning
ROLLBACK; ROLLBACK;
-- DDL connects to locahost -- DDL connects to locahost

View File

@ -327,7 +327,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute
Filter: (age = 20) Filter: (age = 20)
(10 rows) (10 rows)
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
SELECT 1 FROM r WHERE z < 3; SELECT 1 FROM r WHERE z < 3;
QUERY PLAN QUERY PLAN

View File

@ -265,7 +265,7 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distribute
Filter: (age = 20) Filter: (age = 20)
(10 rows) (10 rows)
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
SELECT 1 FROM r WHERE z < 3; SELECT 1 FROM r WHERE z < 3;
QUERY PLAN QUERY PLAN

View File

@ -2019,7 +2019,7 @@ DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_table_xxxxxxx t US
DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_table_xxxxxxx t USING (SELECT intermediate_result.id, intermediate_result.some_number FROM read_intermediate_result('merge_into_XXX_4000079'::text, 'binary'::citus_copy_format) intermediate_result(id integer, some_number integer)) s ON (t.id OPERATOR(pg_catalog.=) s.some_number) WHEN NOT MATCHED THEN INSERT (id, name) VALUES (s.some_number, 'parag'::text)> DEBUG: <Deparsed MERGE query: MERGE INTO merge_schema.target_table_xxxxxxx t USING (SELECT intermediate_result.id, intermediate_result.some_number FROM read_intermediate_result('merge_into_XXX_4000079'::text, 'binary'::citus_copy_format) intermediate_result(id integer, some_number integer)) s ON (t.id OPERATOR(pg_catalog.=) s.some_number) WHEN NOT MATCHED THEN INSERT (id, name) VALUES (s.some_number, 'parag'::text)>
DEBUG: Execute MERGE task list DEBUG: Execute MERGE task list
-- let's verify if data inserted to second shard of target. -- let's verify if data inserted to second shard of target.
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM target_table;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
@ -2535,7 +2535,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
-- single shard query given source_json is filtered and Postgres is smart to pushdown -- single shard query given source_json is filtered and Postgres is smart to pushdown
-- filter to the target_json as well -- filter to the target_json as well
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING (SELECT * FROM source_json WHERE id = 1) sdn USING (SELECT * FROM source_json WHERE id = 1) sdn
ON sda.id = sdn.id ON sda.id = sdn.id
WHEN NOT matched THEN WHEN NOT matched THEN
@ -2564,7 +2564,7 @@ SELECT * FROM target_json ORDER BY 1;
--SELECT * FROM target_json ORDER BY 1; --SELECT * FROM target_json ORDER BY 1;
-- join for source_json is happening at a different place -- join for source_json is happening at a different place
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z) USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z)
ON sda.id = s1.id AND s1.id = s2.id ON sda.id = s1.id AND s1.id = s2.id
WHEN NOT matched THEN WHEN NOT matched THEN
@ -2589,7 +2589,7 @@ SELECT * FROM target_json ORDER BY 1;
-- update JSON column -- update JSON column
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING source_json sdn USING source_json sdn
ON sda.id = sdn.id ON sda.id = sdn.id
WHEN matched THEN WHEN matched THEN

View File

@ -170,7 +170,7 @@ SELECT * FROM composite_type_partitioned_table WHERE id = 123;
123 | (123,456) 123 | (123,456)
(1 row) (1 row)
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -212,7 +212,7 @@ $cf$);
(1 row) (1 row)
INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type);
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -338,7 +338,7 @@ Sort
-> Seq Scan on lineitem_360000 lineitem -> Seq Scan on lineitem_360000 lineitem
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -365,7 +365,7 @@ SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
| |
BEGIN; BEGIN;
SET LOCAL citus.enable_repartition_joins TO true; SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1) Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 6 Task Count: 6
@ -378,7 +378,7 @@ Aggregate (actual rows=1 loops=1)
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 6 Merge Task Count: 6
-- Confirm repartiton join in distributed subplan works -- Confirm repartiton join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
SELECT count(*) from repartition; SELECT count(*) from repartition;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
@ -408,7 +408,7 @@ END;
DROP TABLE t1, t2; DROP TABLE t1, t2;
-- Test query text output, with ANALYZE ON -- Test query text output, with ANALYZE ON
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -519,7 +519,7 @@ Custom Scan (Citus Adaptive)
Filter: (l_partkey = 0) Filter: (l_partkey = 0)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
BEGIN; BEGIN;
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem UPDATE lineitem
SET l_suppkey = 12 SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0; WHERE l_orderkey = 1 AND l_partkey = 0;
@ -1074,7 +1074,7 @@ Custom Scan (Citus Adaptive)
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -1387,7 +1387,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5) Index Cond: (l_orderkey = 5)
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1) Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
Task Count: 1 Task Count: 1
Tuple data received from nodes: 30 bytes Tuple data received from nodes: 30 bytes
@ -1410,7 +1410,7 @@ Custom Scan (Citus Adaptive)
-> Seq Scan on lineitem_360000 lineitem -> Seq Scan on lineitem_360000 lineitem
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2 Task Count: 2
Tasks Shown: One of 2 Tasks Shown: One of 2
@ -1993,7 +1993,7 @@ SET citus.shard_count TO 4;
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
SELECT create_distributed_table('explain_analyze_test', 'a'); SELECT create_distributed_table('explain_analyze_test', 'a');
\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' \set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)'
\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)'
-- router SELECT -- router SELECT
EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1; EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1;
@ -2168,7 +2168,7 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK; ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats -- test EXPLAIN ANALYZE with non-text output formats
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
[ [
{ {
"Plan": { "Plan": {
@ -2224,7 +2224,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
} }
] ]
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
[ [
{ {
"Plan": { "Plan": {
@ -2271,7 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
} }
] ]
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
<Query> <Query>
<Plan> <Plan>
@ -2326,7 +2326,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
</Query> </Query>
</explain> </explain>
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk;
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
<Query> <Query>
<Plan> <Plan>
@ -2438,7 +2438,7 @@ Aggregate (actual rows=1 loops=1)
Sort Method: quicksort Memory: 25kB Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
SELECT public.explain_with_pg17_initplan_format($Q$ SELECT public.explain_with_pg17_initplan_format($Q$
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF)
SELECT count(distinct a) FROM dist_table SELECT count(distinct a) FROM dist_table
WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
$Q$); $Q$);
@ -3076,7 +3076,7 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a');
-- sleep for the shard that has the single row, so that -- sleep for the shard that has the single row, so that
-- will definitely be slower -- will definitely be slower
set citus.explain_analyze_sort_method to "taskId"; set citus.explain_analyze_sort_method to "taskId";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2 Task Count: 2
Tuple data received from nodes: 4 bytes Tuple data received from nodes: 4 bytes
@ -3086,7 +3086,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1) -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1)
set citus.explain_analyze_sort_method to "execution-time"; set citus.explain_analyze_sort_method to "execution-time";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2 Task Count: 2
Tuple data received from nodes: 4 bytes Tuple data received from nodes: 4 bytes
@ -3233,7 +3233,7 @@ SET search_path TO multi_explain;
CREATE TABLE test_subplans (x int primary key, y int); CREATE TABLE test_subplans (x int primary key, y int);
SELECT create_distributed_table('test_subplans','x'); SELECT create_distributed_table('test_subplans','x');
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
@ -3260,7 +3260,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
SELECT * FROM test_subplans; SELECT * FROM test_subplans;
1|2 1|2
-- Will fail with duplicate pk -- Will fail with duplicate pk
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038" ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038"
@ -3268,7 +3268,7 @@ DETAIL: Key (x)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx CONTEXT: while executing command on localhost:xxxxx
-- Test JSON format -- Test JSON format
TRUNCATE test_subplans; TRUNCATE test_subplans;
EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
[ [

View File

@ -338,7 +338,7 @@ Sort
-> Seq Scan on lineitem_360000 lineitem -> Seq Scan on lineitem_360000 lineitem
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -365,7 +365,7 @@ SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
| |
BEGIN; BEGIN;
SET LOCAL citus.enable_repartition_joins TO true; SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1) Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 6 Task Count: 6
@ -378,7 +378,7 @@ Aggregate (actual rows=1 loops=1)
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 6 Merge Task Count: 6
-- Confirm repartiton join in distributed subplan works -- Confirm repartiton join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
SELECT count(*) from repartition; SELECT count(*) from repartition;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
@ -408,7 +408,7 @@ END;
DROP TABLE t1, t2; DROP TABLE t1, t2;
-- Test query text output, with ANALYZE ON -- Test query text output, with ANALYZE ON
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -519,7 +519,7 @@ Custom Scan (Citus Adaptive)
Filter: (l_partkey = 0) Filter: (l_partkey = 0)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
BEGIN; BEGIN;
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem UPDATE lineitem
SET l_suppkey = 12 SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0; WHERE l_orderkey = 1 AND l_partkey = 0;
@ -1074,7 +1074,7 @@ Custom Scan (Citus Adaptive)
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -1387,7 +1387,7 @@ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5) -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
Index Cond: (l_orderkey = 5) Index Cond: (l_orderkey = 5)
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=3 loops=1) Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
Task Count: 1 Task Count: 1
Tuple data received from nodes: 30 bytes Tuple data received from nodes: 30 bytes
@ -1410,7 +1410,7 @@ Custom Scan (Citus Adaptive)
-> Seq Scan on lineitem_360000 lineitem -> Seq Scan on lineitem_360000 lineitem
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 2 Task Count: 2
Tasks Shown: One of 2 Tasks Shown: One of 2
@ -1993,7 +1993,7 @@ SET citus.shard_count TO 4;
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
SELECT create_distributed_table('explain_analyze_test', 'a'); SELECT create_distributed_table('explain_analyze_test', 'a');
\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' \set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)'
\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)'
-- router SELECT -- router SELECT
EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1; EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1;
@ -2168,7 +2168,7 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK; ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats -- test EXPLAIN ANALYZE with non-text output formats
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
[ [
{ {
"Plan": { "Plan": {
@ -2224,7 +2224,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
} }
] ]
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
[ [
{ {
"Plan": { "Plan": {
@ -2271,7 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
} }
] ]
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
<Query> <Query>
<Plan> <Plan>
@ -2326,7 +2326,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
</Query> </Query>
</explain> </explain>
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk;
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
<Query> <Query>
<Plan> <Plan>
@ -2433,7 +2433,7 @@ Aggregate (actual rows=1 loops=1)
Sort Method: quicksort Memory: 25kB Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
SELECT public.explain_with_pg17_initplan_format($Q$ SELECT public.explain_with_pg17_initplan_format($Q$
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF)
SELECT count(distinct a) FROM dist_table SELECT count(distinct a) FROM dist_table
WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
$Q$); $Q$);
@ -3065,7 +3065,7 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a');
-- sleep for the shard that has the single row, so that -- sleep for the shard that has the single row, so that
-- will definitely be slower -- will definitely be slower
set citus.explain_analyze_sort_method to "taskId"; set citus.explain_analyze_sort_method to "taskId";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2 Task Count: 2
Tuple data received from nodes: 4 bytes Tuple data received from nodes: 4 bytes
@ -3075,7 +3075,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1) -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1)
set citus.explain_analyze_sort_method to "execution-time"; set citus.explain_analyze_sort_method to "execution-time";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 2 Task Count: 2
Tuple data received from nodes: 4 bytes Tuple data received from nodes: 4 bytes
@ -3222,7 +3222,7 @@ SET search_path TO multi_explain;
CREATE TABLE test_subplans (x int primary key, y int); CREATE TABLE test_subplans (x int primary key, y int);
SELECT create_distributed_table('test_subplans','x'); SELECT create_distributed_table('test_subplans','x');
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
@ -3249,7 +3249,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
SELECT * FROM test_subplans; SELECT * FROM test_subplans;
1|2 1|2
-- Will fail with duplicate pk -- Will fail with duplicate pk
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038" ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038"
@ -3257,7 +3257,7 @@ DETAIL: Key (x)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx CONTEXT: while executing command on localhost:xxxxx
-- Test JSON format -- Test JSON format
TRUNCATE test_subplans; TRUNCATE test_subplans;
EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
[ [

View File

@ -905,7 +905,7 @@ $Q$);
(4 rows) (4 rows)
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator -- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
EXPLAIN (costs off, analyze on) EXPLAIN (costs off, analyze on, BUFFERS OFF)
INSERT INTO agg_events (user_id) INSERT INTO agg_events (user_id)
SELECT SELECT
raw_events_first.user_id raw_events_first.user_id

View File

@ -173,7 +173,7 @@ CREATE TABLE test_wal(a int, b int);
EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11); EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11);
ERROR: EXPLAIN option WAL requires ANALYZE ERROR: EXPLAIN option WAL requires ANALYZE
-- test WAL working properly for router queries -- test WAL working properly for router queries
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE, BUFFERS OFF)
INSERT INTO test_wal VALUES(1,11); INSERT INTO test_wal VALUES(1,11);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -192,7 +192,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row) (1 row)
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE, BUFFERS OFF)
INSERT INTO test_wal VALUES(2,22); INSERT INTO test_wal VALUES(2,22);
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -208,7 +208,7 @@ INSERT INTO test_wal VALUES(2,22);
-- Test WAL working for multi-shard query -- Test WAL working for multi-shard query
SET citus.explain_all_tasks TO on; SET citus.explain_all_tasks TO on;
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE, BUFFERS OFF)
INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -229,7 +229,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-- we don't get an error, hence we use explain_has_distributed_subplan. -- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan( SELECT public.explain_has_distributed_subplan(
$$ $$
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE, BUFFERS OFF)
WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *) WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *)
SELECT * FROM cte_1; SELECT * FROM cte_1;
$$ $$

View File

@ -1372,7 +1372,7 @@ $$
DECLARE ln text; DECLARE ln text;
BEGIN BEGIN
FOR ln IN FOR ln IN
EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || EXECUTE 'explain (analyze, timing off, summary off, costs off, buffers off) ' ||
query query
LOOP LOOP
ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');

View File

@ -1396,7 +1396,7 @@ DEBUG: Creating router plan
-- --
-- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b > -- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b >
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF)
INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2; INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1419,7 +1419,7 @@ DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Collecting INSERT ... SELECT results on coordinator
-- between a single-shard table and a table of different type -- between a single-shard table and a table of different type
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF)
INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table; INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -983,7 +983,7 @@ BEGIN;
ROLLBACK; ROLLBACK;
-- explain analyze should work on a single node -- explain analyze should work on a single node
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT * FROM test; SELECT * FROM test;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -994,7 +994,7 @@ BEGIN;
ROLLBACK; ROLLBACK;
-- explain analyze should work on a single node -- explain analyze should work on a single node
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT * FROM test; SELECT * FROM test;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -184,7 +184,7 @@ SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 =
set columnar.enable_custom_scan to 'off'; set columnar.enable_custom_scan to 'off';
set enable_seqscan to off; set enable_seqscan to off;
set seq_page_cost TO 10000000; set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on) EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -337,7 +337,7 @@ SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 =
set columnar.enable_custom_scan to 'off'; set columnar.enable_custom_scan to 'off';
set enable_seqscan to off; set enable_seqscan to off;
set seq_page_cost TO 10000000; set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on) EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -20,9 +20,9 @@ SELECT id, id, id, id, id,
id, id, id, id, id id, id, id, id, id
FROM t ORDER BY id; FROM t ORDER BY id;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
SET citus.explain_all_tasks TO ON; SET citus.explain_all_tasks TO ON;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE) SELECT id FROM t ORDER BY 1; EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, BUFFERS OFF) SELECT id FROM t ORDER BY 1;
INSERT INTO t SELECT count(*) from t; INSERT INTO t SELECT count(*) from t;

View File

@ -79,10 +79,10 @@ SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';
CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
SET columnar.enable_qual_pushdown = false; SET columnar.enable_qual_pushdown = false;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 123456; SELECT * FROM simple_chunk_filtering WHERE i > 123456;
SET columnar.enable_qual_pushdown TO DEFAULT; SET columnar.enable_qual_pushdown TO DEFAULT;
@ -90,7 +90,7 @@ SET columnar.enable_qual_pushdown TO DEFAULT;
TRUNCATE simple_chunk_filtering; TRUNCATE simple_chunk_filtering;
INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM simple_chunk_filtering WHERE i > 180000; SELECT * FROM simple_chunk_filtering WHERE i > 180000;
DROP TABLE simple_chunk_filtering; DROP TABLE simple_chunk_filtering;
@ -99,39 +99,39 @@ DROP TABLE simple_chunk_filtering;
CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
-- make next tests faster -- make next tests faster
TRUNCATE multi_column_chunk_filtering; TRUNCATE multi_column_chunk_filtering;
INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering WHERE a > 50000; SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT FROM multi_column_chunk_filtering; SELECT FROM multi_column_chunk_filtering;
BEGIN; BEGIN;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM multi_column_chunk_filtering; SELECT * FROM multi_column_chunk_filtering;
ROLLBACK; ROLLBACK;
CREATE TABLE another_columnar_table(x int, y int) USING columnar; CREATE TABLE another_columnar_table(x int, y int) USING columnar;
INSERT INTO another_columnar_table SELECT generate_series(0,5); INSERT INTO another_columnar_table SELECT generate_series(0,5);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
EXPLAIN (costs off, timing off, summary off) EXPLAIN (costs off, timing off, summary off)
@ -219,7 +219,7 @@ set enable_hashjoin=false;
set enable_material=false; set enable_material=false;
-- test different kinds of expressions -- test different kinds of expressions
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest WHERE SELECT * FROM r1, coltest WHERE
id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
SELECT * FROM r1, coltest WHERE SELECT * FROM r1, coltest WHERE
@ -227,7 +227,7 @@ SELECT * FROM r1, coltest WHERE
-- test equivalence classes -- test equivalence classes
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
id4 = id5 AND id5 = id6 AND id6 = id7; id4 = id5 AND id5 = id6 AND id6 = id7;
@ -258,7 +258,7 @@ set columnar.planner_debug_level to default;
set columnar.planner_debug_level = 'notice'; set columnar.planner_debug_level = 'notice';
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, r2, r3, coltest WHERE SELECT * FROM r1, r2, r3, coltest WHERE
id1 = id2 AND id2 = id3 AND id3 = id AND id1 = id2 AND id2 = id3 AND id3 = id AND
n1 > x1 AND n2 > x2 AND n3 > x3; n1 > x1 AND n2 > x2 AND n3 > x3;
@ -270,7 +270,7 @@ SELECT * FROM r1, r2, r3, coltest WHERE
n1 > x1 AND n2 > x2 AND n3 > x3; n1 > x1 AND n2 > x2 AND n3 > x3;
-- test partitioning parameterization -- test partitioning parameterization
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM r1, coltest_part WHERE SELECT * FROM r1, coltest_part WHERE
id1 = id AND n1 > x1; id1 = id AND n1 > x1;
SELECT * FROM r1, coltest_part WHERE SELECT * FROM r1, coltest_part WHERE
@ -300,7 +300,7 @@ END;
$$; $$;
select * from coltest where x3 = vol(); select * from coltest where x3 = vol();
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
set columnar.planner_debug_level to default; set columnar.planner_debug_level to default;
@ -391,32 +391,32 @@ COMMIT;
SET columnar.max_custom_scan_paths TO 50; SET columnar.max_custom_scan_paths TO 50;
SET columnar.qual_pushdown_correlation_threshold TO 0.0; SET columnar.qual_pushdown_correlation_threshold TO 0.0;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
SET hash_mem_multiplier = 1.0; SET hash_mem_multiplier = 1.0;
SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$ SELECT columnar_test_helpers.explain_with_pg16_subplan_format($Q$
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where SELECT sum(a) FROM pushdown_test where
( (
a > random() a > random()
@ -445,7 +445,7 @@ or
create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
$$ BEGIN RETURN 1+arg; END; $$; $$ BEGIN RETURN 1+arg; END; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
@ -476,7 +476,7 @@ BEGIN;
INSERT INTO pushdown_test VALUES(7, 'USA'); INSERT INTO pushdown_test VALUES(7, 'USA');
INSERT INTO pushdown_test VALUES(8, 'ZW'); INSERT INTO pushdown_test VALUES(8, 'ZW');
END; END;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
@ -488,7 +488,7 @@ BEGIN
return 'AL'; return 'AL';
END; END;
$$; $$;
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());

View File

@ -6,7 +6,7 @@ CREATE TABLE test_cursor (a int, b int) USING columnar;
INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j; INSERT INTO test_cursor SELECT i, j FROM generate_series(0, 100)i, generate_series(100, 200)j;
-- A case where the WHERE clause might filter out some chunks -- A case where the WHERE clause might filter out some chunks
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a = 25; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a = 25;
BEGIN; BEGIN;
DECLARE a_25 SCROLL CURSOR DECLARE a_25 SCROLL CURSOR
FOR SELECT * FROM test_cursor WHERE a = 25 ORDER BY 2; FOR SELECT * FROM test_cursor WHERE a = 25 ORDER BY 2;
@ -33,7 +33,7 @@ UPDATE test_cursor SET a = 8000 WHERE CURRENT OF a_25;
COMMIT; COMMIT;
-- A case where the WHERE clause doesn't filter out any chunks -- A case where the WHERE clause doesn't filter out any chunks
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM test_cursor WHERE a > 25; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM test_cursor WHERE a > 25;
BEGIN; BEGIN;
DECLARE a_25 SCROLL CURSOR DECLARE a_25 SCROLL CURSOR
FOR SELECT * FROM test_cursor WHERE a > 25 ORDER BY 1, 2; FOR SELECT * FROM test_cursor WHERE a > 25 ORDER BY 1, 2;

View File

@ -361,12 +361,12 @@ CREATE INDEX uncorrelated_idx ON uncorrelated(x);
ANALYZE correlated, uncorrelated; ANALYZE correlated, uncorrelated;
-- should choose chunk group filtering; selective and correlated -- should choose chunk group filtering; selective and correlated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910;
SELECT * FROM correlated WHERE x = 78910; SELECT * FROM correlated WHERE x = 78910;
-- should choose index scan; selective but uncorrelated -- should choose index scan; selective but uncorrelated
EXPLAIN (analyze on, costs off, timing off, summary off) EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF)
SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 78910;
SELECT * FROM uncorrelated WHERE x = 78910; SELECT * FROM uncorrelated WHERE x = 78910;

View File

@ -102,42 +102,42 @@ SELECT * FROM t ORDER BY a;
PREPARE p0 AS INSERT INTO t VALUES (8, 8), (9, 9); PREPARE p0 AS INSERT INTO t VALUES (8, 8), (9, 9);
EXPLAIN (COSTS OFF) EXECUTE p0; EXPLAIN (COSTS OFF) EXECUTE p0;
EXECUTE p0; EXECUTE p0;
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p0; EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p0;
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
-- INSERT INTO with 1 param -- INSERT INTO with 1 param
PREPARE p1(int) AS INSERT INTO t VALUES (10, $1), (11, $1+2); PREPARE p1(int) AS INSERT INTO t VALUES (10, $1), (11, $1+2);
EXPLAIN (COSTS OFF) EXECUTE p1(16); EXPLAIN (COSTS OFF) EXECUTE p1(16);
EXECUTE p1(16); EXECUTE p1(16);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p1(20); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p1(20);
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
-- INSERT INTO with >1 params -- INSERT INTO with >1 params
PREPARE p2(int, int) AS INSERT INTO t VALUES (12, $1), (13, $1+2), (14, $2), ($1+1, $2+1); PREPARE p2(int, int) AS INSERT INTO t VALUES (12, $1), (13, $1+2), (14, $2), ($1+1, $2+1);
EXPLAIN (COSTS OFF) EXECUTE p2(30, 40); EXPLAIN (COSTS OFF) EXECUTE p2(30, 40);
EXECUTE p2(30, 40); EXECUTE p2(30, 40);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p2(50, 60); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p2(50, 60);
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
-- SELECT with 0 params -- SELECT with 0 params
PREPARE p3 AS SELECT * FROM t WHERE a = 8; PREPARE p3 AS SELECT * FROM t WHERE a = 8;
EXPLAIN (COSTS OFF) EXECUTE p3; EXPLAIN (COSTS OFF) EXECUTE p3;
EXECUTE p3; EXECUTE p3;
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p3; EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p3;
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
-- SELECT with 1 param -- SELECT with 1 param
PREPARE p5(int) AS SELECT * FROM t WHERE a = $1; PREPARE p5(int) AS SELECT * FROM t WHERE a = $1;
EXPLAIN (COSTS OFF) EXECUTE p5(16); EXPLAIN (COSTS OFF) EXECUTE p5(16);
EXECUTE p5(16); EXECUTE p5(16);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p5(9); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p5(9);
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
-- SELECT with >1 params -- SELECT with >1 params
PREPARE p6(int, int) AS SELECT * FROM t WHERE a = $1+1 AND b = $2+1; PREPARE p6(int, int) AS SELECT * FROM t WHERE a = $1+1 AND b = $2+1;
EXPLAIN (COSTS OFF) EXECUTE p6(30, 40); EXPLAIN (COSTS OFF) EXECUTE p6(30, 40);
EXECUTE p6(30, 40); EXECUTE p6(30, 40);
EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off) EXECUTE p6(50, 60); EXPLAIN (ANALYZE true, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) EXECUTE p6(50, 60);
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
DROP TABLE t; DROP TABLE t;

View File

@ -66,7 +66,7 @@ ROLLBACK;
-- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution -- INSERT..SELECT with re-partitioning in EXPLAIN ANALYZE after local execution
BEGIN; BEGIN;
INSERT INTO test VALUES (0,1000); INSERT INTO test VALUES (0,1000);
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) INSERT INTO test (x, y) SELECT y, x FROM test; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) INSERT INTO test (x, y) SELECT y, x FROM test;
ROLLBACK; ROLLBACK;
-- DDL connects to localhost -- DDL connects to localhost

View File

@ -220,7 +220,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
SELECT 1 FROM r WHERE z < 3; SELECT 1 FROM r WHERE z < 3;

View File

@ -185,7 +185,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM distributed_table WHERE key = 1 AND age = 20;
EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table) WITH r AS ( SELECT GREATEST(random(), 2) z,* FROM distributed_table)
SELECT 1 FROM r WHERE z < 3; SELECT 1 FROM r WHERE z < 3;

View File

@ -1282,7 +1282,7 @@ WHEN NOT MATCHED THEN
VALUES (s.some_number, 'parag'); VALUES (s.some_number, 'parag');
-- let's verify if data inserted to second shard of target. -- let's verify if data inserted to second shard of target.
EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; EXPLAIN (analyze on, costs off, timing off, summary off, BUFFERS OFF) SELECT * FROM target_table;
-- let's verify target data too. -- let's verify target data too.
SELECT * FROM target_table; SELECT * FROM target_table;
@ -1577,7 +1577,7 @@ SELECT create_distributed_table('target_json','id'), create_distributed_table('s
-- single shard query given source_json is filtered and Postgres is smart to pushdown -- single shard query given source_json is filtered and Postgres is smart to pushdown
-- filter to the target_json as well -- filter to the target_json as well
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING (SELECT * FROM source_json WHERE id = 1) sdn USING (SELECT * FROM source_json WHERE id = 1) sdn
ON sda.id = sdn.id ON sda.id = sdn.id
WHEN NOT matched THEN WHEN NOT matched THEN
@ -1597,7 +1597,7 @@ SELECT * FROM target_json ORDER BY 1;
-- join for source_json is happening at a different place -- join for source_json is happening at a different place
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z) USING source_json s1 LEFT JOIN (SELECT * FROM source_json) s2 USING(z)
ON sda.id = s1.id AND s1.id = s2.id ON sda.id = s1.id AND s1.id = s2.id
WHEN NOT matched THEN WHEN NOT matched THEN
@ -1607,7 +1607,7 @@ SELECT * FROM target_json ORDER BY 1;
-- update JSON column -- update JSON column
SELECT public.coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (ANALYZE ON, TIMING OFF) MERGE INTO target_json sda EXPLAIN (ANALYZE ON, TIMING OFF, BUFFERS OFF) MERGE INTO target_json sda
USING source_json sdn USING source_json sdn
ON sda.id = sdn.id ON sda.id = sdn.id
WHEN matched THEN WHEN matched THEN

View File

@ -125,7 +125,7 @@ $cf$);
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
SELECT * FROM composite_type_partitioned_table WHERE id = 123; SELECT * FROM composite_type_partitioned_table WHERE id = 123;
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_composite_type);
SELECT run_command_on_coordinator_and_workers($cf$ SELECT run_command_on_coordinator_and_workers($cf$
@ -144,7 +144,7 @@ SELECT run_command_on_coordinator_and_workers($cf$
$cf$); $cf$);
INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (456, '(456, 678)'::other_composite_type);
EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (ANALYZE TRUE, COSTS FALSE, VERBOSE FALSE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type); INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_composite_type);

View File

@ -130,7 +130,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -142,9 +142,9 @@ CREATE TABLE t2(a int, b int);
SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
BEGIN; BEGIN;
SET LOCAL citus.enable_repartition_joins TO true; SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
-- Confirm repartition join in distributed subplan works -- Confirm repartition join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
SELECT count(*) from repartition; SELECT count(*) from repartition;
END; END;
@ -152,7 +152,7 @@ DROP TABLE t1, t2;
-- Test query text output, with ANALYZE ON -- Test query text output, with ANALYZE ON
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -184,7 +184,7 @@ EXPLAIN (COSTS FALSE)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
BEGIN; BEGIN;
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
UPDATE lineitem UPDATE lineitem
SET l_suppkey = 12 SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0; WHERE l_orderkey = 1 AND l_partkey = 0;
@ -488,7 +488,7 @@ EXPLAIN (COSTS FALSE)
-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
SELECT public.plan_normalize_memory($Q$ SELECT public.plan_normalize_memory($Q$
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT l_quantity, count(*) count_quantity FROM lineitem SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
$Q$); $Q$);
@ -597,7 +597,7 @@ EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
-- at least make sure to fail without crashing -- at least make sure to fail without crashing
PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
EXPLAIN EXECUTE router_executor_query_param(5); EXPLAIN EXECUTE router_executor_query_param(5);
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5);
\set VERBOSITY TERSE \set VERBOSITY TERSE
PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1; PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
@ -605,7 +605,7 @@ BEGIN;
EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5); EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5);
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5); EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5);
ROLLBACK; ROLLBACK;
\set VERBOSITY DEFAULT \set VERBOSITY DEFAULT
@ -865,7 +865,7 @@ SET citus.shard_count TO 4;
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
SELECT create_distributed_table('explain_analyze_test', 'a'); SELECT create_distributed_table('explain_analyze_test', 'a');
\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)' \set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)'
\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' \set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)'
-- router SELECT -- router SELECT
@ -928,16 +928,16 @@ ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats -- test EXPLAIN ANALYZE with non-text output formats
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk;
DROP TABLE explain_pk; DROP TABLE explain_pk;
@ -960,7 +960,7 @@ EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table; SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
SELECT public.explain_with_pg17_initplan_format($Q$ SELECT public.explain_with_pg17_initplan_format($Q$
EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off) EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF)
SELECT count(distinct a) FROM dist_table SELECT count(distinct a) FROM dist_table
WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
$Q$); $Q$);
@ -1114,9 +1114,9 @@ SELECT create_distributed_table('explain_analyze_execution_time', 'a');
-- sleep for the shard that has the single row, so that -- sleep for the shard that has the single row, so that
-- will definitely be slower -- will definitely be slower
set citus.explain_analyze_sort_method to "taskId"; set citus.explain_analyze_sort_method to "taskId";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
set citus.explain_analyze_sort_method to "execution-time"; set citus.explain_analyze_sort_method to "execution-time";
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time; EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
-- reset back -- reset back
reset citus.explain_analyze_sort_method; reset citus.explain_analyze_sort_method;
DROP TABLE explain_analyze_execution_time; DROP TABLE explain_analyze_execution_time;
@ -1171,7 +1171,7 @@ SET search_path TO multi_explain;
CREATE TABLE test_subplans (x int primary key, y int); CREATE TABLE test_subplans (x int primary key, y int);
SELECT create_distributed_table('test_subplans','x'); SELECT create_distributed_table('test_subplans','x');
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
@ -1179,13 +1179,13 @@ SELECT * FROM a;
SELECT * FROM test_subplans; SELECT * FROM test_subplans;
-- Will fail with duplicate pk -- Will fail with duplicate pk
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;
-- Test JSON format -- Test JSON format
TRUNCATE test_subplans; TRUNCATE test_subplans;
EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *) WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
SELECT * FROM a; SELECT * FROM a;

View File

@ -682,7 +682,7 @@ SET client_min_messages TO WARNING;
$Q$); $Q$);
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator -- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
EXPLAIN (costs off, analyze on) EXPLAIN (costs off, analyze on, BUFFERS OFF)
INSERT INTO agg_events (user_id) INSERT INTO agg_events (user_id)
SELECT SELECT
raw_events_first.user_id raw_events_first.user_id

View File

@ -93,15 +93,15 @@ CREATE TABLE test_wal(a int, b int);
-- test WAL without ANALYZE, this should raise an error -- test WAL without ANALYZE, this should raise an error
EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11); EXPLAIN (WAL) INSERT INTO test_wal VALUES(1,11);
-- test WAL working properly for router queries -- test WAL working properly for router queries
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
INSERT INTO test_wal VALUES(1,11); INSERT INTO test_wal VALUES(1,11);
SELECT create_distributed_table('test_wal', 'a'); SELECT create_distributed_table('test_wal', 'a');
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
INSERT INTO test_wal VALUES(2,22); INSERT INTO test_wal VALUES(2,22);
-- Test WAL working for multi-shard query -- Test WAL working for multi-shard query
SET citus.explain_all_tasks TO on; SET citus.explain_all_tasks TO on;
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *; INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-- make sure WAL works in distributed subplans -- make sure WAL works in distributed subplans
@ -109,7 +109,7 @@ INSERT INTO test_wal VALUES(3,33),(4,44),(5,55) RETURNING *;
-- we don't get an error, hence we use explain_has_distributed_subplan. -- we don't get an error, hence we use explain_has_distributed_subplan.
SELECT public.explain_has_distributed_subplan( SELECT public.explain_has_distributed_subplan(
$$ $$
EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE) EXPLAIN (ANALYZE TRUE, WAL TRUE, COSTS FALSE, SUMMARY FALSE, BUFFERS FALSE, TIMING FALSE)
WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *) WITH cte_1 AS (INSERT INTO test_wal VALUES(6,66),(7,77),(8,88) RETURNING *)
SELECT * FROM cte_1; SELECT * FROM cte_1;
$$ $$

View File

@ -894,7 +894,7 @@ $$
DECLARE ln text; DECLARE ln text;
BEGIN BEGIN
FOR ln IN FOR ln IN
EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || EXECUTE 'explain (analyze, timing off, summary off, costs off, buffers off) ' ||
query query
LOOP LOOP
ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');

View File

@ -618,7 +618,7 @@ JOIN LATERAL (
-- --
-- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b > -- (*): < SELECT a, b > vs < SELECT table_name.a, table_name.b >
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF)
INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2; INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2;
SET client_min_messages TO DEBUG2; SET client_min_messages TO DEBUG2;
@ -627,7 +627,7 @@ INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c2_t1;
-- between a single-shard table and a table of different type -- between a single-shard table and a table of different type
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE) EXPLAIN (ANALYZE TRUE, TIMING FALSE, COSTS FALSE, SUMMARY FALSE, VERBOSE FALSE, BUFFERS OFF)
INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table; INSERT INTO nullkey_c1_t1 SELECT * FROM reference_table;
SET client_min_messages TO DEBUG2; SET client_min_messages TO DEBUG2;

View File

@ -591,7 +591,7 @@ BEGIN;
ROLLBACK; ROLLBACK;
-- explain analyze should work on a single node -- explain analyze should work on a single node
EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
SELECT * FROM test; SELECT * FROM test;
-- common utility command -- common utility command

View File

@ -65,7 +65,7 @@ set columnar.enable_custom_scan to 'off';
set enable_seqscan to off; set enable_seqscan to off;
set seq_page_cost TO 10000000; set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on) EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
-- make sure that we re-enable columnar scan -- make sure that we re-enable columnar scan

View File

@ -253,7 +253,7 @@ set columnar.enable_custom_scan to 'off';
set enable_seqscan to off; set enable_seqscan to off;
set seq_page_cost TO 10000000; set seq_page_cost TO 10000000;
EXPLAIN (costs off, timing off, summary off, analyze on) EXPLAIN (costs off, timing off, summary off, analyze on, BUFFERS OFF)
SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1]; SELECT count(*) FROM less_common_data_types_table WHERE dist_key = 1 AND col1 = ARRAY[1];
-- make sure that we re-enable columnar scan -- make sure that we re-enable columnar scan