diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 7bed04edf..a47cf6348 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -301,5 +301,6 @@ s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA p # (This is not preprocessor directive, but a reminder for the developer that will drop PG14&15 support ) s/, password_required=false//g +s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g #endif /* PG_VERSION_NUM < PG_VERSION_16 */ diff --git a/src/test/regress/expected/columnar_chunk_filtering.out b/src/test/regress/expected/columnar_chunk_filtering.out index 0d0534ccc..3acdd957d 100644 --- a/src/test/regress/expected/columnar_chunk_filtering.out +++ b/src/test/regress/expected/columnar_chunk_filtering.out @@ -1,6 +1,10 @@ -- -- Test chunk filtering in columnar using min/max values in stripe skip lists. -- +-- It has an alternative test output file +-- because PG16 changed the order of some Filters in EXPLAIN +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d -- -- filtered_row_count returns number of rows filtered by the WHERE clause. -- If chunks get filtered by columnar, less rows are passed to WHERE @@ -370,10 +374,10 @@ SELECT * FROM r1, coltest WHERE Filter: ((n1 % 10) = 0) Rows Removed by Filter: 1 -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4) - Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text)) + Filter: ((x1 > 15000) AND (id = r1.id1) AND ((x1)::text > '000000'::text)) Rows Removed by Filter: 999 Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id)) + Columnar Chunk Group Filters: ((x1 > 15000) AND (id = r1.id1)) Columnar Chunk Groups Removed by Filter: 19 (10 rows) @@ -413,10 +417,10 @@ SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE -> Seq Scan on r2 (actual rows=5 loops=5) -> Seq Scan on r3 (actual rows=5 loops=5) -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) - Filter: (r1.id1 = id) + Filter: (id = r1.id1) Rows Removed by Filter: 999 Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: (r1.id1 = id) + Columnar Chunk Group Filters: (id = r1.id1) Columnar Chunk Groups Removed by Filter: 19 -> Seq Scan on r4 (actual rows=1 loops=5) -> Seq Scan on r5 (actual rows=1 loops=1) @@ -588,10 +592,10 @@ DETAIL: parameterized by rels {r3}; 2 clauses pushed down -> Nested Loop (actual rows=3 loops=1) -> Seq Scan on r1 (actual rows=5 loops=1) -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) - Filter: ((r1.n1 > x1) AND (r1.id1 = id)) + Filter: ((r1.n1 > x1) AND (id = r1.id1)) Rows Removed by Filter: 799 Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id)) + Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1)) Columnar Chunk Groups Removed by Filter: 19 -> Seq Scan on r2 (actual rows=5 loops=3) -> Seq Scan on r3 (actual rows=5 loops=3) @@ -618,10 +622,10 @@ SELECT * FROM r1, coltest_part WHERE -> Seq Scan on r1 (actual rows=5 loops=1) -> Append (actual rows=1 loops=5) -> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3) - Filter: ((r1.n1 > x1) AND (r1.id1 = id)) + Filter: ((r1.n1 > x1) AND (id = r1.id1)) Rows Removed by Filter: 999 Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: 
((r1.n1 > x1) AND (r1.id1 = id)) + Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1)) Columnar Chunk Groups Removed by Filter: 9 -> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2) Filter: ((r1.n1 > x1) AND (r1.id1 = id)) diff --git a/src/test/regress/expected/columnar_chunk_filtering_0.out b/src/test/regress/expected/columnar_chunk_filtering_0.out new file mode 100644 index 000000000..746f3406f --- /dev/null +++ b/src/test/regress/expected/columnar_chunk_filtering_0.out @@ -0,0 +1,1138 @@ +-- +-- Test chunk filtering in columnar using min/max values in stripe skip lists. +-- +-- It has an alternative test output file +-- because PG16 changed the order of some Filters in EXPLAIN +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d +-- +-- filtered_row_count returns number of rows filtered by the WHERE clause. +-- If chunks get filtered by columnar, less rows are passed to WHERE +-- clause, so this function should return a lower number. +-- +CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS +$$ + DECLARE + result bigint; + rec text; + BEGIN + result := 0; + + FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP + IF rec ~ '^\s+Rows Removed by Filter' then + result := regexp_replace(rec, '[^0-9]*', '', 'g'); + END IF; + END LOOP; + + RETURN result; + END; +$$ LANGUAGE PLPGSQL; +set columnar.qual_pushdown_correlation = 0.0; +-- Create and load data +-- chunk_group_row_limit '1000', stripe_row_limit '2000' +set columnar.stripe_row_limit = 2000; +set columnar.chunk_group_row_limit = 1000; +CREATE TABLE test_chunk_filtering (a int) + USING columnar; +INSERT INTO test_chunk_filtering SELECT generate_series(1,10000); +-- Verify that filtered_row_count is less than 1000 for the following queries +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering'); + filtered_row_count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200'); + filtered_row_count +--------------------------------------------------------------------- + 801 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 200'); + filtered_row_count +--------------------------------------------------------------------- + 200 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 9900'); + filtered_row_count +--------------------------------------------------------------------- + 101 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 9900'); + filtered_row_count +--------------------------------------------------------------------- + 900 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0'); + filtered_row_count +--------------------------------------------------------------------- + 0 +(1 row) + +-- Verify that filtered_row_count is less than 2000 for the following queries +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 1 AND 10'); + filtered_row_count +--------------------------------------------------------------------- + 990 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010'); + filtered_row_count +--------------------------------------------------------------------- + 1979 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM 
test_chunk_filtering WHERE a BETWEEN -10 AND 0'); + filtered_row_count +--------------------------------------------------------------------- + 0 +(1 row) + +-- Load data for second time and verify that filtered_row_count is exactly twice as before +INSERT INTO test_chunk_filtering SELECT generate_series(1,10000); +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200'); + filtered_row_count +--------------------------------------------------------------------- + 1602 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0'); + filtered_row_count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010'); + filtered_row_count +--------------------------------------------------------------------- + 3958 +(1 row) + +set columnar.stripe_row_limit to default; +set columnar.chunk_group_row_limit to default; +-- Verify that we are fine with collations which use a different alphabet order +CREATE TABLE collation_chunk_filtering_test(A text collate "da_DK") + USING columnar; +COPY collation_chunk_filtering_test FROM STDIN; +SELECT * FROM collation_chunk_filtering_test WHERE A > 'B'; + a +--------------------------------------------------------------------- + Å +(1 row) + +CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; +INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT * FROM simple_chunk_filtering WHERE i > 123456; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1) + Filter: (i > 123456) + Rows Removed by Filter: 3457 + Columnar Projected Columns: i + Columnar Chunk Group Filters: (i > 123456) + Columnar Chunk Groups Removed by Filter: 12 +(6 rows) + +SET columnar.enable_qual_pushdown = false; +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT * FROM simple_chunk_filtering WHERE i > 123456; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1) + Filter: (i > 123456) + Rows Removed by Filter: 123457 + Columnar Projected Columns: i +(4 rows) + +SET columnar.enable_qual_pushdown TO DEFAULT; +-- https://github.com/citusdata/citus/issues/4555 +TRUNCATE simple_chunk_filtering; +INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); +COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT * FROM simple_chunk_filtering WHERE i > 180000; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=20000 loops=1) + Filter: (i > 180000) + Rows Removed by Filter: 1 + Columnar Projected Columns: i + Columnar Chunk Group Filters: (i > 180000) + Columnar Chunk Groups Removed by Filter: 18 +(6 rows) + +DROP TABLE simple_chunk_filtering; +CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; +INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; + QUERY PLAN 
+--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1) + Filter: (a > 50000) + Rows Removed by Filter: 1 + Columnar Projected Columns: a + Columnar Chunk Group Filters: (a > 50000) + Columnar Chunk Groups Removed by Filter: 5 +(7 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1) + Filter: ((a > 50000) AND (b > 50000)) + Rows Removed by Filter: 1 + Columnar Projected Columns: a, b + Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000)) + Columnar Chunk Groups Removed by Filter: 5 +(7 rows) + +-- make next tests faster +TRUNCATE multi_column_chunk_filtering; +INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) + Filter: ((a > 50000) AND (b > 50000)) + Columnar Projected Columns: a, b + Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000)) + Columnar Chunk Groups Removed by Filter: 1 +(5 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) + Filter: (b > 50000) + Rows Removed by Filter: 6 + Columnar Projected Columns: a, b + Columnar Chunk Group Filters: (b > 50000) + Columnar Chunk Groups Removed by Filter: 0 +(6 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT FROM multi_column_chunk_filtering WHERE a > 50000; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) + Filter: (a > 50000) + Columnar Projected Columns: a + Columnar Chunk Group Filters: (a > 50000) + Columnar Chunk Groups Removed by Filter: 1 +(5 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT FROM multi_column_chunk_filtering; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1) + Columnar Projected Columns: +(2 rows) + +BEGIN; + ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; + ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; + EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT * FROM multi_column_chunk_filtering; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1) + Columnar Projected Columns: +(2 rows) + +ROLLBACK; +CREATE TABLE another_columnar_table(x int, y int) USING columnar; +INSERT INTO another_columnar_table SELECT generate_series(0,5); +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; + 
QUERY PLAN +--------------------------------------------------------------------- + Nested Loop (actual rows=24 loops=1) + -> Custom Scan (ColumnarScan) on another_columnar_table (actual rows=4 loops=1) + Filter: (x > 1) + Rows Removed by Filter: 2 + Columnar Projected Columns: x, y + Columnar Chunk Group Filters: (x > 1) + Columnar Chunk Groups Removed by Filter: 0 + -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=4) + Columnar Projected Columns: a +(9 rows) + +EXPLAIN (costs off, timing off, summary off) + SELECT y, * FROM another_columnar_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on another_columnar_table + Columnar Projected Columns: x, y +(2 rows) + +EXPLAIN (costs off, timing off, summary off) + SELECT *, x FROM another_columnar_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on another_columnar_table + Columnar Projected Columns: x, y +(2 rows) + +EXPLAIN (costs off, timing off, summary off) + SELECT y, another_columnar_table FROM another_columnar_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on another_columnar_table + Columnar Projected Columns: x, y +(2 rows) + +EXPLAIN (costs off, timing off, summary off) + SELECT another_columnar_table, x FROM another_columnar_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on another_columnar_table + Columnar Projected Columns: x, y +(2 rows) + +DROP TABLE multi_column_chunk_filtering, another_columnar_table; +-- +-- https://github.com/citusdata/citus/issues/4780 +-- +create table part_table (id int) partition by range (id); +create table part_1_row partition of part_table for values from (150000) to (160000); +create table part_2_columnar partition of part_table for values from (0) to (150000) using columnar; +insert into part_table select generate_series(1,159999); +select filtered_row_count('select count(*) from part_table where id > 75000'); + filtered_row_count +--------------------------------------------------------------------- + 5000 +(1 row) + +drop table part_table; +-- test join parameterization +set columnar.stripe_row_limit = 2000; +set columnar.chunk_group_row_limit = 1000; +create table r1(id1 int, n1 int); -- row +create table r2(id2 int, n2 int); -- row +create table r3(id3 int, n3 int); -- row +create table r4(id4 int, n4 int); -- row +create table r5(id5 int, n5 int); -- row +create table r6(id6 int, n6 int); -- row +create table r7(id7 int, n7 int); -- row +create table coltest(id int, x1 int, x2 int, x3 int) using columnar; +create table coltest_part(id int, x1 int, x2 int, x3 int) + partition by range (id); +create table coltest_part0 + partition of coltest_part for values from (0) to (10000) + using columnar; +create table coltest_part1 + partition of coltest_part for values from (10000) to (20000); -- row +set columnar.stripe_row_limit to default; +set columnar.chunk_group_row_limit to default; +insert into r1 values(1234, 12350); +insert into r1 values(4567, 45000); +insert into r1 values(9101, 176000); +insert into r1 values(14202, 7); +insert into r1 values(18942, 189430); +insert into r2 values(1234, 123502); +insert into r2 values(4567, 450002); +insert into r2 values(9101, 1760002); +insert into r2 values(14202, 72); +insert into r2 values(18942, 1894302); +insert into r3 values(1234, 
1235075); +insert into r3 values(4567, 4500075); +insert into r3 values(9101, 17600075); +insert into r3 values(14202, 775); +insert into r3 values(18942, 18943075); +insert into r4 values(1234, -1); +insert into r5 values(1234, -1); +insert into r6 values(1234, -1); +insert into r7 values(1234, -1); +insert into coltest + select g, g*10, g*100, g*1000 from generate_series(0, 19999) g; +insert into coltest_part + select g, g*10, g*100, g*1000 from generate_series(0, 19999) g; +ANALYZE r1, r2, r3, coltest, coltest_part; +-- force nested loop +set enable_mergejoin=false; +set enable_hashjoin=false; +set enable_material=false; +-- test different kinds of expressions +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM r1, coltest WHERE + id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop (actual rows=3 loops=1) + -> Seq Scan on r1 (actual rows=4 loops=1) + Filter: ((n1 % 10) = 0) + Rows Removed by Filter: 1 + -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4) + Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text)) + Rows Removed by Filter: 999 + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id)) + Columnar Chunk Groups Removed by Filter: 19 +(10 rows) + +SELECT * FROM r1, coltest WHERE + id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; + id1 | n1 | id | x1 | x2 | x3 +--------------------------------------------------------------------- + 4567 | 45000 | 4567 | 45670 | 456700 | 4567000 + 9101 | 176000 | 9101 | 91010 | 910100 | 9101000 + 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000 +(3 rows) + +-- test equivalence classes +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE + id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND + id4 = id5 AND id5 = id6 AND id6 = id7; + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + Join Filter: (coltest.id = r7.id7) + -> Nested Loop (actual rows=1 loops=1) + Join Filter: (coltest.id = r6.id6) + -> Nested Loop (actual rows=1 loops=1) + Join Filter: (coltest.id = r5.id5) + -> Nested Loop (actual rows=1 loops=1) + Join Filter: (coltest.id = r4.id4) + Rows Removed by Join Filter: 4 + -> Nested Loop (actual rows=5 loops=1) + -> Nested Loop (actual rows=5 loops=1) + Join Filter: (r1.id1 = r3.id3) + Rows Removed by Join Filter: 20 + -> Nested Loop (actual rows=5 loops=1) + Join Filter: (r1.id1 = r2.id2) + Rows Removed by Join Filter: 20 + -> Seq Scan on r1 (actual rows=5 loops=1) + -> Seq Scan on r2 (actual rows=5 loops=5) + -> Seq Scan on r3 (actual rows=5 loops=5) + -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) + Filter: (r1.id1 = id) + Rows Removed by Filter: 999 + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: (r1.id1 = id) + Columnar Chunk Groups Removed by Filter: 19 + -> Seq Scan on r4 (actual rows=1 loops=5) + -> Seq Scan on r5 (actual rows=1 loops=1) + -> Seq Scan on r6 (actual rows=1 loops=1) + -> Seq Scan on r7 (actual rows=1 loops=1) +(29 rows) + +SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE + id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND + id4 = id5 AND id5 = id6 AND id6 = id7; + id1 | n1 | id2 | n2 | id3 | n3 | id4 | n4 | id5 | n5 | id6 | n6 | id7 | n7 | id | x1 | x2 | x3 
+--------------------------------------------------------------------- + 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | 12340 | 123400 | 1234000 +(1 row) + +-- test path generation with different thresholds +set columnar.planner_debug_level = 'notice'; +set columnar.max_custom_scan_paths to 10; +EXPLAIN (costs off, timing off, summary off) + SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE + c1.id = c2.id and c1.id = c3.id and c1.id = c4.id; +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c2}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c2, c3, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: parameterized by rels {c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c1}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c1, c3, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: parameterized by rels {c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c1}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c1, c2, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c2}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: parameterized by rels {c4}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c1}; 1 clauses pushed down 
+NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c1, c2, c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c2}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: parameterized by rels {c3}; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop + -> Nested Loop + -> Nested Loop + -> Custom Scan (ColumnarScan) on coltest c1 + Columnar Projected Columns: id, x1, x2, x3 + -> Custom Scan (ColumnarScan) on coltest c2 + Filter: (c1.id = id) + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: (c1.id = id) + -> Custom Scan (ColumnarScan) on coltest c3 + Filter: (c1.id = id) + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: (c1.id = id) + -> Custom Scan (ColumnarScan) on coltest c4 + Filter: (c1.id = id) + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: (c1.id = id) +(17 rows) + +set columnar.max_custom_scan_paths to 2; +EXPLAIN (costs off, timing off, summary off) + SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE + c1.id = c2.id and c1.id = c3.id and c1.id = c4.id; +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c2 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c3 +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for c4 +DETAIL: unparameterized; 0 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop + Join Filter: (c1.id = c4.id) + -> Nested Loop + Join Filter: (c1.id = c3.id) + -> Nested Loop + Join Filter: (c1.id = c2.id) + -> Custom Scan (ColumnarScan) on coltest c1 + Columnar Projected Columns: id, x1, x2, x3 + -> Custom Scan (ColumnarScan) on coltest c2 + Columnar Projected Columns: id, x1, x2, x3 + -> Custom Scan (ColumnarScan) on coltest c3 + Columnar Projected Columns: id, x1, x2, x3 + -> Custom Scan (ColumnarScan) on coltest c4 + Columnar Projected Columns: id, x1, x2, x3 +(14 rows) + +set columnar.max_custom_scan_paths to default; +set columnar.planner_debug_level to default; +-- test more complex parameterization +set columnar.planner_debug_level = 'notice'; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM r1, r2, r3, coltest WHERE + id1 = id2 AND id2 = id3 AND id3 = id AND + n1 > x1 AND n2 > x2 AND n3 > x3; +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r1}; 2 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r1, r2}; 3 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r1, r2, r3}; 4 clauses pushed down +NOTICE: columnar planner: adding CustomScan path 
for coltest +DETAIL: parameterized by rels {r1, r3}; 3 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r2}; 2 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r2, r3}; 3 clauses pushed down +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: parameterized by rels {r3}; 2 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop (actual rows=3 loops=1) + Join Filter: ((r3.n3 > coltest.x3) AND (r1.id1 = r3.id3)) + Rows Removed by Join Filter: 12 + -> Nested Loop (actual rows=3 loops=1) + Join Filter: ((r2.n2 > coltest.x2) AND (r1.id1 = r2.id2)) + Rows Removed by Join Filter: 12 + -> Nested Loop (actual rows=3 loops=1) + -> Seq Scan on r1 (actual rows=5 loops=1) + -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) + Filter: ((r1.n1 > x1) AND (r1.id1 = id)) + Rows Removed by Filter: 799 + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id)) + Columnar Chunk Groups Removed by Filter: 19 + -> Seq Scan on r2 (actual rows=5 loops=3) + -> Seq Scan on r3 (actual rows=5 loops=3) +(16 rows) + +set columnar.planner_debug_level to default; +SELECT * FROM r1, r2, r3, coltest WHERE + id1 = id2 AND id2 = id3 AND id3 = id AND + n1 > x1 AND n2 > x2 AND n3 > x3; + id1 | n1 | id2 | n2 | id3 | n3 | id | x1 | x2 | x3 +--------------------------------------------------------------------- + 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | 12340 | 123400 | 1234000 + 9101 | 176000 | 9101 | 1760002 | 9101 | 17600075 | 9101 | 91010 | 910100 | 9101000 + 18942 | 189430 | 18942 | 1894302 | 18942 | 18943075 | 18942 | 189420 | 1894200 | 18942000 +(3 rows) + +-- test partitioning parameterization +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM r1, coltest_part WHERE + id1 = id AND n1 > x1; + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop (actual rows=3 loops=1) + -> Seq Scan on r1 (actual rows=5 loops=1) + -> Append (actual rows=1 loops=5) + -> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3) + Filter: ((r1.n1 > x1) AND (r1.id1 = id)) + Rows Removed by Filter: 999 + Columnar Projected Columns: id, x1, x2, x3 + Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id)) + Columnar Chunk Groups Removed by Filter: 9 + -> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2) + Filter: ((r1.n1 > x1) AND (r1.id1 = id)) + Rows Removed by Filter: 10000 +(12 rows) + +SELECT * FROM r1, coltest_part WHERE + id1 = id AND n1 > x1; + id1 | n1 | id | x1 | x2 | x3 +--------------------------------------------------------------------- + 1234 | 12350 | 1234 | 12340 | 123400 | 1234000 + 9101 | 176000 | 9101 | 91010 | 910100 | 9101000 + 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000 +(3 rows) + +set enable_mergejoin to default; +set enable_hashjoin to default; +set enable_material to default; +set columnar.planner_debug_level = 'notice'; +alter table coltest add column x5 int default (random()*20000)::int; +analyze coltest; +-- test that expressions on whole-row references are not pushed down +select * from coltest where coltest = (1,1,1,1); +NOTICE: columnar planner: cannot push down clause: var is whole-row reference or system column +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: unparameterized; 0 clauses pushed 
down + id | x1 | x2 | x3 | x5 +--------------------------------------------------------------------- +(0 rows) + +-- test that expressions on uncorrelated attributes are not pushed down +set columnar.qual_pushdown_correlation to default; +select * from coltest where x5 = 23484; +NOTICE: columnar planner: cannot push down clause: absolute correlation (X.YZ) of var attribute 5 is smaller than the value configured in "columnar.qual_pushdown_correlation_threshold" (0.900) +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: unparameterized; 0 clauses pushed down + id | x1 | x2 | x3 | x5 +--------------------------------------------------------------------- +(0 rows) + +-- test that expressions on volatile functions are not pushed down +create function vol() returns int language plpgsql as $$ +BEGIN + RETURN 1; +END; +$$; +select * from coltest where x3 = vol(); +NOTICE: columnar planner: cannot push down clause: expr contains volatile functions +NOTICE: columnar planner: adding CustomScan path for coltest +DETAIL: unparameterized; 0 clauses pushed down + id | x1 | x2 | x3 | x5 +--------------------------------------------------------------------- +(0 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) + SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: adding CustomScan path for c1 +DETAIL: unparameterized; 0 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on coltest c1 (actual rows=19577 loops=1) + Filter: (ceil((x1)::double precision) > '4222'::double precision) + Rows Removed by Filter: 423 + Columnar Projected Columns: id, x1, x2, x3, x5 +(4 rows) + +set columnar.planner_debug_level to default; +-- +-- https://github.com/citusdata/citus/issues/4488 +-- +create table columnar_prepared_stmt (x int, y int) using columnar; +insert into columnar_prepared_stmt select s, s from generate_series(1,5000000) s; +prepare foo (int) as select x from columnar_prepared_stmt where x = $1; +execute foo(3); + x +--------------------------------------------------------------------- + 3 +(1 row) + +execute foo(3); + x +--------------------------------------------------------------------- + 3 +(1 row) + +execute foo(3); + x +--------------------------------------------------------------------- + 3 +(1 row) + +execute foo(3); + x +--------------------------------------------------------------------- + 3 +(1 row) + +select filtered_row_count('execute foo(3)'); + filtered_row_count +--------------------------------------------------------------------- + 9999 +(1 row) + +select filtered_row_count('execute foo(3)'); + filtered_row_count +--------------------------------------------------------------------- + 9999 +(1 row) + +select filtered_row_count('execute foo(3)'); + filtered_row_count +--------------------------------------------------------------------- + 9999 +(1 row) + +select filtered_row_count('execute foo(3)'); + filtered_row_count +--------------------------------------------------------------------- + 9999 +(1 row) + +drop table columnar_prepared_stmt; +-- +-- https://github.com/citusdata/citus/issues/5258 +-- +set default_table_access_method to columnar; +CREATE TABLE atest1 ( a int, b text ); +CREATE TABLE atest2 (col1 varchar(10), col2 boolean); +INSERT INTO atest1 VALUES (1, 'one'); +SELECT * FROM 
atest1; -- ok + a | b +--------------------------------------------------------------------- + 1 | one +(1 row) + +SELECT * FROM atest2; -- ok + col1 | col2 +--------------------------------------------------------------------- +(0 rows) + +INSERT INTO atest1 VALUES (2, 'two'); -- ok +INSERT INTO atest1 SELECT 1, b FROM atest1; -- ok +SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); + col1 | col2 +--------------------------------------------------------------------- +(0 rows) + +CREATE TABLE t1 (name TEXT, n INTEGER); +CREATE TABLE t2 (name TEXT, n INTEGER); +CREATE TABLE t3 (name TEXT, n INTEGER); +INSERT INTO t1 VALUES ( 'bb', 11 ); +INSERT INTO t2 VALUES ( 'bb', 12 ); +INSERT INTO t2 VALUES ( 'cc', 22 ); +INSERT INTO t2 VALUES ( 'ee', 42 ); +INSERT INTO t3 VALUES ( 'bb', 13 ); +INSERT INTO t3 VALUES ( 'cc', 23 ); +INSERT INTO t3 VALUES ( 'dd', 33 ); +SELECT * FROM +(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1 +NATURAL INNER JOIN +(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL INNER JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2 +--------------------------------------------------------------------- + bb | 11 | 1 | 12 | 2 | 13 | 3 +(1 row) + +CREATE TABLE numrange_test (nr NUMRANGE); +INSERT INTO numrange_test VALUES('[,)'); +INSERT INTO numrange_test VALUES('[3,]'); +INSERT INTO numrange_test VALUES('[, 5)'); +INSERT INTO numrange_test VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test VALUES('empty'); +INSERT INTO numrange_test VALUES(numrange(1.7, 1.7, '[]')); +create table numrange_test2(nr numrange); +INSERT INTO numrange_test2 VALUES('[, 5)'); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2,'()')); +INSERT INTO numrange_test2 VALUES('empty'); +set enable_nestloop=t; +set enable_hashjoin=f; +set enable_mergejoin=f; +select * from numrange_test natural join numrange_test2 order by nr; + nr +--------------------------------------------------------------------- + empty + (,5) + [1.1,2.2) + [1.1,2.2) +(4 rows) + +DROP TABLE atest1, atest2, t1, t2, t3, numrange_test, numrange_test2; +set default_table_access_method to default; +set columnar.planner_debug_level to notice; +BEGIN; + SET LOCAL columnar.stripe_row_limit = 2000; + SET LOCAL columnar.chunk_group_row_limit = 1000; + create table pushdown_test (a int, b int) using columnar; + insert into pushdown_test values (generate_series(1, 200000)); +COMMIT; +SET columnar.max_custom_scan_paths TO 50; +SET columnar.qual_pushdown_correlation_threshold TO 0.0; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=2 loops=1) + Filter: ((a = 204356) OR (a = 104356) OR (a = 76556)) + Rows Removed by Filter: 1998 + Columnar Projected Columns: a + Columnar Chunk Group Filters: ((a = 204356) OR (a = 104356) OR (a = 76556)) + Columnar Chunk Groups Removed by Filter: 198 +(7 rows) + +SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed 
down + sum +--------------------------------------------------------------------- + 180912 +(1 row) + +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1) + Filter: ((a = 194356) OR (a = 104356) OR (a = 76556)) + Rows Removed by Filter: 2997 + Columnar Projected Columns: a + Columnar Chunk Group Filters: ((a = 194356) OR (a = 104356) OR (a = 76556)) + Columnar Chunk Groups Removed by Filter: 197 +(7 rows) + +SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + sum +--------------------------------------------------------------------- + 375268 +(1 row) + +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 0 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1) + Filter: ((a = 204356) OR (a > ((a * '-1'::integer) + b))) + Rows Removed by Filter: 200000 + Columnar Projected Columns: a, b +(5 rows) + +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=38998 loops=1) + Filter: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000))) + Rows Removed by Filter: 2 + Columnar Projected Columns: a + Columnar Chunk Group Filters: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000))) + Columnar Chunk Groups Removed by Filter: 161 +(7 rows) + +SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + sum +--------------------------------------------------------------------- + 1099459500 +(1 row) + +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar 
planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above +NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 0 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=200000 loops=1) + Filter: ((((a)::double precision > random()) AND (a < (2 * a))) OR (a > 100)) + Columnar Projected Columns: a +(4 rows) + +SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above +NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 0 clauses pushed down + sum +--------------------------------------------------------------------- + 20000100000 +(1 row) + +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3010 loops=1) + Filter: ((((a)::double precision > random()) AND (a <= 2000)) OR (a > 198990)) + Rows Removed by Filter: 990 + Columnar Projected Columns: a + Columnar Chunk Group Filters: ((a <= 2000) OR (a > 198990)) + Columnar Chunk Groups Removed by Filter: 196 +(7 rows) + +SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + sum +--------------------------------------------------------------------- + 203491455 +(1 row) + +SET hash_mem_multiplier = 1.0; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test where +( + a > random() + and + ( + (a < 200 and a not in (select a from pushdown_test)) or + (a > 1000 and a < 2000) + ) +) +or +(a > 200000-2010); +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: 
columnar planner: cannot push down clause: must not contain a subplan +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3009 loops=1) + Filter: ((((a)::double precision > random()) AND (((a < 200) AND (NOT (SubPlan 1))) OR ((a > 1000) AND (a < 2000)))) OR (a > 197990)) + Rows Removed by Filter: 1991 + Columnar Projected Columns: a + Columnar Chunk Group Filters: (((a < 200) OR ((a > 1000) AND (a < 2000))) OR (a > 197990)) + Columnar Chunk Groups Removed by Filter: 195 + SubPlan 1 + -> Materialize (actual rows=100 loops=199) + -> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1) + Columnar Projected Columns: a +(11 rows) + +RESET hash_mem_multiplier; +SELECT sum(a) FROM pushdown_test where +( + a > random() + and + ( + (a < 200 and a not in (select a from pushdown_test)) or + (a > 1000 and a < 2000) + ) +) +or +(a > 200000-2010); +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 0 clauses pushed down +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: must not contain a subplan +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + sum +--------------------------------------------------------------------- + 401479455 +(1 row) + +create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as +$$ BEGIN RETURN 1+arg; END; $$; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + QUERY PLAN +--------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1) + Filter: ((a < 6001) AND ((a)::double precision = random()) AND (a < stable_1(a))) + Rows Removed by Filter: 6000 + Columnar Projected Columns: a + Columnar Chunk Group Filters: (a < 6001) + Columnar Chunk Groups Removed by Filter: 194 +(7 rows) + +SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' +HINT: Var must only reference this rel, and Expr must not reference this rel +NOTICE: columnar planner: adding CustomScan path for pushdown_test +DETAIL: unparameterized; 1 clauses pushed down + sum +--------------------------------------------------------------------- + +(1 row) + +RESET columnar.max_custom_scan_paths; +RESET 
columnar.qual_pushdown_correlation_threshold; +RESET columnar.planner_debug_level; +DROP TABLE pushdown_test; +-- https://github.com/citusdata/citus/issues/5803 +CREATE TABLE pushdown_test(id int, country text) using columnar; +BEGIN; + INSERT INTO pushdown_test VALUES(1, 'AL'); + INSERT INTO pushdown_test VALUES(2, 'AU'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(3, 'BR'); + INSERT INTO pushdown_test VALUES(4, 'BT'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(5, 'PK'); + INSERT INTO pushdown_test VALUES(6, 'PA'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(7, 'USA'); + INSERT INTO pushdown_test VALUES(8, 'ZW'); +END; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1) + Filter: (country = ANY ('{USA,BR,ZW}'::text[])) + Rows Removed by Filter: 1 + Columnar Projected Columns: id, country + Columnar Chunk Group Filters: (country = ANY ('{USA,BR,ZW}'::text[])) + Columnar Chunk Groups Removed by Filter: 2 +(6 rows) + +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + id +--------------------------------------------------------------------- + 3 + 7 + 8 +(3 rows) + +-- test for volatile functions with IN +CREATE FUNCTION volatileFunction() returns TEXT language plpgsql AS +$$ +BEGIN + return 'AL'; +END; +$$; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1) + Filter: (country = ANY (ARRAY['USA'::text, 'ZW'::text, volatilefunction()])) + Rows Removed by Filter: 5 + Columnar Projected Columns: id, country +(4 rows) + +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + id | country +--------------------------------------------------------------------- + 1 | AL + 7 | USA + 8 | ZW +(3 rows) + +DROP TABLE pushdown_test; diff --git a/src/test/regress/expected/columnar_memory.out b/src/test/regress/expected/columnar_memory.out index 865472da1..229502437 100644 --- a/src/test/regress/expected/columnar_memory.out +++ b/src/test/regress/expected/columnar_memory.out @@ -77,10 +77,10 @@ FROM columnar_test_helpers.columnar_store_memory_stats(); top_growth | 1 -- before this change, max mem usage while executing inserts was 28MB and --- with this change it's less than 8MB. +-- with this change it's less than 9MB. 
 SELECT
-  (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
-  (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+  (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+  (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
 -[ RECORD 1 ]--+--
 large_batch_ok | t
 first_batch_ok | t
diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out
index c5111b05f..59f7948a1 100644
--- a/src/test/regress/expected/create_role_propagation.out
+++ b/src/test/regress/expected/create_role_propagation.out
@@ -244,13 +244,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
  1
 (1 row)
 
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
- role | member | grantor | admin_option
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+ role | member | grantor | admin_option
 ---------------------------------------------------------------------
- dist_role_1 | dist_role_2 | non_dist_role_1 | f
- dist_role_3 | non_dist_role_3 | postgres | f
- non_dist_role_1 | non_dist_role_2 | dist_role_1 | f
- non_dist_role_4 | dist_role_4 | postgres | f
+ dist_role_1 | dist_role_2 | t | f
+ dist_role_3 | non_dist_role_3 | t | f
+ non_dist_role_1 | non_dist_role_2 | t | f
+ non_dist_role_4 | dist_role_4 | t | f
 (4 rows)
 
 SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
diff --git a/src/test/regress/expected/insert_select_repartition.out b/src/test/regress/expected/insert_select_repartition.out
index 88acc49e3..476aa8640 100644
--- a/src/test/regress/expected/insert_select_repartition.out
+++ b/src/test/regress/expected/insert_select_repartition.out
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
       ->  Task
             Node: host=localhost port=xxxxx dbname=regression
            ->  HashAggregate
-                 Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+                 Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
                  ->  Seq Scan on source_table_4213644 source_table
 (10 rows)
diff --git a/src/test/regress/expected/insert_select_repartition_0.out b/src/test/regress/expected/insert_select_repartition_0.out
index 7217be3e9..904bd215a 100644
--- a/src/test/regress/expected/insert_select_repartition_0.out
+++ b/src/test/regress/expected/insert_select_repartition_0.out
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
       ->  Task
             Node: host=localhost port=xxxxx dbname=regression
            ->  HashAggregate
-                 Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+                 Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
                  ->  Seq Scan on source_table_4213644 source_table
 (10 rows)
diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out
index 0a113c5f8..09b1ccd87 100644
--- a/src/test/regress/expected/multi_hash_pruning.out
+++ b/src/test/regress/expected/multi_hash_pruning.out
@@ -1232,31 +1232,20 @@ WHERE o_orderkey IN (1, 2)
               ->  Seq Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
 (13 rows)
 
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS OFF) SELECT count(*) FROM orders_hash_partitioned
 FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 AND l_orderkey IN (2, 3);
- QUERY PLAN
+$Q$);
+ coordinator_plan
 ---------------------------------------------------------------------
 Aggregate
   ->  Custom Scan (Citus Adaptive)
         Task Count: 3
-        Tasks Shown: One of 3
-        ->  Task
-              Node: host=localhost port=xxxxx dbname=regression
-              ->  Aggregate
-                    ->  Nested Loop
-                          Join Filter: (orders_hash_partitioned.o_orderkey = lineitem_hash_partitioned.l_orderkey)
-                          ->  Seq Scan on orders_hash_partitioned_630000 orders_hash_partitioned
-                                Filter: (o_orderkey = ANY ('{1,2}'::integer[]))
-                          ->  Materialize
-                                ->  Bitmap Heap Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
-                                      Recheck Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
-                                      ->  Bitmap Index Scan on lineitem_hash_partitioned_pkey_630004
-                                            Index Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
-(16 rows)
+(3 rows)
 
 SET citus.task_executor_type TO DEFAULT;
 DROP TABLE lineitem_hash_partitioned;
diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out
index d2051a55c..a1ef9f52f 100644
--- a/src/test/regress/expected/multi_having_pushdown.out
+++ b/src/test/regress/expected/multi_having_pushdown.out
@@ -120,7 +120,7 @@ EXPLAIN (COSTS FALSE)
 SELECT sum(l_extendedprice * l_discount) as revenue
     FROM lineitem_hash, orders_hash
     WHERE o_orderkey = l_orderkey
-    GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+    GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
     ORDER BY 1 DESC LIMIT 3;
  QUERY PLAN
 ---------------------------------------------------------------------
@@ -136,7 +136,7 @@ EXPLAIN (COSTS FALSE)
       ->  Sort
             Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC
             ->  HashAggregate
-                  Group Key: lineitem_hash.l_orderkey, orders_hash.o_orderkey, lineitem_hash.l_shipmode
+                  Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_shipmode
                   Filter: (sum(lineitem_hash.l_quantity) > '24'::numeric)
                   ->  Hash Join
                         Hash Cond: (orders_hash.o_orderkey = lineitem_hash.l_orderkey)
diff --git a/src/test/regress/expected/multi_move_mx.out b/src/test/regress/expected/multi_move_mx.out
index b6cc5d0d7..b5aeec8ca 100644
--- a/src/test/regress/expected/multi_move_mx.out
+++ b/src/test/regress/expected/multi_move_mx.out
@@ -148,7 +148,7 @@ SELECT pg_reload_conf();
 CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637'
     PUBLICATION pub_01 WITH (citus_use_authinfo=true);
 ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist
-Either provide the file or change sslmode to disable server certificate verification.
+Either provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode to disable server certificate verification.
 ALTER SYSTEM RESET citus.node_conninfo;
 SELECT pg_reload_conf();
  pg_reload_conf
diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out
index 01d9736f2..116269a4e 100644
--- a/src/test/regress/expected/multi_mx_hide_shard_names.out
+++ b/src/test/regress/expected/multi_mx_hide_shard_names.out
@@ -425,9 +425,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 test_table_2_1130000
 (4 rows)
 
+-- PG16 added one more backend type B_STANDALONE_BACKEND
+-- and also alphabetized the backend types, hence the orders changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
 NOTICE: backend type switched to: background worker
  set_backend_type
 ---------------------------------------------------------------------
@@ -445,7 +461,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 
 -- or, we set it to walsender
 -- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
 NOTICE: backend type switched to: walsender
  set_backend_type
 ---------------------------------------------------------------------
@@ -480,7 +496,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 RESET application_name;
 
 -- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
 NOTICE: backend type switched to: client backend
  set_backend_type
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out
index f4c4ccc21..60f978f5e 100644
--- a/src/test/regress/expected/multi_subquery.out
+++ b/src/test/regress/expected/multi_subquery.out
@@ -1062,7 +1062,7 @@ SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value)
 (26 rows)
 
 EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
  QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
@@ -1070,20 +1070,18 @@ SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value)
   Tasks Shown: All
   ->  Task
         Node: host=localhost port=xxxxx dbname=regression
-        ->  GroupAggregate
-              Group Key: k1.key
+        ->  Aggregate
               Filter: (sum(k1.value) > $0)
               InitPlan 1 (returns $0)
                 ->  Limit
                       ->  Sort
                             Sort Key: (sum(k2.value)) DESC
-                            ->  GroupAggregate
-                                  Group Key: k2.key
+                            ->  Aggregate
                                   ->  Seq Scan on keyval2_xxxxxxx k2
                                         Filter: (key = 2)
               ->  Seq Scan on keyval1_xxxxxxx k1
                     Filter: (key = 2)
-(18 rows)
+(16 rows)
 
 -- Simple join subquery pushdown
 SELECT
diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out
index 8999038ec..acc0c3f63 100644
--- a/src/test/regress/expected/pg12.out
+++ b/src/test/regress/expected/pg12.out
@@ -370,11 +370,13 @@ SELECT DISTINCT y FROM test;
 (1 row)
 
 -- non deterministic collations
+SET client_min_messages TO WARNING;
 CREATE COLLATION test_pg12.case_insensitive (
     provider = icu,
     locale = '@colStrength=secondary',
     deterministic = false
 );
+RESET client_min_messages;
 CREATE TABLE col_test (
     id int,
     val text collate case_insensitive
diff --git a/src/test/regress/expected/undistribute_table.out b/src/test/regress/expected/undistribute_table.out
index 98b1d98f1..6c77af4fb 100644
--- a/src/test/regress/expected/undistribute_table.out
+++ b/src/test/regress/expected/undistribute_table.out
@@ -400,22 +400,6 @@ NOTICE: renaming the new table to undistribute_table.dist_type_table
 (1 row)
 
--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
-SELECT undistribute_table('rule_table_2');
-ERROR: cannot alter table because an extension depends on it
 -- test CREATE RULE without ON SELECT
 CREATE TABLE rule_table_3 (a INT);
 CREATE TABLE rule_table_4 (a INT);
@@ -444,9 +428,6 @@ NOTICE: renaming the new table to undistribute_table.rule_table_4
 ALTER EXTENSION plpgsql DROP VIEW extension_view;
 NOTICE: Citus does not propagate adding/dropping member objects
 HINT: You can add/drop the member objects on the workers as well.
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
 ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
 NOTICE: Citus does not propagate adding/dropping member objects
 HINT: You can add/drop the member objects on the workers as well.
@@ -456,11 +437,9 @@ DETAIL: drop cascades to view undis_view1
 drop cascades to view undis_view2
 drop cascades to view another_schema.undis_view3
 DROP SCHEMA undistribute_table, another_schema CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 5 other objects
 DETAIL: drop cascades to table extension_table
 drop cascades to view extension_view
 drop cascades to table dist_type_table
-drop cascades to table rule_table_2
-drop cascades to view rule_table_1
 drop cascades to table rule_table_3
 drop cascades to table rule_table_4
diff --git a/src/test/regress/sql/columnar_chunk_filtering.sql b/src/test/regress/sql/columnar_chunk_filtering.sql
index b8b2b411d..d37a8d8b6 100644
--- a/src/test/regress/sql/columnar_chunk_filtering.sql
+++ b/src/test/regress/sql/columnar_chunk_filtering.sql
@@ -1,6 +1,10 @@
 --
 -- Test chunk filtering in columnar using min/max values in stripe skip lists.
 --
+-- It has an alternative test output file
+-- because PG16 changed the order of some Filters in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
 --
diff --git a/src/test/regress/sql/columnar_memory.sql b/src/test/regress/sql/columnar_memory.sql
index 21bab57f5..5f29eb1e3 100644
--- a/src/test/regress/sql/columnar_memory.sql
+++ b/src/test/regress/sql/columnar_memory.sql
@@ -77,10 +77,10 @@ SELECT CASE WHEN 1.0 * TopMemoryContext / :top_post BETWEEN 0.98 AND 1.03 THEN 1
 FROM columnar_test_helpers.columnar_store_memory_stats();
 
 -- before this change, max mem usage while executing inserts was 28MB and
--- with this change it's less than 8MB.
+-- with this change it's less than 9MB.
 SELECT
-  (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
-  (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+  (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+  (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
 
 \x
diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql
index ceda9f10c..027e4f72e 100644
--- a/src/test/regress/sql/create_role_propagation.sql
+++ b/src/test/regress/sql/create_role_propagation.sql
@@ -117,7 +117,7 @@ GRANT non_dist_role_4 TO dist_role_4;
 
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
 
 \c - - - :worker_1_port
diff --git a/src/test/regress/sql/insert_select_repartition.sql b/src/test/regress/sql/insert_select_repartition.sql
index 4d13a83f4..30d77f5b8 100644
--- a/src/test/regress/sql/insert_select_repartition.sql
+++ b/src/test/regress/sql/insert_select_repartition.sql
@@ -611,7 +611,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -625,7 +625,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
diff --git a/src/test/regress/sql/multi_hash_pruning.sql b/src/test/regress/sql/multi_hash_pruning.sql
index df432ca90..ef6da8638 100644
--- a/src/test/regress/sql/multi_hash_pruning.sql
+++ b/src/test/regress/sql/multi_hash_pruning.sql
@@ -336,12 +336,14 @@ FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 OR l_orderkey IN (2, 3);
 
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS OFF) SELECT count(*) FROM orders_hash_partitioned
 FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 AND l_orderkey IN (2, 3);
+$Q$);
 
 SET citus.task_executor_type TO DEFAULT;
diff --git a/src/test/regress/sql/multi_having_pushdown.sql b/src/test/regress/sql/multi_having_pushdown.sql
index 497fd8cc3..48475099d 100644
--- a/src/test/regress/sql/multi_having_pushdown.sql
+++ b/src/test/regress/sql/multi_having_pushdown.sql
@@ -43,7 +43,7 @@ EXPLAIN (COSTS FALSE)
 SELECT sum(l_extendedprice * l_discount) as revenue
     FROM lineitem_hash, orders_hash
     WHERE o_orderkey = l_orderkey
-    GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+    GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
     ORDER BY 1 DESC LIMIT 3;
 
 EXPLAIN (COSTS FALSE)
diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql
index 9d2536973..e5213a41b 100644
--- a/src/test/regress/sql/multi_mx_hide_shard_names.sql
+++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql
@@ -226,14 +226,32 @@ RESET citus.enable_metadata_sync;
 -- the shards and indexes do not show up
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
 
+-- PG16 added one more backend type B_STANDALONE_BACKEND
+-- and also alphabetized the backend types, hence the orders changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
+
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
 
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
 
 -- or, we set it to walsender
 -- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
 
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
 
 -- unless the application name starts with citus_shard
@@ -242,7 +260,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 RESET application_name;
 
 -- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
 
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
diff --git a/src/test/regress/sql/multi_subquery.sql b/src/test/regress/sql/multi_subquery.sql
index 68265606a..e5d8aa17c 100644
--- a/src/test/regress/sql/multi_subquery.sql
+++ b/src/test/regress/sql/multi_subquery.sql
@@ -676,7 +676,7 @@ EXPLAIN (COSTS OFF)
 SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
 
 EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
 
 -- Simple join subquery pushdown
 SELECT
diff --git a/src/test/regress/sql/pg12.sql b/src/test/regress/sql/pg12.sql
index a86dbbb42..831ce40bb 100644
--- a/src/test/regress/sql/pg12.sql
+++ b/src/test/regress/sql/pg12.sql
@@ -242,11 +242,13 @@ COMMIT;
 
 SELECT DISTINCT y FROM test;
 
 -- non deterministic collations
+SET client_min_messages TO WARNING;
 CREATE COLLATION test_pg12.case_insensitive (
     provider = icu,
     locale = '@colStrength=secondary',
     deterministic = false
 );
+RESET client_min_messages;
 CREATE TABLE col_test (
     id int,
diff --git a/src/test/regress/sql/undistribute_table.sql b/src/test/regress/sql/undistribute_table.sql
index 1703440c0..22c14696b 100644
--- a/src/test/regress/sql/undistribute_table.sql
+++ b/src/test/regress/sql/undistribute_table.sql
@@ -131,18 +131,6 @@ SELECT create_distributed_table('dist_type_table', 'a');
 
 SELECT undistribute_table('dist_type_table');
 
--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
-
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-
-SELECT undistribute_table('rule_table_2');
-
 -- test CREATE RULE without ON SELECT
 CREATE TABLE rule_table_3 (a INT);
 CREATE TABLE rule_table_4 (a INT);
@@ -155,7 +143,6 @@ ALTER EXTENSION plpgsql ADD TABLE rule_table_3;
 SELECT undistribute_table('rule_table_4');
 
 ALTER EXTENSION plpgsql DROP VIEW extension_view;
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
 ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
 
 DROP TABLE view_table CASCADE;