From 3401b31c13b77e4688243503de1759a7db9af558 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 8 Aug 2022 11:19:14 +0300 Subject: [PATCH] Deletes unnecessary test outputs (#6140) --- .../expected/columnar_chunk_filtering_0.out | 1068 ----------------- .../expected/columnar_partitioning_1.out | 517 -------- .../failure_on_create_subscription_0.out | 103 -- .../regress/expected/failure_savepoints.out | 8 +- .../regress/expected/failure_vacuum_1.out | 126 -- .../isolation_logical_replication_0.out | 223 ---- ...ogical_replication_with_partitioning_0.out | 4 - ...ogical_replication_with_partitioning_1.out | 4 - src/test/regress/sql/failure_savepoints.sql | 8 +- 9 files changed, 6 insertions(+), 2055 deletions(-) delete mode 100644 src/test/regress/expected/columnar_chunk_filtering_0.out delete mode 100644 src/test/regress/expected/columnar_partitioning_1.out delete mode 100644 src/test/regress/expected/failure_on_create_subscription_0.out delete mode 100644 src/test/regress/expected/failure_vacuum_1.out delete mode 100644 src/test/regress/expected/isolation_logical_replication_0.out delete mode 100644 src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out delete mode 100644 src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out diff --git a/src/test/regress/expected/columnar_chunk_filtering_0.out b/src/test/regress/expected/columnar_chunk_filtering_0.out deleted file mode 100644 index b52581c0e..000000000 --- a/src/test/regress/expected/columnar_chunk_filtering_0.out +++ /dev/null @@ -1,1068 +0,0 @@ --- --- Test chunk filtering in columnar using min/max values in stripe skip lists. --- --- --- filtered_row_count returns number of rows filtered by the WHERE clause. --- If chunks get filtered by columnar, less rows are passed to WHERE --- clause, so this function should return a lower number. 
--- -CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS -$$ - DECLARE - result bigint; - rec text; - BEGIN - result := 0; - - FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP - IF rec ~ '^\s+Rows Removed by Filter' then - result := regexp_replace(rec, '[^0-9]*', '', 'g'); - END IF; - END LOOP; - - RETURN result; - END; -$$ LANGUAGE PLPGSQL; -set columnar.qual_pushdown_correlation = 0.0; --- Create and load data --- chunk_group_row_limit '1000', stripe_row_limit '2000' -set columnar.stripe_row_limit = 2000; -set columnar.chunk_group_row_limit = 1000; -CREATE TABLE test_chunk_filtering (a int) - USING columnar; -INSERT INTO test_chunk_filtering SELECT generate_series(1,10000); --- Verify that filtered_row_count is less than 1000 for the following queries -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering'); - filtered_row_count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200'); - filtered_row_count ---------------------------------------------------------------------- - 801 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 200'); - filtered_row_count ---------------------------------------------------------------------- - 200 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 9900'); - filtered_row_count ---------------------------------------------------------------------- - 101 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 9900'); - filtered_row_count ---------------------------------------------------------------------- - 900 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0'); - filtered_row_count ---------------------------------------------------------------------- - 0 -(1 row) - --- Verify that filtered_row_count is less than 2000 for the following queries -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 1 AND 10'); - filtered_row_count ---------------------------------------------------------------------- - 990 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010'); - filtered_row_count ---------------------------------------------------------------------- - 1979 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN -10 AND 0'); - filtered_row_count ---------------------------------------------------------------------- - 0 -(1 row) - --- Load data for second time and verify that filtered_row_count is exactly twice as before -INSERT INTO test_chunk_filtering SELECT generate_series(1,10000); -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200'); - filtered_row_count ---------------------------------------------------------------------- - 1602 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0'); - filtered_row_count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010'); - filtered_row_count ---------------------------------------------------------------------- - 3958 -(1 row) - -set columnar.stripe_row_limit to default; -set columnar.chunk_group_row_limit to default; --- Verify that we are fine with collations which 
use a different alphabet order -CREATE TABLE collation_chunk_filtering_test(A text collate "da_DK") - USING columnar; -COPY collation_chunk_filtering_test FROM STDIN; -SELECT * FROM collation_chunk_filtering_test WHERE A > 'B'; - a ---------------------------------------------------------------------- - Å -(1 row) - -CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR; -INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567); -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT * FROM simple_chunk_filtering WHERE i > 123456; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1) - Filter: (i > 123456) - Rows Removed by Filter: 3457 - Columnar Projected Columns: i - Columnar Chunk Group Filters: (i > 123456) - Columnar Chunk Groups Removed by Filter: 12 -(6 rows) - -SET columnar.enable_qual_pushdown = false; -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT * FROM simple_chunk_filtering WHERE i > 123456; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1) - Filter: (i > 123456) - Rows Removed by Filter: 123457 - Columnar Projected Columns: i -(4 rows) - -SET columnar.enable_qual_pushdown TO DEFAULT; --- https://github.com/citusdata/citus/issues/4555 -TRUNCATE simple_chunk_filtering; -INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000); -COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null'; -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT * FROM simple_chunk_filtering WHERE i > 180000; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=20000 loops=1) - Filter: (i > 180000) - Rows Removed by Filter: 1 - Columnar Projected Columns: i - Columnar Chunk Group Filters: (i > 180000) - Columnar Chunk Groups Removed by Filter: 18 -(6 rows) - -DROP TABLE simple_chunk_filtering; -CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar; -INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i; -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1) - Filter: (a > 50000) - Rows Removed by Filter: 1 - Columnar Projected Columns: a - Columnar Chunk Group Filters: (a > 50000) - Columnar Chunk Groups Removed by Filter: 5 -(7 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1) - Filter: ((a > 50000) AND (b > 50000)) - Rows Removed by Filter: 1 - Columnar Projected Columns: a, b - Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000)) - Columnar Chunk Groups Removed by Filter: 5 -(7 rows) - --- make next tests faster -TRUNCATE multi_column_chunk_filtering; -INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5); -EXPLAIN (analyze on, 
costs off, timing off, summary off) - SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) - Filter: ((a > 50000) AND (b > 50000)) - Columnar Projected Columns: a, b - Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000)) - Columnar Chunk Groups Removed by Filter: 1 -(5 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) - Filter: (b > 50000) - Rows Removed by Filter: 6 - Columnar Projected Columns: a, b - Columnar Chunk Group Filters: (b > 50000) - Columnar Chunk Groups Removed by Filter: 0 -(6 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT FROM multi_column_chunk_filtering WHERE a > 50000; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1) - Filter: (a > 50000) - Columnar Projected Columns: a - Columnar Chunk Group Filters: (a > 50000) - Columnar Chunk Groups Removed by Filter: 1 -(5 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT FROM multi_column_chunk_filtering; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1) - Columnar Projected Columns: -(2 rows) - -BEGIN; - ALTER TABLE multi_column_chunk_filtering DROP COLUMN a; - ALTER TABLE multi_column_chunk_filtering DROP COLUMN b; - EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT * FROM multi_column_chunk_filtering; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1) - Columnar Projected Columns: -(2 rows) - -ROLLBACK; -CREATE TABLE another_columnar_table(x int, y int) USING columnar; -INSERT INTO another_columnar_table SELECT generate_series(0,5); -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop (actual rows=24 loops=1) - -> Custom Scan (ColumnarScan) on another_columnar_table (actual rows=4 loops=1) - Filter: (x > 1) - Rows Removed by Filter: 2 - Columnar Projected Columns: x, y - Columnar Chunk Group Filters: (x > 1) - Columnar Chunk Groups Removed by Filter: 0 - -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=4) - Columnar Projected Columns: a -(9 rows) - -EXPLAIN (costs off, timing off, summary off) - SELECT y, * FROM another_columnar_table; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on another_columnar_table - Columnar Projected Columns: x, y -(2 rows) - -EXPLAIN (costs off, timing off, summary off) - SELECT *, x FROM another_columnar_table; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on another_columnar_table - Columnar Projected Columns: x, y -(2 rows) - -EXPLAIN (costs off, timing off, summary off) - SELECT y, 
another_columnar_table FROM another_columnar_table; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on another_columnar_table - Columnar Projected Columns: x, y -(2 rows) - -EXPLAIN (costs off, timing off, summary off) - SELECT another_columnar_table, x FROM another_columnar_table; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on another_columnar_table - Columnar Projected Columns: x, y -(2 rows) - -DROP TABLE multi_column_chunk_filtering, another_columnar_table; --- --- https://github.com/citusdata/citus/issues/4780 --- -create table part_table (id int) partition by range (id); -create table part_1_row partition of part_table for values from (150000) to (160000); -create table part_2_columnar partition of part_table for values from (0) to (150000) using columnar; -insert into part_table select generate_series(1,159999); -select filtered_row_count('select count(*) from part_table where id > 75000'); - filtered_row_count ---------------------------------------------------------------------- - 5000 -(1 row) - -drop table part_table; --- test join parameterization -set columnar.stripe_row_limit = 2000; -set columnar.chunk_group_row_limit = 1000; -create table r1(id1 int, n1 int); -- row -create table r2(id2 int, n2 int); -- row -create table r3(id3 int, n3 int); -- row -create table r4(id4 int, n4 int); -- row -create table r5(id5 int, n5 int); -- row -create table r6(id6 int, n6 int); -- row -create table r7(id7 int, n7 int); -- row -create table coltest(id int, x1 int, x2 int, x3 int) using columnar; -create table coltest_part(id int, x1 int, x2 int, x3 int) - partition by range (id); -create table coltest_part0 - partition of coltest_part for values from (0) to (10000) - using columnar; -create table coltest_part1 - partition of coltest_part for values from (10000) to (20000); -- row -set columnar.stripe_row_limit to default; -set columnar.chunk_group_row_limit to default; -insert into r1 values(1234, 12350); -insert into r1 values(4567, 45000); -insert into r1 values(9101, 176000); -insert into r1 values(14202, 7); -insert into r1 values(18942, 189430); -insert into r2 values(1234, 123502); -insert into r2 values(4567, 450002); -insert into r2 values(9101, 1760002); -insert into r2 values(14202, 72); -insert into r2 values(18942, 1894302); -insert into r3 values(1234, 1235075); -insert into r3 values(4567, 4500075); -insert into r3 values(9101, 17600075); -insert into r3 values(14202, 775); -insert into r3 values(18942, 18943075); -insert into r4 values(1234, -1); -insert into r5 values(1234, -1); -insert into r6 values(1234, -1); -insert into r7 values(1234, -1); -insert into coltest - select g, g*10, g*100, g*1000 from generate_series(0, 19999) g; -insert into coltest_part - select g, g*10, g*100, g*1000 from generate_series(0, 19999) g; -ANALYZE r1, r2, r3, coltest, coltest_part; --- force nested loop -set enable_mergejoin=false; -set enable_hashjoin=false; -set enable_material=false; --- test different kinds of expressions -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT * FROM r1, coltest WHERE - id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - -> Seq Scan on r1 (actual rows=4 loops=1) - Filter: ((n1 % 10) = 0) - Rows Removed by Filter: 1 - -> Custom Scan (ColumnarScan) on coltest (actual rows=1 
loops=4) - Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text)) - Rows Removed by Filter: 999 - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id)) - Columnar Chunk Groups Removed by Filter: 19 -(10 rows) - -SELECT * FROM r1, coltest WHERE - id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0; - id1 | n1 | id | x1 | x2 | x3 ---------------------------------------------------------------------- - 4567 | 45000 | 4567 | 45670 | 456700 | 4567000 - 9101 | 176000 | 9101 | 91010 | 910100 | 9101000 - 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000 -(3 rows) - --- test equivalence classes -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE - id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND - id4 = id5 AND id5 = id6 AND id6 = id7; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop (actual rows=1 loops=1) - Join Filter: (coltest.id = r7.id7) - -> Nested Loop (actual rows=1 loops=1) - Join Filter: (coltest.id = r6.id6) - -> Nested Loop (actual rows=1 loops=1) - Join Filter: (coltest.id = r5.id5) - -> Nested Loop (actual rows=1 loops=1) - Join Filter: (coltest.id = r4.id4) - Rows Removed by Join Filter: 4 - -> Nested Loop (actual rows=5 loops=1) - -> Nested Loop (actual rows=5 loops=1) - Join Filter: (r1.id1 = r3.id3) - Rows Removed by Join Filter: 20 - -> Nested Loop (actual rows=5 loops=1) - Join Filter: (r1.id1 = r2.id2) - Rows Removed by Join Filter: 20 - -> Seq Scan on r1 (actual rows=5 loops=1) - -> Seq Scan on r2 (actual rows=5 loops=5) - -> Seq Scan on r3 (actual rows=5 loops=5) - -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) - Filter: (r1.id1 = id) - Rows Removed by Filter: 999 - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: (r1.id1 = id) - Columnar Chunk Groups Removed by Filter: 19 - -> Seq Scan on r4 (actual rows=1 loops=5) - -> Seq Scan on r5 (actual rows=1 loops=1) - -> Seq Scan on r6 (actual rows=1 loops=1) - -> Seq Scan on r7 (actual rows=1 loops=1) -(29 rows) - -SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE - id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND - id4 = id5 AND id5 = id6 AND id6 = id7; - id1 | n1 | id2 | n2 | id3 | n3 | id4 | n4 | id5 | n5 | id6 | n6 | id7 | n7 | id | x1 | x2 | x3 ---------------------------------------------------------------------- - 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | 12340 | 123400 | 1234000 -(1 row) - --- test path generation with different thresholds -set columnar.planner_debug_level = 'notice'; -set columnar.max_custom_scan_paths to 10; -EXPLAIN (costs off, timing off, summary off) - SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE - c1.id = c2.id and c1.id = c3.id and c1.id = c4.id; -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c2}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c2, c3, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding 
CustomScan path for c1 -DETAIL: parameterized by rels {c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: parameterized by rels {c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c1}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c1, c3, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: parameterized by rels {c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c1}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c1, c2, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c2}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: parameterized by rels {c4}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c1}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c1, c2, c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c2}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: parameterized by rels {c3}; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop - -> Nested Loop - -> Nested Loop - -> Custom Scan (ColumnarScan) on coltest c1 - Columnar Projected Columns: id, x1, x2, x3 - -> Custom Scan (ColumnarScan) on coltest c2 - Filter: (c1.id = id) - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: (c1.id = id) - -> Custom Scan (ColumnarScan) on coltest 
c3 - Filter: (c1.id = id) - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: (c1.id = id) - -> Custom Scan (ColumnarScan) on coltest c4 - Filter: (c1.id = id) - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: (c1.id = id) -(17 rows) - -set columnar.max_custom_scan_paths to 2; -EXPLAIN (costs off, timing off, summary off) - SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE - c1.id = c2.id and c1.id = c3.id and c1.id = c4.id; -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c2 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c3 -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for c4 -DETAIL: unparameterized; 0 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop - Join Filter: (c1.id = c4.id) - -> Nested Loop - Join Filter: (c1.id = c3.id) - -> Nested Loop - Join Filter: (c1.id = c2.id) - -> Custom Scan (ColumnarScan) on coltest c1 - Columnar Projected Columns: id, x1, x2, x3 - -> Custom Scan (ColumnarScan) on coltest c2 - Columnar Projected Columns: id, x1, x2, x3 - -> Custom Scan (ColumnarScan) on coltest c3 - Columnar Projected Columns: id, x1, x2, x3 - -> Custom Scan (ColumnarScan) on coltest c4 - Columnar Projected Columns: id, x1, x2, x3 -(14 rows) - -set columnar.max_custom_scan_paths to default; -set columnar.planner_debug_level to default; --- test more complex parameterization -set columnar.planner_debug_level = 'notice'; -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT * FROM r1, r2, r3, coltest WHERE - id1 = id2 AND id2 = id3 AND id3 = id AND - n1 > x1 AND n2 > x2 AND n3 > x3; -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r1}; 2 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r1, r2}; 3 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r1, r2, r3}; 4 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r1, r3}; 3 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r2}; 2 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r2, r3}; 3 clauses pushed down -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: parameterized by rels {r3}; 2 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - Join Filter: ((r3.n3 > coltest.x3) AND (r1.id1 = r3.id3)) - Rows Removed by Join Filter: 12 - -> Nested Loop (actual rows=3 loops=1) - Join Filter: ((r2.n2 > coltest.x2) AND (r1.id1 = r2.id2)) - Rows Removed by Join Filter: 12 - -> Nested Loop (actual rows=3 loops=1) - -> Seq Scan on r1 (actual rows=5 loops=1) - -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5) - Filter: ((r1.n1 > x1) AND (r1.id1 = id)) - Rows Removed by Filter: 799 - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id)) - Columnar 
Chunk Groups Removed by Filter: 19 - -> Seq Scan on r2 (actual rows=5 loops=3) - -> Seq Scan on r3 (actual rows=5 loops=3) -(16 rows) - -set columnar.planner_debug_level to default; -SELECT * FROM r1, r2, r3, coltest WHERE - id1 = id2 AND id2 = id3 AND id3 = id AND - n1 > x1 AND n2 > x2 AND n3 > x3; - id1 | n1 | id2 | n2 | id3 | n3 | id | x1 | x2 | x3 ---------------------------------------------------------------------- - 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | 12340 | 123400 | 1234000 - 9101 | 176000 | 9101 | 1760002 | 9101 | 17600075 | 9101 | 91010 | 910100 | 9101000 - 18942 | 189430 | 18942 | 1894302 | 18942 | 18943075 | 18942 | 189420 | 1894200 | 18942000 -(3 rows) - --- test partitioning parameterization -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT * FROM r1, coltest_part WHERE - id1 = id AND n1 > x1; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - -> Seq Scan on r1 (actual rows=5 loops=1) - -> Append (actual rows=1 loops=5) - -> Custom Scan (ColumnarScan) on coltest_part0 (actual rows=1 loops=3) - Filter: ((r1.n1 > x1) AND (r1.id1 = id)) - Rows Removed by Filter: 999 - Columnar Projected Columns: id, x1, x2, x3 - Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id)) - Columnar Chunk Groups Removed by Filter: 9 - -> Seq Scan on coltest_part1 (actual rows=0 loops=2) - Filter: ((r1.n1 > x1) AND (r1.id1 = id)) - Rows Removed by Filter: 10000 -(12 rows) - -SELECT * FROM r1, coltest_part WHERE - id1 = id AND n1 > x1; - id1 | n1 | id | x1 | x2 | x3 ---------------------------------------------------------------------- - 1234 | 12350 | 1234 | 12340 | 123400 | 1234000 - 9101 | 176000 | 9101 | 91010 | 910100 | 9101000 - 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000 -(3 rows) - -set enable_mergejoin to default; -set enable_hashjoin to default; -set enable_material to default; -set columnar.planner_debug_level = 'notice'; -alter table coltest add column x5 int default (random()*20000)::int; -analyze coltest; --- test that expressions on whole-row references are not pushed down -select * from coltest where coltest = (1,1,1,1); -NOTICE: columnar planner: cannot push down clause: var is whole-row reference or system column -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: unparameterized; 0 clauses pushed down - id | x1 | x2 | x3 | x5 ---------------------------------------------------------------------- -(0 rows) - --- test that expressions on uncorrelated attributes are not pushed down -set columnar.qual_pushdown_correlation to default; -select * from coltest where x5 = 23484; -NOTICE: columnar planner: cannot push down clause: absolute correlation (X.YZ) of var attribute 5 is smaller than the value configured in "columnar.qual_pushdown_correlation_threshold" (0.900) -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: unparameterized; 0 clauses pushed down - id | x1 | x2 | x3 | x5 ---------------------------------------------------------------------- -(0 rows) - --- test that expressions on volatile functions are not pushed down -create function vol() returns int language plpgsql as $$ -BEGIN - RETURN 1; -END; -$$; -select * from coltest where x3 = vol(); -NOTICE: columnar planner: cannot push down clause: expr contains volatile functions -NOTICE: columnar planner: adding CustomScan path for coltest -DETAIL: unparameterized; 0 clauses pushed down - id | x1 | x2 | x3 | x5 
---------------------------------------------------------------------- -(0 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) - SELECT * FROM coltest c1 WHERE ceil(x1) > 4222; -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: adding CustomScan path for c1 -DETAIL: unparameterized; 0 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on coltest c1 (actual rows=19577 loops=1) - Filter: (ceil((x1)::double precision) > '4222'::double precision) - Rows Removed by Filter: 423 - Columnar Projected Columns: id, x1, x2, x3, x5 -(4 rows) - -set columnar.planner_debug_level to default; --- --- https://github.com/citusdata/citus/issues/4488 --- -create table columnar_prepared_stmt (x int, y int) using columnar; -insert into columnar_prepared_stmt select s, s from generate_series(1,5000000) s; -prepare foo (int) as select x from columnar_prepared_stmt where x = $1; -execute foo(3); - x ---------------------------------------------------------------------- - 3 -(1 row) - -execute foo(3); - x ---------------------------------------------------------------------- - 3 -(1 row) - -execute foo(3); - x ---------------------------------------------------------------------- - 3 -(1 row) - -execute foo(3); - x ---------------------------------------------------------------------- - 3 -(1 row) - -select filtered_row_count('execute foo(3)'); - filtered_row_count ---------------------------------------------------------------------- - 9999 -(1 row) - -select filtered_row_count('execute foo(3)'); - filtered_row_count ---------------------------------------------------------------------- - 9999 -(1 row) - -select filtered_row_count('execute foo(3)'); - filtered_row_count ---------------------------------------------------------------------- - 9999 -(1 row) - -select filtered_row_count('execute foo(3)'); - filtered_row_count ---------------------------------------------------------------------- - 9999 -(1 row) - -drop table columnar_prepared_stmt; --- --- https://github.com/citusdata/citus/issues/5258 --- -set default_table_access_method to columnar; -CREATE TABLE atest1 ( a int, b text ); -CREATE TABLE atest2 (col1 varchar(10), col2 boolean); -INSERT INTO atest1 VALUES (1, 'one'); -SELECT * FROM atest1; -- ok - a | b ---------------------------------------------------------------------- - 1 | one -(1 row) - -SELECT * FROM atest2; -- ok - col1 | col2 ---------------------------------------------------------------------- -(0 rows) - -INSERT INTO atest1 VALUES (2, 'two'); -- ok -INSERT INTO atest1 SELECT 1, b FROM atest1; -- ok -SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); - col1 | col2 ---------------------------------------------------------------------- -(0 rows) - -CREATE TABLE t1 (name TEXT, n INTEGER); -CREATE TABLE t2 (name TEXT, n INTEGER); -CREATE TABLE t3 (name TEXT, n INTEGER); -INSERT INTO t1 VALUES ( 'bb', 11 ); -INSERT INTO t2 VALUES ( 'bb', 12 ); -INSERT INTO t2 VALUES ( 'cc', 22 ); -INSERT INTO t2 VALUES ( 'ee', 42 ); -INSERT INTO t3 VALUES ( 'bb', 13 ); -INSERT INTO t3 VALUES ( 'cc', 23 ); -INSERT INTO t3 VALUES ( 'dd', 33 ); -SELECT * FROM -(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1 -NATURAL INNER JOIN -(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 -NATURAL INNER JOIN -(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; - name | s1_n | s1_1 
| s2_n | s2_2 | s3_n | s3_2 ---------------------------------------------------------------------- - bb | 11 | 1 | 12 | 2 | 13 | 3 -(1 row) - -CREATE TABLE numrange_test (nr NUMRANGE); -INSERT INTO numrange_test VALUES('[,)'); -INSERT INTO numrange_test VALUES('[3,]'); -INSERT INTO numrange_test VALUES('[, 5)'); -INSERT INTO numrange_test VALUES(numrange(1.1, 2.2)); -INSERT INTO numrange_test VALUES('empty'); -INSERT INTO numrange_test VALUES(numrange(1.7, 1.7, '[]')); -create table numrange_test2(nr numrange); -INSERT INTO numrange_test2 VALUES('[, 5)'); -INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); -INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); -INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2,'()')); -INSERT INTO numrange_test2 VALUES('empty'); -set enable_nestloop=t; -set enable_hashjoin=f; -set enable_mergejoin=f; -select * from numrange_test natural join numrange_test2 order by nr; - nr ---------------------------------------------------------------------- - empty - (,5) - [1.1,2.2) - [1.1,2.2) -(4 rows) - -DROP TABLE atest1, atest2, t1, t2, t3, numrange_test, numrange_test2; -set default_table_access_method to default; -set columnar.planner_debug_level to notice; -BEGIN; - SET LOCAL columnar.stripe_row_limit = 2000; - SET LOCAL columnar.chunk_group_row_limit = 1000; - create table pushdown_test (a int, b int) using columnar; - insert into pushdown_test values (generate_series(1, 200000)); -COMMIT; -SET columnar.max_custom_scan_paths TO 50; -SET columnar.qual_pushdown_correlation_threshold TO 0.0; -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=2 loops=1) - Filter: ((a = 204356) OR (a = 104356) OR (a = 76556)) - Rows Removed by Filter: 1998 - Columnar Projected Columns: a - Columnar Chunk Group Filters: ((a = 204356) OR (a = 104356) OR (a = 76556)) - Columnar Chunk Groups Removed by Filter: 198 -(7 rows) - -SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556; -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - 180912 -(1 row) - -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1) - Filter: ((a = 194356) OR (a = 104356) OR (a = 76556)) - Rows Removed by Filter: 2997 - Columnar Projected Columns: a - Columnar Chunk Group Filters: ((a = 194356) OR (a = 104356) OR (a = 76556)) - Columnar Chunk Groups Removed by Filter: 197 -(7 rows) - -SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556; -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - 375268 -(1 row) - -EXPLAIN 
(analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b; -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 0 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1) - Filter: ((a = 204356) OR (a > ((a * '-1'::integer) + b))) - Rows Removed by Filter: 200000 - Columnar Projected Columns: a, b -(5 rows) - -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=38998 loops=1) - Filter: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000))) - Rows Removed by Filter: 2 - Columnar Projected Columns: a - Columnar Chunk Group Filters: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000))) - Columnar Chunk Groups Removed by Filter: 161 -(7 rows) - -SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000); -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - 1099459500 -(1 row) - -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above -NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 0 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=200000 loops=1) - Filter: ((((a)::double precision > random()) AND (a < (2 * a))) OR (a > 100)) - Columnar Projected Columns: a -(4 rows) - -SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not 
reference this rel -NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above -NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 0 clauses pushed down - sum ---------------------------------------------------------------------- - 20000100000 -(1 row) - -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3010 loops=1) - Filter: ((((a)::double precision > random()) AND (a <= 2000)) OR (a > 198990)) - Rows Removed by Filter: 990 - Columnar Projected Columns: a - Columnar Chunk Group Filters: ((a <= 2000) OR (a > 198990)) - Columnar Chunk Groups Removed by Filter: 196 -(7 rows) - -SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - 203491455 -(1 row) - -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test where -( - a > random() - and - ( - (a < 200 and a not in (select a from pushdown_test)) or - (a > 1000 and a < 2000) - ) -) -or -(a > 200000-2010); -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must not contain a subplan -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3009 loops=1) - Filter: ((((a)::double precision > random()) AND (((a < 200) AND (NOT (SubPlan 1))) OR ((a > 1000) AND (a < 2000)))) OR (a > 197990)) - Rows Removed by Filter: 1991 - Columnar Projected Columns: a - Columnar Chunk Group Filters: (((a < 200) OR ((a > 1000) AND (a < 2000))) OR (a > 197990)) - Columnar Chunk Groups Removed by Filter: 195 - SubPlan 1 - -> Materialize (actual rows=100 loops=199) - -> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1) - Columnar Projected Columns: a -(11 rows) - -SELECT sum(a) FROM pushdown_test where -( - a > random() - and - ( - (a < 200 and a not in (select a from pushdown_test)) or - (a > 1000 and a < 2000) - ) -) -or -(a > 200000-2010); -NOTICE: columnar planner: adding 
CustomScan path for pushdown_test -DETAIL: unparameterized; 0 clauses pushed down -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must not contain a subplan -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - 401479455 -(1 row) - -create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as -$$ BEGIN RETURN 1+arg; END; $$; -EXPLAIN (analyze on, costs off, timing off, summary off) -SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - QUERY PLAN ---------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1) - Filter: ((a < 6001) AND ((a)::double precision = random()) AND (a < stable_1(a))) - Rows Removed by Filter: 6000 - Columnar Projected Columns: a - Columnar Chunk Group Filters: (a < 6001) - Columnar Chunk Groups Removed by Filter: 194 -(7 rows) - -SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000)); -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var' -HINT: Var must only reference this rel, and Expr must not reference this rel -NOTICE: columnar planner: adding CustomScan path for pushdown_test -DETAIL: unparameterized; 1 clauses pushed down - sum ---------------------------------------------------------------------- - -(1 row) - -RESET columnar.max_custom_scan_paths; -RESET columnar.qual_pushdown_correlation_threshold; -RESET columnar.planner_debug_level; -DROP TABLE pushdown_test; diff --git a/src/test/regress/expected/columnar_partitioning_1.out b/src/test/regress/expected/columnar_partitioning_1.out deleted file mode 100644 index 4a9776db7..000000000 --- a/src/test/regress/expected/columnar_partitioning_1.out +++ /dev/null @@ -1,517 +0,0 @@ -CREATE TABLE parent(ts timestamptz, i int, n numeric, s text) - PARTITION BY RANGE (ts); --- row partitions -CREATE TABLE p0 PARTITION OF parent - FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); -CREATE TABLE p1 PARTITION OF parent - FOR VALUES FROM ('2020-02-01') TO ('2020-03-01'); -CREATE TABLE p2 PARTITION OF parent - FOR VALUES FROM ('2020-03-01') TO ('2020-04-01'); -CREATE TABLE p3 PARTITION OF parent - FOR VALUES FROM ('2020-04-01') TO ('2020-05-01'); -INSERT INTO parent SELECT '2020-01-15', 10, 100, 'one thousand' - FROM generate_series(1,100000); -INSERT INTO parent SELECT '2020-02-15', 20, 200, 'two thousand' - FROM generate_series(1,100000); -INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand' - FROM generate_series(1,100000); -INSERT INTO parent 
SELECT '2020-04-15', 30, 300, 'three thousand' - FROM generate_series(1,100000); --- run parallel plans -SET force_parallel_mode = regress; -SET min_parallel_table_scan_size = 1; -SET parallel_tuple_cost = 0; -SET max_parallel_workers = 4; -SET max_parallel_workers_per_gather = 4; -EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent; - QUERY PLAN ---------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Append - -> Parallel Seq Scan on p0 - -> Parallel Seq Scan on p1 - -> Parallel Seq Scan on p2 - -> Parallel Seq Scan on p3 -(9 rows) - -SELECT count(*), sum(i), min(i), max(i) FROM parent; - count | sum | min | max ---------------------------------------------------------------------- - 400000 | 9000000 | 10 | 30 -(1 row) - --- set older partitions as columnar -SELECT alter_table_set_access_method('p0','columnar'); -NOTICE: creating a new table for public.p0 -NOTICE: moving the data of public.p0 -NOTICE: dropping the old public.p0 -NOTICE: renaming the new table to public.p0 - alter_table_set_access_method ---------------------------------------------------------------------- - -(1 row) - -SELECT alter_table_set_access_method('p1','columnar'); -NOTICE: creating a new table for public.p1 -NOTICE: moving the data of public.p1 -NOTICE: dropping the old public.p1 -NOTICE: renaming the new table to public.p1 - alter_table_set_access_method ---------------------------------------------------------------------- - -(1 row) - -SELECT alter_table_set_access_method('p3','columnar'); -NOTICE: creating a new table for public.p3 -NOTICE: moving the data of public.p3 -NOTICE: dropping the old public.p3 -NOTICE: renaming the new table to public.p3 - alter_table_set_access_method ---------------------------------------------------------------------- - -(1 row) - --- should also be parallel plan -EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent; - QUERY PLAN ---------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Append - -> Custom Scan (ColumnarScan) on p3 - Columnar Projected Columns: i - -> Custom Scan (ColumnarScan) on p0 - Columnar Projected Columns: i - -> Custom Scan (ColumnarScan) on p1 - Columnar Projected Columns: i - -> Parallel Seq Scan on p2 -(12 rows) - -SELECT count(*), sum(i), min(i), max(i) FROM parent; - count | sum | min | max ---------------------------------------------------------------------- - 400000 | 9000000 | 10 | 30 -(1 row) - --- and also parallel without custom scan -SET columnar.enable_custom_scan = FALSE; -EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent; - QUERY PLAN ---------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Append - -> Seq Scan on p3 - -> Seq Scan on p0 - -> Seq Scan on p1 - -> Parallel Seq Scan on p2 -(9 rows) - -SELECT count(*), sum(i), min(i), max(i) FROM parent; - count | sum | min | max ---------------------------------------------------------------------- - 400000 | 9000000 | 10 | 30 -(1 row) - -SET columnar.enable_custom_scan TO DEFAULT; -SET force_parallel_mode TO DEFAULT; -SET min_parallel_table_scan_size TO DEFAULT; -SET parallel_tuple_cost TO DEFAULT; -SET max_parallel_workers TO DEFAULT; -SET max_parallel_workers_per_gather TO DEFAULT; -CREATE INDEX parent_btree ON parent 
(n); -ANALYZE parent; --- will use columnar custom scan on columnar partitions but index --- scan on heap partition -EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent -WHERE ts > '2020-02-20' AND n < 5; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - -> Append - -> Custom Scan (ColumnarScan) on p1 - Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) - Columnar Projected Columns: ts, i, n - Columnar Chunk Group Filters: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) - -> Index Scan using p2_n_idx on p2 - Index Cond: (n < '5'::numeric) - Filter: (ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) - -> Custom Scan (ColumnarScan) on p3 - Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) - Columnar Projected Columns: ts, i, n - Columnar Chunk Group Filters: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) -(13 rows) - -BEGIN; - SET LOCAL columnar.enable_custom_scan TO 'OFF'; - -- now that we disabled columnar custom scan, will use seq scan on columnar - -- partitions since index scan is more expensive than seq scan too - EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent - WHERE ts > '2020-02-20' AND n < 5; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - -> Append - -> Seq Scan on p1 - Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) - -> Index Scan using p2_n_idx on p2 - Index Cond: (n < '5'::numeric) - Filter: (ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) - -> Seq Scan on p3 - Filter: ((ts > 'Thu Feb 20 00:00:00 2020 PST'::timestamp with time zone) AND (n < '5'::numeric)) -(9 rows) - -ROLLBACK; -DROP TABLE parent; --- --- Test inheritance --- -CREATE TABLE i_row(i int); -INSERT INTO i_row VALUES(100); -CREATE TABLE i_col(i int) USING columnar; -INSERT INTO i_col VALUES(200); -CREATE TABLE ij_row_row(j int) INHERITS(i_row); -INSERT INTO ij_row_row VALUES(300, 1000); -CREATE TABLE ij_row_col(j int) INHERITS(i_row) USING columnar; -INSERT INTO ij_row_col VALUES(400, 2000); -CREATE TABLE ij_col_row(j int) INHERITS(i_col); -INSERT INTO ij_col_row VALUES(500, 3000); -CREATE TABLE ij_col_col(j int) INHERITS(i_col) USING columnar; -INSERT INTO ij_col_col VALUES(600, 4000); -EXPLAIN (costs off) SELECT * FROM i_row; - QUERY PLAN ---------------------------------------------------------------------- - Append - -> Seq Scan on i_row - -> Seq Scan on ij_row_row - -> Custom Scan (ColumnarScan) on ij_row_col - Columnar Projected Columns: i -(5 rows) - -SELECT * FROM i_row; - i ---------------------------------------------------------------------- - 100 - 300 - 400 -(3 rows) - -EXPLAIN (costs off) SELECT * FROM ONLY i_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on i_row -(1 row) - -SELECT * FROM ONLY i_row; - i ---------------------------------------------------------------------- - 100 -(1 row) - -EXPLAIN (costs off) SELECT * FROM i_col; - QUERY PLAN ---------------------------------------------------------------------- - Append - -> Custom Scan (ColumnarScan) on i_col - Columnar Projected Columns: i - -> Seq Scan on ij_col_row - -> Custom Scan (ColumnarScan) on ij_col_col - Columnar Projected Columns: i -(6 rows) - -SELECT * FROM i_col; - i 
---------------------------------------------------------------------- - 200 - 500 - 600 -(3 rows) - -EXPLAIN (costs off) SELECT * FROM ONLY i_col; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on i_col - Columnar Projected Columns: i -(2 rows) - -SELECT * FROM ONLY i_col; - i ---------------------------------------------------------------------- - 200 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_row_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_row_row -(1 row) - -SELECT * FROM ij_row_row; - i | j ---------------------------------------------------------------------- - 300 | 1000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_row_col; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on ij_row_col - Columnar Projected Columns: i, j -(2 rows) - -SELECT * FROM ij_row_col; - i | j ---------------------------------------------------------------------- - 400 | 2000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_col_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_col_row -(1 row) - -SELECT * FROM ij_col_row; - i | j ---------------------------------------------------------------------- - 500 | 3000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_col_col; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (ColumnarScan) on ij_col_col - Columnar Projected Columns: i, j -(2 rows) - -SELECT * FROM ij_col_col; - i | j ---------------------------------------------------------------------- - 600 | 4000 -(1 row) - -SET columnar.enable_custom_scan = FALSE; -EXPLAIN (costs off) SELECT * FROM i_row; - QUERY PLAN ---------------------------------------------------------------------- - Append - -> Seq Scan on i_row - -> Seq Scan on ij_row_row - -> Seq Scan on ij_row_col -(4 rows) - -SELECT * FROM i_row; - i ---------------------------------------------------------------------- - 100 - 300 - 400 -(3 rows) - -EXPLAIN (costs off) SELECT * FROM ONLY i_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on i_row -(1 row) - -SELECT * FROM ONLY i_row; - i ---------------------------------------------------------------------- - 100 -(1 row) - -EXPLAIN (costs off) SELECT * FROM i_col; - QUERY PLAN ---------------------------------------------------------------------- - Append - -> Seq Scan on i_col - -> Seq Scan on ij_col_row - -> Seq Scan on ij_col_col -(4 rows) - -SELECT * FROM i_col; - i ---------------------------------------------------------------------- - 200 - 500 - 600 -(3 rows) - -EXPLAIN (costs off) SELECT * FROM ONLY i_col; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on i_col -(1 row) - -SELECT * FROM ONLY i_col; - i ---------------------------------------------------------------------- - 200 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_row_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_row_row -(1 row) - -SELECT * FROM ij_row_row; - i | j ---------------------------------------------------------------------- - 300 | 1000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_row_col; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_row_col -(1 row) - -SELECT * FROM ij_row_col; - i | j 
---------------------------------------------------------------------- - 400 | 2000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_col_row; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_col_row -(1 row) - -SELECT * FROM ij_col_row; - i | j ---------------------------------------------------------------------- - 500 | 3000 -(1 row) - -EXPLAIN (costs off) SELECT * FROM ij_col_col; - QUERY PLAN ---------------------------------------------------------------------- - Seq Scan on ij_col_col -(1 row) - -SELECT * FROM ij_col_col; - i | j ---------------------------------------------------------------------- - 600 | 4000 -(1 row) - -SET columnar.enable_custom_scan TO DEFAULT; --- remove the child table from the inheritance hierarchy table -ALTER TABLE ij_row_row NO INHERIT i_row; -DROP TABLE ij_row_row; -DROP TABLE i_row CASCADE; -NOTICE: drop cascades to table ij_row_col -DROP TABLE i_col CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table ij_col_row -drop cascades to table ij_col_col --- --- https://github.com/citusdata/citus/issues/5257 --- -set default_table_access_method to columnar; -CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a); -CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250); -CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600); -CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500); -INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0; -CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b); -CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250); -CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500); -CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600); -INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0; -SET enable_partitionwise_join to true; -EXPLAIN (costs off, timing off, summary off) -SELECT * FROM - prt1 t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) - FROM prt1 t2 - JOIN prt2 t3 ON (t2.a = t3.b) - ) ss - ON t1.a = ss.t2a WHERE t1.b = 0 - ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop Left Join - -> Custom Scan (ColumnarScan) on prt1_p1 t1 - Filter: (b = 0) - Columnar Projected Columns: a, b, c - Columnar Chunk Group Filters: (b = 0) - -> Hash Join - Hash Cond: (t2.a = t3.b) - -> Custom Scan (ColumnarScan) on prt1_p1 t2 - Filter: (t1.a = a) - Columnar Projected Columns: a - Columnar Chunk Group Filters: (t1.a = a) - -> Hash - -> Custom Scan (ColumnarScan) on prt2_p1 t3 - Columnar Projected Columns: a, b - -> Nested Loop Left Join - -> Custom Scan (ColumnarScan) on prt1_p2 t1_1 - Filter: (b = 0) - Columnar Projected Columns: a, b, c - Columnar Chunk Group Filters: (b = 0) - -> Hash Join - Hash Cond: (t2_1.a = t3_1.b) - -> Custom Scan (ColumnarScan) on prt1_p2 t2_1 - Filter: (t1_1.a = a) - Columnar Projected Columns: a - Columnar Chunk Group Filters: (t1_1.a = a) - -> Hash - -> Custom Scan (ColumnarScan) on prt2_p2 t3_1 - Columnar Projected Columns: a, b - -> Nested Loop Left Join - -> Custom Scan (ColumnarScan) on prt1_p3 t1_2 - Filter: (b = 0) - Columnar Projected Columns: a, b, c - Columnar Chunk Group Filters: (b = 0) - -> Hash Join - Hash Cond: (t2_2.a = t3_2.b) - -> Custom Scan (ColumnarScan) on prt1_p3 t2_2 - Filter: 
(t1_2.a = a)
-                           Columnar Projected Columns: a
-                           Columnar Chunk Group Filters: (t1_2.a = a)
-                     ->  Hash
-                           ->  Custom Scan (ColumnarScan) on prt2_p3 t3_2
-                                 Columnar Projected Columns: a, b
-(45 rows)
-
-SELECT * FROM
-  prt1 t1 LEFT JOIN LATERAL
-  (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b)
-   FROM prt1 t2
-   JOIN prt2 t3 ON (t2.a = t3.b)
-  ) ss
-  ON t1.a = ss.t2a WHERE t1.b = 0
-  ORDER BY t1.a;
-  a  | b |  c   | t2a | t3a | least
----------------------------------------------------------------------
-   0 | 0 | 0000 |   0 |   0 |     0
-  50 | 0 | 0050 |     |     |
- 100 | 0 | 0100 |     |     |
- 150 | 0 | 0150 | 150 |   0 |   150
- 200 | 0 | 0200 |     |     |
- 250 | 0 | 0250 |     |     |
- 300 | 0 | 0300 | 300 |   0 |   300
- 350 | 0 | 0350 |     |     |
- 400 | 0 | 0400 |     |     |
- 450 | 0 | 0450 | 450 |   0 |   450
- 500 | 0 | 0500 |     |     |
- 550 | 0 | 0550 |     |     |
-(12 rows)
-
-set default_table_access_method to default;
-SET enable_partitionwise_join to default;
-DROP TABLE prt1;
-DROP TABLE prt2;
diff --git a/src/test/regress/expected/failure_on_create_subscription_0.out b/src/test/regress/expected/failure_on_create_subscription_0.out
deleted file mode 100644
index 7ea3ee23f..000000000
--- a/src/test/regress/expected/failure_on_create_subscription_0.out
+++ /dev/null
@@ -1,103 +0,0 @@
---
--- failure_on_create_subscription
---
--- Since the result of these tests depends on the success of the
--- background process that creates the replication slot on the
--- publisher, these tests are kept separate.
-CREATE SCHEMA IF NOT EXISTS move_shard;
-SET SEARCH_PATH = move_shard;
-SET citus.shard_count TO 4;
-SET citus.next_shard_id TO 100;
-SET citus.shard_replication_factor TO 1;
-SELECT pg_backend_pid() as pid \gset
-SELECT citus.mitmproxy('conn.allow()');
- mitmproxy
---------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE t(id int PRIMARY KEY, int_data int, data text);
-SELECT create_distributed_table('t', 'id');
- create_distributed_table
---------------------------------------------------------------------
-
-(1 row)
-
-CREATE VIEW shards_in_workers AS
-SELECT shardid,
-       (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker
-FROM pg_dist_placement NATURAL JOIN pg_dist_node
-ORDER BY 1,2 ASC;
--- Insert some data
-INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x);
--- Initial shard placements
-SELECT * FROM shards_in_workers;
- shardid | worker
---------------------------------------------------------------------
-     100 | worker2
-     101 | worker1
-     102 | worker2
-     103 | worker1
-(4 rows)
-
--- failure on creating the subscription
-SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()');
- mitmproxy
---------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port);
-WARNING: could not drop the replication slot "citus_shard_move_subscription" on publisher
-DETAIL: The error was: ERROR: replication slot "citus_shard_move_subscription" does not exist
-CONTEXT: while executing command on localhost:xxxxx
-ERROR: connection not open
-CONTEXT: while executing command on localhost:xxxxx
--- Verify that the shard is not moved and the number of rows is still 100k
-SELECT * FROM shards_in_workers;
- shardid | worker
---------------------------------------------------------------------
-     100 | worker2
-     101 | worker1
-     102 | worker2
-     103 | worker1
-(4 rows)
-
-SELECT count(*) FROM t;
- count
---------------------------------------------------------------------- - 100000 -(1 row) - --- Verify that shard can be moved after a temporary failure -SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); - master_move_shard_placement ---------------------------------------------------------------------- - -(1 row) - -SELECT * FROM shards_in_workers; - shardid | worker ---------------------------------------------------------------------- - 100 | worker2 - 101 | worker2 - 102 | worker2 - 103 | worker1 -(4 rows) - -SELECT count(*) FROM t; - count ---------------------------------------------------------------------- - 100000 -(1 row) - -DROP SCHEMA move_shard CASCADE ; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table t -drop cascades to view shards_in_workers diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index fb4c870bb..697b5b190 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -1,8 +1,6 @@ --- We have two different output files for this failure test because the --- failure behaviour of SAVEPOINT and RELEASE commands are different if --- we use the executor. If we use it, these commands error out if any of --- the placement commands fail. Otherwise, we might mark the placement --- as invalid and continue with a WARNING. +-- +-- FAILURE_SAVEPOINTS +-- SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out deleted file mode 100644 index 7e87f1840..000000000 --- a/src/test/regress/expected/failure_vacuum_1.out +++ /dev/null @@ -1,126 +0,0 @@ --- We have different output files for the executor. This is because --- we don't mark transactions with ANALYZE as critical anymore, and --- get WARNINGs instead of ERRORs. 
-SET citus.next_shard_id TO 12000000; -SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SET citus.shard_count = 1; -SET citus.shard_replication_factor = 2; -- one shard per worker -SET citus.multi_shard_commit_protocol TO '1pc'; -CREATE TABLE vacuum_test (key int, value int); -SELECT create_distributed_table('vacuum_test', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT citus.clear_network_traffic(); - clear_network_traffic ---------------------------------------------------------------------- - -(1 row) - -SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -VACUUM vacuum_test; -ERROR: connection error: localhost:xxxxx -DETAIL: server closed the connection unexpectedly -SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -ANALYZE vacuum_test; -WARNING: connection error: localhost:xxxxx -DETAIL: server closed the connection unexpectedly -SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -ANALYZE vacuum_test; --- ANALYZE transactions being critical is an open question, see #2430 --- show that we marked as INVALID on COMMIT FAILURE -SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND -shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass); - shardid | shardstate ---------------------------------------------------------------------- - 12000000 | 3 -(1 row) - -UPDATE pg_dist_shard_placement SET shardstate = 1 -WHERE shardid IN ( - SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass -); --- the same tests with cancel -SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -VACUUM vacuum_test; -ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -ANALYZE vacuum_test; -ERROR: canceling statement due to user request --- cancel during COMMIT should be ignored -SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -ANALYZE vacuum_test; -SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE other_vacuum_test (key int, value int); -SELECT create_distributed_table('other_vacuum_test', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -VACUUM vacuum_test, other_vacuum_test; -ERROR: syntax error at or near "," -SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); - mitmproxy 
---------------------------------------------------------------------- - -(1 row) - -VACUUM vacuum_test, other_vacuum_test; -ERROR: syntax error at or near "," --- ==== Clean up, we're done here ==== -SELECT citus.mitmproxy('conn.allow()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE vacuum_test, other_vacuum_test; diff --git a/src/test/regress/expected/isolation_logical_replication_0.out b/src/test/regress/expected/isolation_logical_replication_0.out deleted file mode 100644 index 434260965..000000000 --- a/src/test/regress/expected/isolation_logical_replication_0.out +++ /dev/null @@ -1,223 +0,0 @@ -Parsed test spec with 3 sessions - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-insert s3-release-advisory-lock s1-end -step s3-acquire-advisory-lock: - SELECT pg_advisory_lock(44000, 55152); - -pg_advisory_lock - - -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-insert: - INSERT INTO logical_replicate_placement VALUES (15, 15); - -step s3-release-advisory-lock: - SELECT pg_advisory_unlock(44000, 55152); - -pg_advisory_unlock - -t -step s1-end: - COMMIT; - -step s2-insert: <... completed> - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-upsert s3-release-advisory-lock s1-end -step s3-acquire-advisory-lock: - SELECT pg_advisory_lock(44000, 55152); - -pg_advisory_lock - - -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-upsert: - INSERT INTO logical_replicate_placement VALUES (15, 15); - - INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; - -step s3-release-advisory-lock: - SELECT pg_advisory_unlock(44000, 55152); - -pg_advisory_unlock - -t -step s1-end: - COMMIT; - -step s2-upsert: <... completed> - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-update s3-release-advisory-lock s1-end -step s3-acquire-advisory-lock: - SELECT pg_advisory_lock(44000, 55152); - -pg_advisory_lock - - -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-update: - UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; - -step s3-release-advisory-lock: - SELECT pg_advisory_unlock(44000, 55152); - -pg_advisory_unlock - -t -step s1-end: - COMMIT; - -step s2-update: <... completed> - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-delete s3-release-advisory-lock s1-end -step s3-acquire-advisory-lock: - SELECT pg_advisory_lock(44000, 55152); - -pg_advisory_lock - - -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-delete: - DELETE FROM logical_replicate_placement WHERE x = 15; - -step s3-release-advisory-lock: - SELECT pg_advisory_unlock(44000, 55152); - -pg_advisory_unlock - -t -step s1-end: - COMMIT; - -step s2-delete: <... 
completed> - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-copy s3-release-advisory-lock s1-end -step s3-acquire-advisory-lock: - SELECT pg_advisory_lock(44000, 55152); - -pg_advisory_lock - - -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-copy: - COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; - -step s3-release-advisory-lock: - SELECT pg_advisory_unlock(44000, 55152); - -pg_advisory_unlock - -t -step s1-end: - COMMIT; - -step s2-copy: <... completed> - -starting permutation: s1-begin s1-move-placement s2-truncate s1-end -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-truncate: - TRUNCATE logical_replicate_placement; - -step s1-end: - COMMIT; - -step s2-truncate: <... completed> - -starting permutation: s1-begin s1-move-placement s2-alter-table s1-end -step s1-begin: - BEGIN; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s2-alter-table: - ALTER TABLE logical_replicate_placement ADD COLUMN z INT; - -step s1-end: - COMMIT; - -step s2-alter-table: <... completed> - -starting permutation: s1-begin s2-truncate s1-move-placement s1-end -step s1-begin: - BEGIN; - -step s2-truncate: - TRUNCATE logical_replicate_placement; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s1-end: - COMMIT; - - -starting permutation: s1-begin s2-alter-table s1-move-placement s1-end -step s1-begin: - BEGIN; - -step s2-alter-table: - ALTER TABLE logical_replicate_placement ADD COLUMN z INT; - -step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); - -master_move_shard_placement - - -step s1-end: - COMMIT; - diff --git a/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out b/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out deleted file mode 100644 index 7c39e576a..000000000 --- a/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out +++ /dev/null @@ -1,4 +0,0 @@ -Parsed test spec with 3 sessions - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end -setup failed: ERROR: primary key constraints are not supported on partitioned tables diff --git a/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out b/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out deleted file mode 100644 index 87b003946..000000000 --- a/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out +++ /dev/null @@ -1,4 +0,0 @@ -Parsed test spec with 3 sessions - -starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end -setup failed: ERROR: syntax error at or near "PARTITION" diff --git a/src/test/regress/sql/failure_savepoints.sql b/src/test/regress/sql/failure_savepoints.sql index 29f74badf..b586bcb5c 100644 
--- a/src/test/regress/sql/failure_savepoints.sql +++ b/src/test/regress/sql/failure_savepoints.sql @@ -1,8 +1,6 @@ --- We have two different output files for this failure test because the --- failure behaviour of SAVEPOINT and RELEASE commands are different if --- we use the executor. If we use it, these commands error out if any of --- the placement commands fail. Otherwise, we might mark the placement --- as invalid and continue with a WARNING. +-- +-- FAILURE_SAVEPOINTS +-- SELECT citus.mitmproxy('conn.allow()');
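Editorial note (outside the patch, for reviewers): the deleted columnar_partitioning_1.out
hunk near the top of this patch begins mid-statement, so the setup that produced its
mixed plans is not visible here. For readers who want to reproduce the behaviour its
comments describe (ColumnarScan on the columnar partitions, an index scan on the heap
partition, and seq scans on columnar partitions once the custom scan is disabled), the
following is a minimal sketch. It assumes a PostgreSQL 12+ server with the Citus
columnar extension available; the column types, partition bounds, and data distribution
below are illustrative guesses, and only the column names (ts, i, n), the index name
p2_n_idx, and the query itself are taken from the deleted hunk.

    CREATE TABLE parent (ts timestamptz, i int, n numeric, s text)
        PARTITION BY RANGE (ts);
    -- p1 and p3 use the columnar access method; p2 stays heap and gets a
    -- btree index, which is why the plan mixes ColumnarScan and Index Scan.
    CREATE TABLE p1 PARTITION OF parent
        FOR VALUES FROM ('2020-01-01') TO ('2020-02-01') USING columnar;
    CREATE TABLE p2 PARTITION OF parent
        FOR VALUES FROM ('2020-02-01') TO ('2020-03-01');
    CREATE TABLE p3 PARTITION OF parent
        FOR VALUES FROM ('2020-03-01') TO ('2020-04-01') USING columnar;
    CREATE INDEX p2_n_idx ON p2 (n);
    -- spread rows across all three partitions
    INSERT INTO parent
        SELECT '2020-01-15'::timestamptz + (i % 75) * interval '1 day',
               i, i % 10, 'x'
        FROM generate_series(1, 100000) i;
    ANALYZE parent;
    -- expected: ColumnarScan on p1/p3, Index Scan using p2_n_idx on p2
    EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent
    WHERE ts > '2020-02-20' AND n < 5;
    -- with the custom scan disabled, columnar partitions fall back to seq scans
    BEGIN;
    SET LOCAL columnar.enable_custom_scan TO 'OFF';
    EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent
    WHERE ts > '2020-02-20' AND n < 5;
    ROLLBACK;

Whether the planner actually prefers the index over a seq scan on p2 depends on the
statistics and cost settings, so the exact plan shape may differ from the deleted
expected file at other data sizes.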