From 4012e5938a7ce0f66a5ad254c798e9669e86908a Mon Sep 17 00:00:00 2001
From: Mehmet YILMAZ
Date: Wed, 17 Sep 2025 10:46:36 +0300
Subject: [PATCH 01/14] =?UTF-8?q?PG18=20-=20normalize=20PG18=20=E2=80=9CRE?=
 =?UTF-8?q?STRICT=E2=80=9D=20FK=20error=20wording=20to=20legacy=20form=20(?=
 =?UTF-8?q?#8188)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fixes #8186
https://github.com/postgres/postgres/commit/086c84b23d99c2ad268f97508cd840efc1fdfd79

PG18 emits a more specific message for foreign-key violations when the
action is `RESTRICT` (SQLSTATE 23001), e.g. `violates RESTRICT setting of
foreign key constraint ...` and `Key (...) is referenced from table ...`.
Older versions printed the generic FK text (SQLSTATE 23503), e.g.
`violates foreign key constraint ...` and `Key (...) is still referenced
from table ...`.

This change caused noisy diffs in our regression tests (e.g.,
`multi_foreign_key.out`). To keep a single set of expected files across
PG15–PG18, this PR adds two normalization rules to the test filter:

```sed
# PG18 FK wording -> legacy generic form
s/violates RESTRICT setting of foreign key constraint/violates foreign key constraint/g
# DETAIL line: "is referenced" -> "is still referenced"
s/\<is referenced from table\>/is still referenced from table/g
```

**Scope / impact**

* Test-only change; runtime behavior is unaffected.
* Keeps outputs stable across PG15–PG18 without version-splitting expected files.
* Rules are narrowly targeted to the FK wording introduced in PG18.

CI run with this PR:
https://github.com/citusdata/citus/actions/runs/17698469722/job/50300960878#step:5:252
---
 src/test/regress/bin/normalize.sed | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed
index 91484dc75..6df3087cb 100644
--- a/src/test/regress/bin/normalize.sed
+++ b/src/test/regress/bin/normalize.sed
@@ -359,3 +359,9 @@ s/(Actual[[:space:]]+Rows:[[:space:]]*)N\.N/\1N/gI
 s/^([ \t]*)List of tables$/\1List of relations/g
 s/^([ \t]*)List of indexes$/\1List of relations/g
 s/^([ \t]*)List of sequences$/\1List of relations/g
+
+# --- PG18 FK wording -> legacy generic form ---
+# e.g., "violates RESTRICT setting of foreign key constraint" -> "violates foreign key constraint"
+s/violates RESTRICT setting of foreign key constraint/violates foreign key constraint/g
+# DETAIL line changed "is referenced" -> old "is still referenced"
+s/\<is referenced from table\>/is still referenced from table/g

From b58af1c8d584f4fc74c01bf78c16bbfaa1cc07e0 Mon Sep 17 00:00:00 2001
From: Mehmet YILMAZ
Date: Wed, 17 Sep 2025 14:12:15 +0300
Subject: [PATCH 02/14] PG18: stabilize constraint-name tests by filtering
 pg_constraint on contype (#8185)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

https://github.com/postgres/postgres/commit/14e87ffa5c543b5f30ead7413084c25f7735039f

PostgreSQL 18 now records column `NOT NULL` constraints in
`pg_constraint` (`contype = 'n'`). That means queries that previously
listed “all constraints” for a relation now return extra rows, causing
noisy diffs in Citus regression tests. This PR narrows each catalog probe
to the specific constraint type under test (PK/UNIQUE/EXCLUDE/CHECK),
keeping results stable across PG15–PG18.

## What changed

* Update `src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql` to:
  * Add an `AND con.contype = ...` filter (`'p'`, `'u'`, `'x'`, or `'c'`) to each query, matching the constraint just created (see the sketch below).
  * Join the namespace via `rel.relnamespace` for robustness.
* Refresh `src/test/regress/expected/multi_alter_table_add_constraints_without_name.out` to reflect the filtered results.
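For illustration, the filtered probe now has this shape (a minimal sketch of the PRIMARY KEY case on `products`, taken from the diff below; each test substitutes the `contype` letter for the constraint it just created):

```sql
-- Probe pg_constraint for exactly the constraint kind under test.
-- PG18's implicit "<column>_not_null" rows (contype = 'n') no longer match.
SELECT con.conname
    FROM pg_catalog.pg_constraint con
    INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
    INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace
    WHERE rel.relname = 'products'
      AND con.contype = 'p'; -- 'p' = PK, 'u' = UNIQUE, 'x' = EXCLUDE, 'c' = CHECK
```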
## Why

* PG18 adds named `NOT NULL` entries to `pg_constraint`, which previously lived only in `pg_attribute`. Tests that select from `pg_constraint` without filtering now see extra rows (e.g., `*_not_null`), breaking expectations. Filtering by `contype` validates exactly what the test intends (PK/UNIQUE/EXCLUDE/CHECK naming/propagation) and ignores unrelated `NOT NULL` rows.

```diff
diff -dU10 -w /__w/citus/citus/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out /__w/citus/citus/src/test/regress/results/multi_alter_table_add_constraints_without_name.out
--- /__w/citus/citus/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out.modified 2025-09-11 14:36:52.521254512 +0000
+++ /__w/citus/citus/src/test/regress/results/multi_alter_table_add_constraints_without_name.out.modified 2025-09-11 14:36:52.549254440 +0000
@@ -20,34 +20,36 @@
 ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no);
 SELECT con.conname
     FROM pg_catalog.pg_constraint con
     INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
     INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
     WHERE rel.relname = 'products';
            conname
 ------------------------------
  products_pkey
-(1 row)
+ products_product_no_not_null
+(2 rows)

 -- Check that the primary key name created on the coordinator is sent to workers and
 -- the constraints created for the shard tables conform to the _shardid naming scheme.
 \c - - :public_worker_1_host :worker_1_port
 SELECT con.conname
     FROM pg_catalog.pg_constraint con
     INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
     INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
     WHERE rel.relname = 'products_5410000';
                 conname
 --------------------------------------
+ products_5410000_product_no_not_null
  products_pkey_5410000
-(1 row)
+(2 rows)
```

CI run after this PR:
https://github.com/citusdata/citus/actions/runs/17697415668/job/50298622183#step:5:265
---
 ...ter_table_add_constraints_without_name.out | 666 ++++++++++--------
 ...ter_table_add_constraints_without_name.sql | 666 ++++++++++--------
 2 files changed, 766 insertions(+), 566 deletions(-)

diff --git a/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out b/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
index 6a6251f9e..6a4c17104 100644
--- a/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
+++ b/src/test/regress/expected/multi_alter_table_add_constraints_without_name.out
@@ -20,10 +20,11 @@ SELECT create_distributed_table('AT_AddConstNoName.products', 'product_no');

 ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no);
 SELECT con.conname
-     FROM pg_catalog.pg_constraint con
-     INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
-     INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
-     WHERE rel.relname = 'products';
+    FROM pg_catalog.pg_constraint con
+    INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
+    INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace
+    WHERE rel.relname = 'products'
+    AND con.contype = 'p';
     conname
 ---------------------------------------------------------------------
 products_pkey
@@ -33,10 +34,11 @@ SELECT con.conname
 -- the constraints created for the shard tables conform to the _shardid naming scheme.
\c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'p'; conname --------------------------------------------------------------------- products_pkey_5410000 @@ -45,7 +47,7 @@ SELECT con.conname \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey; -- Check "ADD PRIMARY KEY USING INDEX ..." -CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int); +CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int); SELECT create_distributed_table('AT_AddConstNoName.tbl', 'col1'); create_distributed_table --------------------------------------------------------------------- @@ -55,10 +57,11 @@ SELECT create_distributed_table('AT_AddConstNoName.tbl', 'col1'); CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1); ALTER TABLE AT_AddConstNoName.tbl ADD PRIMARY KEY USING INDEX my_index; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'tbl'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'tbl' + AND con.contype = 'p'; conname --------------------------------------------------------------------- my_index @@ -66,10 +69,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'tbl%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- my_index @@ -85,10 +90,11 @@ ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index; CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1); ALTER TABLE AT_AddConstNoName.tbl ADD UNIQUE USING INDEX my_index; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'tbl'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'tbl' + AND con.contype = 'u'; conname --------------------------------------------------------------------- my_index @@ -96,10 +102,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'tbl%'ORDER BY con.conname ASC; + FROM 
pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'tbl%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- my_index @@ -139,10 +147,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey; -- Check "ADD UNIQUE" ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_key @@ -152,10 +161,11 @@ SELECT con.conname -- the constraints created for the shard tables conform to the _shardid scheme. \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_key_5410000 @@ -166,10 +176,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key; -- Check "ADD UNIQUE" with column name list ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no,name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_name_key @@ -177,10 +188,11 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_name_key_5410000 @@ -191,10 +203,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_name_ -- Check "ADD UNIQUE ... 
INCLUDE" ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no) INCLUDE(price); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_key @@ -202,10 +215,11 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_product_no_key_5410000 @@ -245,10 +259,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key; CREATE EXTENSION btree_gist; ALTER TABLE AT_AddConstNoName.products ADD EXCLUDE USING gist (name WITH <> , product_no WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'x'; conname --------------------------------------------------------------------- products_name_product_no_excl @@ -256,10 +271,11 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'x'; conname --------------------------------------------------------------------- products_name_product_no_excl_5410000 @@ -295,10 +311,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_name_product_no_ -- Check "ADD CHECK" ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; conname --------------------------------------------------------------------- products_check @@ -306,10 +323,11 @@ SELECT con.conname \c - - 
:public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; conname --------------------------------------------------------------------- products_check_5410000 @@ -320,10 +338,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; -- Check "ADD CHECK ... NOINHERIT" ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NO INHERIT; SELECT con.conname, con.connoinherit - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; conname | connoinherit --------------------------------------------------------------------- products_check | t @@ -331,10 +350,11 @@ SELECT con.conname, con.connoinherit \c - - :public_worker_1_host :worker_1_port SELECT con.conname, con.connoinherit - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; conname | connoinherit --------------------------------------------------------------------- products_check_5410000 | t @@ -345,10 +365,11 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; -- Check "ADD CHECK ... 
NOT VALID" ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NOT VALID; SELECT con.conname, con.convalidated - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; conname | convalidated --------------------------------------------------------------------- products_check | f @@ -356,10 +377,11 @@ SELECT con.conname, con.convalidated \c - - :public_worker_1_host :worker_1_port SELECT con.conname, con.convalidated - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; conname | convalidated --------------------------------------------------------------------- products_check_5410000 | f @@ -401,10 +423,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_3 ADD CONSTRAINT products_ref_pkey PR ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_pkey1 PRIMARY KEY(name); ALTER TABLE AT_AddConstNoName.products_ref ADD PRIMARY KEY(name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'p'; conname --------------------------------------------------------------------- products_ref_pkey2 @@ -416,10 +439,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_3 ADD CONSTRAINT products_ref_name_ke ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_name_key1 UNIQUE(name); ALTER TABLE AT_AddConstNoName.products_ref ADD UNIQUE(name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'u'; conname --------------------------------------------------------------------- products_ref_name_key2 @@ -431,10 +455,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_3 ADD CONSTRAINT products_ref_product ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_product_no_excl1 EXCLUDE (product_no WITH =); ALTER TABLE AT_AddConstNoName.products_ref ADD EXCLUDE(product_no WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM 
pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'x'; conname --------------------------------------------------------------------- products_ref_product_no_excl2 @@ -446,10 +471,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_3 ADD CONSTRAINT products_ref_check ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_check1 CHECK (product_no > 0); ALTER TABLE AT_AddConstNoName.products_ref ADD CHECK (product_no > 0); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'c'; conname --------------------------------------------------------------------- products_ref_check2 @@ -473,10 +499,11 @@ SELECT create_distributed_table('AT_AddConstNoName.verylonglonglonglonglonglongl ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon ADD PRIMARY KEY(product_no); -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'p'; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey @@ -485,10 +512,12 @@ SELECT con.conname -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglo_559ab79d_5410010 @@ -503,10 +532,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon DROP CONSTRAINT verylonglonglonglonglonglonglonglonglonglonglonglonglonglo_pkey; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 
'very%' + AND con.contype = 'p'; conname --------------------------------------------------------------------- (0 rows) @@ -516,10 +546,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon ADD UNIQUE(product_no); -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u'; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglong_product_no_key @@ -528,10 +559,12 @@ SELECT con.conname -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglo_cd61b0cf_5410010 @@ -546,10 +579,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon DROP CONSTRAINT verylonglonglonglonglonglonglonglonglonglonglong_product_no_key; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u'; conname --------------------------------------------------------------------- (0 rows) @@ -559,10 +593,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon ADD EXCLUDE (product_no WITH =); -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x'; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglon_product_no_excl @@ -571,10 +606,12 @@ SELECT con.conname -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - 
:public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglo_057ed027_5410010 @@ -589,10 +626,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon DROP CONSTRAINT verylonglonglonglonglonglonglonglonglonglonglon_product_no_excl; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x'; conname --------------------------------------------------------------------- (0 rows) @@ -602,10 +640,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon ADD CHECK (product_no > 0); -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c'; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglonglonglongl_check @@ -614,10 +653,12 @@ SELECT con.conname -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- verylonglonglonglonglonglonglonglonglonglonglo_d943e063_5410010 @@ -632,10 +673,11 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon DROP CONSTRAINT verylonglonglonglonglonglonglonglonglonglonglonglonglongl_check; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON 
nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c'; conname --------------------------------------------------------------------- (0 rows) @@ -663,10 +705,11 @@ DEBUG: verifying table "p1" DEBUG: verifying table "longlonglonglonglonglonglonglonglonglonglonglonglonglonglongabc" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'p'; conname --------------------------------------------------------------------- dist_partitioned_table_pkey @@ -674,10 +717,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410018 @@ -691,10 +736,12 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_pkey; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- (0 rows) @@ -709,10 +756,11 @@ DEBUG: ALTER TABLE / ADD UNIQUE will create implicit index "longlonglonglonglon DEBUG: ALTER TABLE / ADD UNIQUE will create implicit index "p1_partition_col_key" for table "p1" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'u'; conname --------------------------------------------------------------------- 
dist_partitioned_table_partition_col_key @@ -720,10 +768,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410018 @@ -734,7 +784,7 @@ SELECT con.conname (5 rows) \c - - :master_host :master_port -ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_partition_col_key; +ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_partition_col_key; -- Check "ADD CHECK" SET client_min_messages TO DEBUG1; ALTER TABLE AT_AddConstNoName.dist_partitioned_table ADD CHECK(dist_col >= another_col); @@ -743,10 +793,11 @@ DEBUG: verifying table "p1" DEBUG: verifying table "longlonglonglonglonglonglonglonglonglonglonglonglonglonglongabc" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'c'; conname --------------------------------------------------------------------- dist_partitioned_table_check @@ -754,10 +805,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- dist_partitioned_table_check @@ -771,10 +824,12 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_check; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname 
--------------------------------------------------------------------- (0 rows) @@ -867,10 +922,12 @@ SELECT citus_add_local_table_to_metadata('AT_AddConstNoName.citus_local_table'); ALTER TABLE AT_AddConstNoName.citus_local_table ADD PRIMARY KEY(id); -- Check the primary key is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_pkey @@ -884,10 +941,12 @@ SELECT create_distributed_table('AT_AddConstNoName.citus_local_table','id'); (1 row) SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_pkey @@ -895,10 +954,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_pkey @@ -915,10 +976,12 @@ ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_tabl ALTER TABLE AT_AddConstNoName.citus_local_table ADD UNIQUE(id); -- Check the UNIQUE constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_id_key @@ -926,10 +989,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname 
LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_id_key @@ -946,20 +1011,24 @@ ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_tabl ALTER TABLE AT_AddConstNoName.citus_local_table ADD EXCLUDE(id WITH =); -- Check the EXCLUDE constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_id_excl (1 row) SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_id_excl @@ -967,10 +1036,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_id_excl @@ -987,20 +1058,24 @@ ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_tabl ALTER TABLE AT_AddConstNoName.citus_local_table ADD CHECK(id > 100); -- Check the CHECK constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_check (1 row) SELECT 
con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_check @@ -1008,10 +1083,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_table_check @@ -1051,10 +1128,11 @@ DEBUG: ALTER TABLE / ADD PRIMARY KEY will create implicit index "longlonglonglo DEBUG: ALTER TABLE / ADD PRIMARY KEY will create implicit index "p1_pkey" for table "p1" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'p'; conname --------------------------------------------------------------------- citus_local_partitioned_table_pkey @@ -1062,10 +1140,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- longlonglonglonglonglonglonglonglonglonglonglo_9e4e3069_5410038 @@ -1088,10 +1168,11 @@ DEBUG: ALTER TABLE / ADD UNIQUE will create implicit index "longlonglonglonglon DEBUG: ALTER TABLE / ADD UNIQUE will create implicit index "p1_partition_col_key" for table "p1" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace 
nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'u'; conname --------------------------------------------------------------------- citus_local_partitioned_table_partition_col_key @@ -1099,10 +1180,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- longlonglonglonglonglonglonglonglonglonglongl__d794d9f1_5410038 @@ -1122,10 +1205,11 @@ DEBUG: verifying table "longlonglonglonglonglonglonglonglonglonglonglonglonglon DEBUG: verifying table "p1" RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'c'; conname --------------------------------------------------------------------- citus_local_partitioned_table_check @@ -1133,10 +1217,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- citus_local_partitioned_table_check @@ -1150,10 +1236,12 @@ SELECT con.conname ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table DROP CONSTRAINT citus_local_partitioned_table_check; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- (0 rows) @@ -1168,12 +1256,13 @@ SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id'); (1 row) -- Check "ADD PRIMARY KEY" -ALTER TABLE 
AT_AddConstNoName."2nd table" ADD PRIMARY KEY ("2nd id", "3rd id"); +ALTER TABLE AT_AddConstNoName."2nd table" ADD PRIMARY KEY ("2nd id", "3rd id"); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'p'; conname --------------------------------------------------------------------- 2nd table_pkey @@ -1182,10 +1271,12 @@ SELECT con.conname -- Check if a primary key constraint is created for the shard tables on the workers \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- 2nd table_pkey @@ -1201,10 +1292,11 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_pkey"; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" ADD UNIQUE ("2nd id", "3rd id"); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'u'; conname --------------------------------------------------------------------- 2nd table_2nd id_3rd id_key @@ -1212,10 +1304,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- 2nd table_2nd id_3rd id_key @@ -1231,10 +1325,11 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_3rd \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" ADD EXCLUDE ("2nd id" WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 
'2nd table' + AND con.contype = 'x'; conname --------------------------------------------------------------------- 2nd table_2nd id_excl @@ -1242,10 +1337,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- 2nd table_2nd id_excl @@ -1261,10 +1358,11 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_exc \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" ADD CHECK ("2nd id" > 0 ); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'c'; conname --------------------------------------------------------------------- 2nd table_check @@ -1272,10 +1370,12 @@ SELECT con.conname \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; conname --------------------------------------------------------------------- 2nd table_check diff --git a/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql b/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql index 206decaa7..351bbfeb8 100644 --- a/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql +++ b/src/test/regress/sql/multi_alter_table_add_constraints_without_name.sql @@ -20,41 +20,46 @@ SELECT create_distributed_table('AT_AddConstNoName.products', 'product_no'); ALTER TABLE AT_AddConstNoName.products ADD PRIMARY KEY(product_no); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'p'; -- Check that the primary key name created on the coordinator is sent to workers and -- the constraints created for the shard tables conform to the _shardid naming scheme. 
\c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'p'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey; -- Check "ADD PRIMARY KEY USING INDEX ..." -CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int); +CREATE TABLE AT_AddConstNoName.tbl(col1 int, col2 int); SELECT create_distributed_table('AT_AddConstNoName.tbl', 'col1'); CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1); ALTER TABLE AT_AddConstNoName.tbl ADD PRIMARY KEY USING INDEX my_index; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'tbl'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'tbl' + AND con.contype = 'p'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'tbl%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'tbl%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index; @@ -64,17 +69,20 @@ CREATE UNIQUE INDEX my_index ON AT_AddConstNoName.tbl(col1); ALTER TABLE AT_AddConstNoName.tbl ADD UNIQUE USING INDEX my_index; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'tbl'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'tbl' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'tbl%'ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'tbl%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.tbl DROP CONSTRAINT my_index; @@ -102,19 +110,21 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_pkey; ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN 
pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; -- Check that UNIQUE constraint name created on the coordinator is sent to workers and -- the constraints created for the shard tables conform to the _shardid scheme. \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key; @@ -122,17 +132,19 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key; -- Check "ADD UNIQUE" with column name list ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no,name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_name_key; @@ -141,17 +153,19 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_name_ ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no) INCLUDE(price); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'u'; \c - - :master_host 
:master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key; @@ -186,17 +200,19 @@ CREATE EXTENSION btree_gist; ALTER TABLE AT_AddConstNoName.products ADD EXCLUDE USING gist (name WITH <> , product_no WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'x'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'x'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_name_product_no_excl; @@ -223,17 +239,19 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_name_product_no_ -- Check "ADD CHECK" ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; @@ -242,17 +260,19 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NO INHERIT; SELECT con.conname, con.connoinherit - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname, con.connoinherit - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + 
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; @@ -261,17 +281,19 @@ ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; ALTER TABLE AT_AddConstNoName.products ADD CHECK (product_no > 0 AND price > 0) NOT VALID; SELECT con.conname, con.convalidated - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname, con.convalidated - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_5410000'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_5410000' + AND con.contype = 'c'; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_check; @@ -306,10 +328,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_pkey1 P ALTER TABLE AT_AddConstNoName.products_ref ADD PRIMARY KEY(name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'p'; ALTER TABLE AT_AddConstNoName.products_ref DROP CONSTRAINT products_ref_pkey2; @@ -319,10 +342,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_name_ke ALTER TABLE AT_AddConstNoName.products_ref ADD UNIQUE(name); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'u'; ALTER TABLE AT_AddConstNoName.products_ref DROP CONSTRAINT products_ref_name_key2; @@ -332,10 +356,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_product ALTER TABLE AT_AddConstNoName.products_ref ADD EXCLUDE(product_no WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = 
rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'x'; ALTER TABLE AT_AddConstNoName.products_ref DROP CONSTRAINT products_ref_product_no_excl2; @@ -345,10 +370,11 @@ ALTER TABLE AT_AddConstNoName.products_ref_2 ADD CONSTRAINT products_ref_check1 ALTER TABLE AT_AddConstNoName.products_ref ADD CHECK (product_no > 0); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'products_ref'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'products_ref' + AND con.contype = 'c'; ALTER TABLE AT_AddConstNoName.products_ref DROP CONSTRAINT products_ref_check2; @@ -367,18 +393,21 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'p'; -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'p' + ORDER BY con.conname ASC; -- Constraint can be deleted via the coordinator \c - - :master_host :master_port @@ -386,10 +415,11 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'p'; -- Check "ADD UNIQUE" with max table name (63 chars) \c - - :master_host :master_port @@ -397,18 +427,21 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u'; -- Constraints for the 
main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u' + ORDER BY con.conname ASC; -- UNIQUE constraint can be deleted via the coordinator \c - - :master_host :master_port @@ -416,10 +449,11 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'u'; -- Check "ADD EXCLUDE" with max table name (63 chars) \c - - :master_host :master_port @@ -427,18 +461,21 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo -- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x'; -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x' + ORDER BY con.conname ASC; -- EXCLUDE constraint can be deleted via the coordinator \c - - :master_host :master_port @@ -446,28 +483,32 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'x'; -- Check "ADD CHECK" with max table name (63 chars) \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon ADD CHECK (product_no > 0); 
-- Constraint should be created on the coordinator with a shortened name SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c'; -- Constraints for the main table and the shards should be created on the worker with a shortened name \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c' + ORDER BY con.conname ASC; -- CHECK constraint can be deleted via the coordinator \c - - :master_host :master_port @@ -475,10 +516,11 @@ ALTER TABLE AT_AddConstNoName.verylonglonglonglonglonglonglonglonglonglonglonglo \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'very%'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'very%' + AND con.contype = 'c'; -- Test the scenario where a partitioned distributed table has a child with max allowed name -- Verify that we switch to sequential execution mode to avoid deadlock in this scenario @@ -494,27 +536,32 @@ ALTER TABLE AT_AddConstNoName.dist_partitioned_table ADD PRIMARY KEY(partition_c RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'p'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_pkey; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN 
pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; -- Check "ADD UNIQUE" \c - - :master_host :master_port @@ -523,20 +570,23 @@ ALTER TABLE AT_AddConstNoName.dist_partitioned_table ADD UNIQUE(partition_col); RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :master_host :master_port -ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_partition_col_key; +ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_partition_col_key; -- Check "ADD CHECK" SET client_min_messages TO DEBUG1; @@ -544,27 +594,32 @@ ALTER TABLE AT_AddConstNoName.dist_partitioned_table ADD CHECK(dist_col >= anoth RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'dist_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'dist_partitioned_table' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.dist_partitioned_table DROP CONSTRAINT dist_partitioned_table_check; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace 
nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; -- Test we error out when creating a constraint on a partition table with a long name if we cannot -- switch to sequential execution @@ -621,25 +676,31 @@ ALTER TABLE AT_AddConstNoName.citus_local_table ADD PRIMARY KEY(id); -- Check the primary key is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; SELECT create_distributed_table('AT_AddConstNoName.citus_local_table','id'); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_table_pkey; @@ -650,17 +711,21 @@ ALTER TABLE AT_AddConstNoName.citus_local_table ADD UNIQUE(id); -- Check the UNIQUE constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER 
JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_table_id_key; @@ -671,23 +736,29 @@ ALTER TABLE AT_AddConstNoName.citus_local_table ADD EXCLUDE(id WITH =); -- Check the EXCLUDE constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_table_id_excl; @@ -698,23 +769,29 @@ ALTER TABLE AT_AddConstNoName.citus_local_table ADD CHECK(id > 100); -- Check the CHECK constraint is created for the local table and its shard SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM 
pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'citus_local_table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'citus_local_table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_table DROP CONSTRAINT citus_local_table_check; @@ -739,17 +816,20 @@ ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table ADD PRIMARY KEY(part RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'p'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table DROP CONSTRAINT citus_local_partitioned_table_pkey; @@ -764,17 +844,20 @@ ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table ADD UNIQUE(partition RESET client_min_messages; SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table DROP CONSTRAINT citus_local_partitioned_table_partition_col_key; @@ -785,27 +868,32 @@ ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table ADD CHECK (dist_col RESET client_min_messages; SELECT con.conname - FROM 
pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = 'citus_local_partitioned_table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = 'citus_local_partitioned_table' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName.citus_local_partitioned_table DROP CONSTRAINT citus_local_partitioned_table_check; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE 'longlonglonglonglonglonglonglonglong%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :master_host :master_port -- Test with unusual table and column names @@ -813,20 +901,23 @@ CREATE TABLE AT_AddConstNoName."2nd table" ( "2nd id" INTEGER, "3rd id" INTEGER) SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id'); -- Check "ADD PRIMARY KEY" -ALTER TABLE AT_AddConstNoName."2nd table" ADD PRIMARY KEY ("2nd id", "3rd id"); +ALTER TABLE AT_AddConstNoName."2nd table" ADD PRIMARY KEY ("2nd id", "3rd id"); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'p'; -- Check if a primary key constraint is created for the shard tables on the workers \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'p' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_pkey"; @@ -836,17 +927,20 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd 
table_pkey"; ALTER TABLE AT_AddConstNoName."2nd table" ADD UNIQUE ("2nd id", "3rd id"); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'u'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'u' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_3rd id_key"; @@ -856,17 +950,20 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_3rd ALTER TABLE AT_AddConstNoName."2nd table" ADD EXCLUDE ("2nd id" WITH =); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'x'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'x' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_excl"; @@ -876,17 +973,20 @@ ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_2nd id_exc ALTER TABLE AT_AddConstNoName."2nd table" ADD CHECK ("2nd id" > 0 ); SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname = '2nd table'; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname = '2nd table' + AND con.contype = 'c'; \c - - :public_worker_1_host :worker_1_port SELECT con.conname - FROM pg_catalog.pg_constraint con - INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid - INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace - WHERE rel.relname LIKE '2nd table%' ORDER BY con.conname ASC; + FROM pg_catalog.pg_constraint con + INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid + INNER JOIN pg_catalog.pg_namespace 
nsp ON nsp.oid = rel.relnamespace + WHERE rel.relname LIKE '2nd table%' + AND con.contype = 'c' + ORDER BY con.conname ASC; \c - - :master_host :master_port ALTER TABLE AT_AddConstNoName."2nd table" DROP CONSTRAINT "2nd table_check"; From 360fbe3b997ccdf1c353596264f1344972700701 Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Wed, 17 Sep 2025 17:01:45 +0300 Subject: [PATCH 03/14] Technical document update for outer join pushdown (#8200) Outer join pushdown entry and an example. --- src/backend/distributed/README.md | 46 ++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md index 17d5321c3..a0e9c980d 100644 --- a/src/backend/distributed/README.md +++ b/src/backend/distributed/README.md @@ -797,14 +797,13 @@ WHERE l.user_id = o.user_id AND o.primary_key = 55; -### Ref table LEFT JOIN distributed table JOINs via recursive planning +### Outer joins between reference and distributed tables -Very much like local-distributed table joins, Citus can't push down queries formatted as: +In general, when the outer side of an outer join is a recurring tuple (e.g., reference table, intermediate results, or set returning functions), it is not safe to push down the join. ```sql "... ref_table LEFT JOIN distributed_table ..." +"... distributed_table RIGHT JOIN ref_table ..." ``` -This is the case when the outer side is a recurring tuple (e.g., reference table, intermediate results, or set returning functions). - In these situations, Citus recursively plans the "distributed" part of the join. Even though it may seem excessive to recursively plan a distributed table, remember that Citus pushes down the filters and projections. Functions involved here include `RequiredAttrNumbersForRelation()` and `ReplaceRTERelationWithRteSubquery()`. The core function handling this logic is `RecursivelyPlanRecurringTupleOuterJoinWalker()`. There are likely numerous optimizations possible (e.g., first pushing down an inner JOIN then an outer join), but these have not been implemented due to their complexity. @@ -828,6 +827,45 @@ DEBUG: Wrapping relation "orders_table" "o" to a subquery DEBUG: generating subplan 45_1 for subquery SELECT order_id, status FROM public.orders_table o WHERE true ``` +As of Citus 13.2, under certain conditions, Citus can push down these types of LEFT and RIGHT outer joins by injecting constraints—derived from the shard intervals of distributed tables—into shard queries for the reference table. The eligibility rules for pushdown are defined in `CanPushdownRecurringOuterJoin()`, while the logic for computing and injecting the constraints is implemented in `UpdateWhereClauseToPushdownRecurringOuterJoin()`. + +#### Example Query + +In the example below, Citus pushes down the query by injecting interval constraints on the reference table. The injected constraints are visible in the EXPLAIN output. 
+ +```sql +SELECT pc.category_name, count(pt.product_id) +FROM product_categories pc +LEFT JOIN products_table pt ON pc.category_id = pt.product_id +GROUP BY pc.category_name; +``` + +#### Debug Messages +``` +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: a push down safe left join with recurring left side +``` + +#### Explain Output +``` +HashAggregate + Group Key: remote_scan.category_name + -> Custom Scan (Citus Adaptive) + Task Count: 32 + Tasks Shown: One of 32 + -> Task + Node: host=localhost port=9701 dbname=ebru + -> HashAggregate + Group Key: pc.category_name + -> Hash Right Join + Hash Cond: (pt.product_id = pc.category_id) + -> Seq Scan on products_table_102072 pt + -> Hash + -> Seq Scan on product_categories_102106 pc + Filter: ((category_id IS NULL) OR ((btint4cmp('-2147483648'::integer, hashint8((category_id)::bigint)) < 0) AND (btint4cmp(hashint8((category_id)::bigint), '-2013265921'::integer) <= 0))) +``` + + ### Recursive Planning When FROM Clause has Reference Table (or Recurring Tuples) This section discusses a specific scenario in Citus's recursive query planning: handling queries where the main query's `FROM` clause is recurring, but there are subqueries in the `SELECT` or `WHERE` clauses involving distributed tables. From becc02b398858e3226f1360897c149a0a9ad118c Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Fri, 19 Sep 2025 12:01:29 +0300 Subject: [PATCH 04/14] Cleanup from dropping pg14 in merge isolation tests (#8204) These alternative test outputs are redundant since we have dropped PG14 support on main. --- src/test/regress/expected/isolation_merge_0.out | 5 ----- .../expected/isolation_merge_replicated_0.out | 5 ----- src/test/regress/spec/isolation_merge.spec | 12 ------------ .../regress/spec/isolation_merge_replicated.spec | 12 ------------ 4 files changed, 34 deletions(-) delete mode 100644 src/test/regress/expected/isolation_merge_0.out delete mode 100644 src/test/regress/expected/isolation_merge_replicated_0.out diff --git a/src/test/regress/expected/isolation_merge_0.out b/src/test/regress/expected/isolation_merge_0.out deleted file mode 100644 index 3b43a25e6..000000000 --- a/src/test/regress/expected/isolation_merge_0.out +++ /dev/null @@ -1,5 +0,0 @@ -Parsed test spec with 2 sessions - -starting permutation: s1-begin s1-upd-ins s2-result s1-commit s2-result -setup failed: ERROR: MERGE is not supported on PG versions below 15 -CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE diff --git a/src/test/regress/expected/isolation_merge_replicated_0.out b/src/test/regress/expected/isolation_merge_replicated_0.out deleted file mode 100644 index 51161dfb7..000000000 --- a/src/test/regress/expected/isolation_merge_replicated_0.out +++ /dev/null @@ -1,5 +0,0 @@ -Parsed test spec with 2 sessions - -starting permutation: s1-begin s1-upd-ins s2-begin s2-update s1-commit s2-commit s1-result s2-result -setup failed: ERROR: MERGE is not supported on PG versions below 15 -CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE diff --git a/src/test/regress/spec/isolation_merge.spec b/src/test/regress/spec/isolation_merge.spec index 042ce9155..baa90f469 100644 --- a/src/test/regress/spec/isolation_merge.spec +++ b/src/test/regress/spec/isolation_merge.spec @@ -5,18 +5,6 @@ // create distributed tables to test behavior of MERGE in concurrent operations setup { - DO - $do$ - DECLARE ver int; - BEGIN - SELECT substring(version(), '\d+')::int into ver; - IF (ver < 15) - THEN - RAISE
EXCEPTION 'MERGE is not supported on PG versions below 15'; - END IF; - END - $do$; - SET citus.shard_replication_factor TO 1; SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); diff --git a/src/test/regress/spec/isolation_merge_replicated.spec b/src/test/regress/spec/isolation_merge_replicated.spec index a586bdfa1..296b64816 100644 --- a/src/test/regress/spec/isolation_merge_replicated.spec +++ b/src/test/regress/spec/isolation_merge_replicated.spec @@ -5,18 +5,6 @@ // create distributed tables to test behavior of MERGE in concurrent operations setup { - DO - $do$ - DECLARE ver int; - BEGIN - SELECT substring(version(), '\d+')::int into ver; - IF (ver < 15) - THEN - RAISE EXCEPTION 'MERGE is not supported on PG versions below 15'; - END IF; - END - $do$; - SET citus.shard_replication_factor TO 2; SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); From b4cb1a94e920a2a1ad5666c5c4b4db4d40cc4098 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Fri, 19 Sep 2025 12:54:55 +0300 Subject: [PATCH 05/14] Bump citus and citus_columnar to 14.0devel (#8170) --- configure | 18 +++++++-------- configure.ac | 2 +- src/backend/columnar/citus_columnar.control | 2 +- .../sql/citus_columnar--13.2-1--14.0-1.sql | 2 ++ .../citus_columnar--14.0-1--13.2-1.sql | 2 ++ src/backend/distributed/citus.control | 2 +- .../distributed/sql/citus--13.2-1--14.0-1.sql | 2 ++ .../sql/downgrades/citus--14.0-1--13.2-1.sql | 2 ++ src/test/regress/citus_tests/config.py | 2 +- src/test/regress/expected/multi_extension.out | 22 ++++++++++++++++--- src/test/regress/sql/multi_extension.sql | 10 +++++++++ 11 files changed, 50 insertions(+), 16 deletions(-) create mode 100644 src/backend/columnar/sql/citus_columnar--13.2-1--14.0-1.sql create mode 100644 src/backend/columnar/sql/downgrades/citus_columnar--14.0-1--13.2-1.sql create mode 100644 src/backend/distributed/sql/citus--13.2-1--14.0-1.sql create mode 100644 src/backend/distributed/sql/downgrades/citus--14.0-1--13.2-1.sql diff --git a/configure b/configure index 8deed181e..cdaf0e78b 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for Citus 13.2devel. +# Generated by GNU Autoconf 2.69 for Citus 14.0devel. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -579,8 +579,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='Citus' PACKAGE_TARNAME='citus' -PACKAGE_VERSION='13.2devel' -PACKAGE_STRING='Citus 13.2devel' +PACKAGE_VERSION='14.0devel' +PACKAGE_STRING='Citus 14.0devel' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures Citus 13.2devel to adapt to many kinds of systems. +\`configure' configures Citus 14.0devel to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... 
@@ -1324,7 +1324,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of Citus 13.2devel:";; + short | recursive ) echo "Configuration of Citus 14.0devel:";; esac cat <<\_ACEOF @@ -1429,7 +1429,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -Citus configure 13.2devel +Citus configure 14.0devel generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by Citus $as_me 13.2devel, which was +It was created by Citus $as_me 14.0devel, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by Citus $as_me 13.2devel, which was +This file was extended by Citus $as_me 14.0devel, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -5455,7 +5455,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -Citus config.status 13.2devel +Citus config.status 14.0devel configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 137323c1a..c7b5ba1de 100644 --- a/configure.ac +++ b/configure.ac @@ -5,7 +5,7 @@ # everyone needing autoconf installed, the resulting files are checked # into the SCM. -AC_INIT([Citus], [13.2devel]) +AC_INIT([Citus], [14.0devel]) AC_COPYRIGHT([Copyright (c) Citus Data, Inc.]) # we'll need sed and awk for some of the version commands diff --git a/src/backend/columnar/citus_columnar.control b/src/backend/columnar/citus_columnar.control index 9047037a0..872831bd3 100644 --- a/src/backend/columnar/citus_columnar.control +++ b/src/backend/columnar/citus_columnar.control @@ -1,6 +1,6 @@ # Columnar extension comment = 'Citus Columnar extension' -default_version = '13.2-1' +default_version = '14.0-1' module_pathname = '$libdir/citus_columnar' relocatable = false schema = pg_catalog diff --git a/src/backend/columnar/sql/citus_columnar--13.2-1--14.0-1.sql b/src/backend/columnar/sql/citus_columnar--13.2-1--14.0-1.sql new file mode 100644 index 000000000..016c78f6b --- /dev/null +++ b/src/backend/columnar/sql/citus_columnar--13.2-1--14.0-1.sql @@ -0,0 +1,2 @@ +-- citus_columnar--13.2-1--14.0-1 +-- bump version to 14.0-1 diff --git a/src/backend/columnar/sql/downgrades/citus_columnar--14.0-1--13.2-1.sql b/src/backend/columnar/sql/downgrades/citus_columnar--14.0-1--13.2-1.sql new file mode 100644 index 000000000..0504d0048 --- /dev/null +++ b/src/backend/columnar/sql/downgrades/citus_columnar--14.0-1--13.2-1.sql @@ -0,0 +1,2 @@ +-- citus_columnar--14.0-1--13.2-1 +-- downgrade version to 13.2-1 diff --git a/src/backend/distributed/citus.control b/src/backend/distributed/citus.control index 3dfaf3c38..11531abbe 100644 --- a/src/backend/distributed/citus.control +++ b/src/backend/distributed/citus.control @@ -1,6 +1,6 @@ # Citus extension comment = 'Citus distributed database' -default_version = '13.2-1' +default_version = '14.0-1' module_pathname = '$libdir/citus' relocatable = false schema = pg_catalog diff --git 
a/src/backend/distributed/sql/citus--13.2-1--14.0-1.sql b/src/backend/distributed/sql/citus--13.2-1--14.0-1.sql new file mode 100644 index 000000000..ebf34b7fd --- /dev/null +++ b/src/backend/distributed/sql/citus--13.2-1--14.0-1.sql @@ -0,0 +1,2 @@ +-- citus--13.2-1--14.0-1 +-- bump version to 14.0-1 diff --git a/src/backend/distributed/sql/downgrades/citus--14.0-1--13.2-1.sql b/src/backend/distributed/sql/downgrades/citus--14.0-1--13.2-1.sql new file mode 100644 index 000000000..3ef630c4b --- /dev/null +++ b/src/backend/distributed/sql/downgrades/citus--14.0-1--13.2-1.sql @@ -0,0 +1,2 @@ +-- citus--14.0-1--13.2-1 +-- downgrade version to 13.2-1 diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py index 2e3375856..bb825deb4 100644 --- a/src/test/regress/citus_tests/config.py +++ b/src/test/regress/citus_tests/config.py @@ -49,7 +49,7 @@ CITUS_ARBITRARY_TEST_DIR = "./tmp_citus_test" MASTER = "master" # This should be updated when citus version changes -MASTER_VERSION = "13.2" +MASTER_VERSION = "14.0" HOME = expanduser("~") diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index f11f76bc6..d3ed19f21 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1652,12 +1652,28 @@ SELECT * FROM multi_extension.print_extension_changes(); | view citus_stats (12 rows) +-- Test downgrade to 13.2-1 from 14.0-1 +ALTER EXTENSION citus UPDATE TO '14.0-1'; +ALTER EXTENSION citus UPDATE TO '13.2-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 14.0-1 +ALTER EXTENSION citus UPDATE TO '14.0-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version SHOW citus.version; citus.version --------------------------------------------------------------------- - 13.2devel + 14.0devel (1 row) -- ensure no unexpected objects were created outside pg_catalog @@ -1692,7 +1708,7 @@ DROP EXTENSION citus; DROP EXTENSION citus_columnar; CREATE EXTENSION citus VERSION '8.0-1'; ERROR: specified version incompatible with loaded Citus library -DETAIL: Loaded library requires 13.2, but 8.0-1 was specified. +DETAIL: Loaded library requires 14.0, but 8.0-1 was specified. HINT: If a newer library is present, restart the database and try the command again. -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; @@ -1737,7 +1753,7 @@ ORDER BY 1; -- We should not distribute table in version mistmatch SELECT create_distributed_table('version_mismatch_table', 'column1'); ERROR: loaded Citus library version differs from installed extension version -DETAIL: Loaded library requires 13.2, but the installed extension version is 8.1-1. +DETAIL: Loaded library requires 14.0, but the installed extension version is 8.1-1. HINT: Run ALTER EXTENSION citus UPDATE and try again. 
-- This function will cause fail in next ALTER EXTENSION CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass) diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 8877133f9..0a82075c3 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -750,6 +750,16 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '13.2-1'; SELECT * FROM multi_extension.print_extension_changes(); +-- Test downgrade to 13.2-1 from 14.0-1 +ALTER EXTENSION citus UPDATE TO '14.0-1'; +ALTER EXTENSION citus UPDATE TO '13.2-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + +-- Snapshot of state at 14.0-1 +ALTER EXTENSION citus UPDATE TO '14.0-1'; +SELECT * FROM multi_extension.print_extension_changes(); + DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version From 10d62d50eaec9561e0968c57ed5e2c24490ccca3 Mon Sep 17 00:00:00 2001 From: Mehmet YILMAZ Date: Mon, 22 Sep 2025 15:50:32 +0300 Subject: [PATCH 06/14] =?UTF-8?q?Stabilize=20table=5Fchecks=20across=20PG1?= =?UTF-8?q?5=E2=80=93PG18:=20switch=20to=20pg=5Fconstraint,=20remove=20dup?= =?UTF-8?q?es,=20exclude=20NOT=20NULL=20(#8140)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DESCRIPTION: Stabilize table_checks across PG15–PG18: switch to pg_constraint, remove dupes, exclude NOT NUL fixes #8138 fixes #8131 **Problem** ```diff diff -dU10 -w /__w/citus/citus/src/test/regress/expected/multi_create_table_constraints.out /__w/citus/citus/src/test/regress/results/multi_create_table_constraints.out --- /__w/citus/citus/src/test/regress/expected/multi_create_table_constraints.out.modified 2025-08-18 12:26:51.991598284 +0000 +++ /__w/citus/citus/src/test/regress/results/multi_create_table_constraints.out.modified 2025-08-18 12:26:52.004598519 +0000 @@ -403,22 +403,30 @@ relid = 'check_example_partition_col_key_365068'::regclass; Column | Type | Definition ---------------+---------+--------------- partition_col | integer | partition_col (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365068'::regclass; Constraint | Definition -------------------------------------+----------------------------------- check_example_other_col_check | CHECK other_col >= 100 + check_example_other_col_check | CHECK other_col >= 100 + check_example_other_col_check | CHECK other_col >= 100 + check_example_other_col_check | CHECK other_col >= 100 + check_example_other_col_check | CHECK other_col >= 100 check_example_other_other_col_check | CHECK abs(other_other_col) >= 100 -(2 rows) + check_example_other_other_col_check | CHECK abs(other_other_col) >= 100 + check_example_other_other_col_check | CHECK abs(other_other_col) >= 100 + check_example_other_other_col_check | CHECK abs(other_other_col) >= 100 + check_example_other_other_col_check | CHECK abs(other_other_col) >= 100 +(10 rows) ``` On PostgreSQL 18, `NOT NULL` is represented as a cataloged constraint and surfaces through `information_schema.check_constraints`. 
https://github.com/postgres/postgres/commit/14e87ffa5c543b5f30ead7413084c25f7735039f Our helper view `table_checks` (built on `information_schema.check_constraints` + `constraint_column_usage`) started returning: * Extra `…_not_null` rows (noise for our tests) * Duplicate rows for real CHECKs due to the one-to-many join via `constraint_column_usage` * Occasional literal formatting differences (e.g., dates) coming from the information\_schema deparser ### What changed 1. **Rewrite `table_checks` to use system catalogs directly** We now select only expression-based, table-level constraints—excluding NOT NULL—by filtering on `contype <> 'n'` and requiring `conbin IS NOT NULL`. This yields the same effective set as real CHECKs while remaining future-proof against non-CHECK constraint types. ```sql CREATE OR REPLACE VIEW table_checks AS SELECT c.conname AS "Constraint", 'CHECK ' || -- drop a single pair of outer parens if the deparser adds them regexp_replace(pg_get_expr(c.conbin, c.conrelid, true), '^\((.*)\)$', '\1') AS "Definition", c.conrelid AS relid FROM pg_catalog.pg_constraint AS c WHERE c.contype <> 'n' -- drop NOT NULL (PG18) AND c.conbin IS NOT NULL -- only expression-bearing constraints (i.e., CHECKs) AND c.conrelid <> 0 -- table-level only (exclude domains) ORDER BY "Constraint", "Definition"; ``` Why this filter? * `contype <> 'n'` excludes PG18’s NOT NULL rows. * `conbin IS NOT NULL` restricts to expression-backed constraints (CHECKs); PK/UNIQUE/FK/EXCLUSION don’t have `conbin`. * `conrelid <> 0` removes domain constraints. 2. **Add a PG18-specific regression test for `contype = 'n'`** New test (`pg18_not_null_constraints`) verifies: * Coordinator tables have `n` rows for NOT NULL (columns `a`, `c`), * A worker shard has matching `n` rows, * Dropping a NOT NULL on the coordinator propagates to shards (count goes from 2 → 1), * `table_checks` *never* reports NOT NULL, but does report a real CHECK added for the test. --- ### Why this works (PG15–PG18) * **Stable source of truth:** Directly reads `pg_constraint` instead of `information_schema`. * **No duplicates:** Eliminates the `constraint_column_usage` join, removing multiplicity. * **No NOT NULL noise:** PG18’s `contype = 'n'` is filtered out by design. * **Deterministic text:** Uses `pg_get_expr` and strips a single outer set of parentheses for consistent output. --- ### Impact on tests * Removes spurious `…_not_null` entries and duplicate `checky_…` rows (e.g., in `multi_name_lengths` and similar). * Existing expected files stabilize without adding brittle normalizations. * New PG18 test asserts correct catalog behavior and Citus propagation while remaining a no-op on earlier PG versions. 
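As a quick illustration of why the `contype <> 'n'` / `conbin IS NOT NULL` pair is sufficient, here is a minimal sketch (the `nn_demo` table is made up for this example and is not part of the test suite): on PG18 the NOT NULL rows do show up in `pg_constraint`, but only CHECK constraints carry an expression in `conbin`, so the filter keeps exactly the rows the view should report.

```sql
-- Illustrative only; nn_demo is a scratch table, not from the tests.
CREATE TABLE nn_demo (a int NOT NULL, b int CHECK (b > 0));

SELECT conname, contype, (conbin IS NOT NULL) AS has_expr
FROM pg_catalog.pg_constraint
WHERE conrelid = 'nn_demo'::regclass
ORDER BY conname;
-- Expected shape on PG18 (constraint names may vary):
--   nn_demo_a_not_null | n | f
--   nn_demo_b_check    | c | t
```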
--- --- .../expected/multi_test_catalog_views.out | 22 ++- src/test/regress/expected/pg18.out | 163 ++++++++++++++++++ src/test/regress/expected/pg18_0.out | 9 + src/test/regress/multi_schedule | 1 + .../regress/sql/multi_test_catalog_views.sql | 22 ++- src/test/regress/sql/pg18.sql | 129 ++++++++++++++ 6 files changed, 328 insertions(+), 18 deletions(-) create mode 100644 src/test/regress/expected/pg18.out create mode 100644 src/test/regress/expected/pg18_0.out create mode 100644 src/test/regress/sql/pg18.sql diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index 8c255f94e..65ca8f637 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -70,15 +70,19 @@ SELECT "name" AS "Column", "relid" FROM table_attrs; -CREATE VIEW table_checks AS -SELECT cc.constraint_name AS "Constraint", - ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition", - format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid -FROM information_schema.check_constraints cc, - information_schema.constraint_column_usage ccu -WHERE cc.constraint_schema = ccu.constraint_schema AND - cc.constraint_name = ccu.constraint_name -ORDER BY cc.constraint_name ASC; +CREATE OR REPLACE VIEW table_checks AS +SELECT + c.conname AS "Constraint", + 'CHECK ' || + -- drop a single pair of outer parens if the deparser adds them + regexp_replace(pg_get_expr(c.conbin, c.conrelid, true), '^\((.*)\)$', '\1') + AS "Definition", + c.conrelid AS relid +FROM pg_catalog.pg_constraint AS c +WHERE c.contype <> 'n' -- drop NOT NULL + AND c.conbin IS NOT NULL -- only things with an expression (i.e., CHECKs) + AND c.conrelid <> 0 -- table-level (exclude domain checks) +ORDER BY "Constraint", "Definition"; CREATE VIEW index_attrs AS WITH indexoid AS ( diff --git a/src/test/regress/expected/pg18.out b/src/test/regress/expected/pg18.out new file mode 100644 index 000000000..f5d35a47e --- /dev/null +++ b/src/test/regress/expected/pg18.out @@ -0,0 +1,163 @@ +-- +-- PG18 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 +\gset +\if :server_version_ge_18 +\else +\q +\endif +-- PG17-specific tests go here. +-- +-- Purpose: Verify PG18 behavior that NOT NULL constraints are materialized +-- as pg_constraint rows with contype = 'n' on both coordinator and +-- worker shards. Also confirm our helper view (table_checks) does +-- NOT surface NOT NULL entries. 
+-- https://github.com/postgres/postgres/commit/14e87ffa5c543b5f30ead7413084c25f7735039f +CREATE SCHEMA pg18_nn; +SET search_path TO pg18_nn; +-- Local control table +DROP TABLE IF EXISTS nn_local CASCADE; +NOTICE: table "nn_local" does not exist, skipping +CREATE TABLE nn_local( + a int NOT NULL, + b int, + c text NOT NULL +); +-- Distributed table +DROP TABLE IF EXISTS nn_dist CASCADE; +NOTICE: table "nn_dist" does not exist, skipping +CREATE TABLE nn_dist( + a int NOT NULL, + b int, + c text NOT NULL +); +SELECT create_distributed_table('nn_dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Coordinator: count NOT NULL constraint rows +SELECT 'local_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_local'::regclass +GROUP BY contype +ORDER BY contype; + label | contype | count +--------------------------------------------------------------------- + local_n_count | n | 2 +(1 row) + +SELECT 'dist_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_dist'::regclass +GROUP BY contype +ORDER BY contype; + label | contype | count +--------------------------------------------------------------------- + dist_n_count | n | 2 +(1 row) + +-- Our helper view should exclude NOT NULL +SELECT 'table_checks_local_count' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_local'::regclass; + label | count +--------------------------------------------------------------------- + table_checks_local_count | 0 +(1 row) + +SELECT 'table_checks_dist_count' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_dist'::regclass; + label | count +--------------------------------------------------------------------- + table_checks_dist_count | 0 +(1 row) + +-- Add a real CHECK to ensure table_checks still reports real checks +ALTER TABLE nn_dist ADD CONSTRAINT nn_dist_check CHECK (b IS DISTINCT FROM 42); +SELECT 'table_checks_dist_with_real_check' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_dist'::regclass; + label | count +--------------------------------------------------------------------- + table_checks_dist_with_real_check | 1 +(1 row) + +-- === Worker checks === +\c - - - :worker_1_port +SET client_min_messages TO WARNING; +SET search_path TO pg18_nn; +-- Pick one heap shard of nn_dist in our schema +SELECT format('%I.%I', n.nspname, c.relname) AS shard_regclass +FROM pg_class c +JOIN pg_namespace n ON n.oid = c.relnamespace +WHERE n.nspname = 'pg18_nn' + AND c.relname LIKE 'nn_dist_%' + AND c.relkind = 'r' +ORDER BY c.relname +LIMIT 1 +\gset +-- Expect: 2 NOT NULL rows (a,c) + 1 CHECK row on the shard +SELECT 'worker_shard_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = :'shard_regclass'::regclass +GROUP BY contype +ORDER BY contype; + label | contype | count +--------------------------------------------------------------------- + worker_shard_n_count | c | 1 + worker_shard_n_count | n | 2 +(2 rows) + +-- table_checks on shard should hide NOT NULL +SELECT 'table_checks_worker_shard_count' AS label, count(*) +FROM public.table_checks +WHERE relid = :'shard_regclass'::regclass; + label | count +--------------------------------------------------------------------- + table_checks_worker_shard_count | 1 +(1 row) + +-- Drop one NOT NULL on coordinator; verify propagation +\c - - - :master_port +SET search_path TO pg18_nn; +ALTER TABLE nn_dist ALTER COLUMN c DROP NOT NULL; +-- 
Re-check on worker: NOT NULL count should drop to 1 +\c - - - :worker_1_port +SET search_path TO pg18_nn; +SELECT 'worker_shard_n_after_drop' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = :'shard_regclass'::regclass +GROUP BY contype +ORDER BY contype; + label | contype | count +--------------------------------------------------------------------- + worker_shard_n_after_drop | c | 1 + worker_shard_n_after_drop | n | 1 +(2 rows) + +-- And on coordinator +\c - - - :master_port +SET search_path TO pg18_nn; +SELECT 'dist_n_after_drop' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_dist'::regclass +GROUP BY contype +ORDER BY contype; + label | contype | count +--------------------------------------------------------------------- + dist_n_after_drop | c | 1 + dist_n_after_drop | n | 1 +(2 rows) + +-- cleanup +RESET client_min_messages; +RESET search_path; +DROP SCHEMA pg18_nn CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table pg18_nn.nn_local +drop cascades to table pg18_nn.nn_dist diff --git a/src/test/regress/expected/pg18_0.out b/src/test/regress/expected/pg18_0.out new file mode 100644 index 000000000..b682ea190 --- /dev/null +++ b/src/test/regress/expected/pg18_0.out @@ -0,0 +1,9 @@ +-- +-- PG18 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 +\gset +\if :server_version_ge_18 +\else +\q diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 0b1d4ce67..98bc01ac5 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -68,6 +68,7 @@ test: pg14 test: pg15 test: pg15_jsonpath detect_conn_close test: pg17 pg17_json +test: pg18 test: drop_column_partitioned_table test: tableam diff --git a/src/test/regress/sql/multi_test_catalog_views.sql b/src/test/regress/sql/multi_test_catalog_views.sql index bb1442edf..249b2d274 100644 --- a/src/test/regress/sql/multi_test_catalog_views.sql +++ b/src/test/regress/sql/multi_test_catalog_views.sql @@ -71,15 +71,19 @@ SELECT "name" AS "Column", "relid" FROM table_attrs; -CREATE VIEW table_checks AS -SELECT cc.constraint_name AS "Constraint", - ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition", - format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid -FROM information_schema.check_constraints cc, - information_schema.constraint_column_usage ccu -WHERE cc.constraint_schema = ccu.constraint_schema AND - cc.constraint_name = ccu.constraint_name -ORDER BY cc.constraint_name ASC; +CREATE OR REPLACE VIEW table_checks AS +SELECT + c.conname AS "Constraint", + 'CHECK ' || + -- drop a single pair of outer parens if the deparser adds them + regexp_replace(pg_get_expr(c.conbin, c.conrelid, true), '^\((.*)\)$', '\1') + AS "Definition", + c.conrelid AS relid +FROM pg_catalog.pg_constraint AS c +WHERE c.contype <> 'n' -- drop NOT NULL + AND c.conbin IS NOT NULL -- only things with an expression (i.e., CHECKs) + AND c.conrelid <> 0 -- table-level (exclude domain checks) +ORDER BY "Constraint", "Definition"; CREATE VIEW index_attrs AS WITH indexoid AS ( diff --git a/src/test/regress/sql/pg18.sql b/src/test/regress/sql/pg18.sql new file mode 100644 index 000000000..e18e7455b --- /dev/null +++ b/src/test/regress/sql/pg18.sql @@ -0,0 +1,129 @@ +-- +-- PG18 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 +\gset + +\if :server_version_ge_18 +\else +\q +\endif + +-- 
PG17-specific tests go here. +-- + +-- Purpose: Verify PG18 behavior that NOT NULL constraints are materialized +-- as pg_constraint rows with contype = 'n' on both coordinator and +-- worker shards. Also confirm our helper view (table_checks) does +-- NOT surface NOT NULL entries. +-- https://github.com/postgres/postgres/commit/14e87ffa5c543b5f30ead7413084c25f7735039f + +CREATE SCHEMA pg18_nn; +SET search_path TO pg18_nn; + +-- Local control table +DROP TABLE IF EXISTS nn_local CASCADE; +CREATE TABLE nn_local( + a int NOT NULL, + b int, + c text NOT NULL +); + +-- Distributed table +DROP TABLE IF EXISTS nn_dist CASCADE; +CREATE TABLE nn_dist( + a int NOT NULL, + b int, + c text NOT NULL +); + +SELECT create_distributed_table('nn_dist', 'a'); + +-- Coordinator: count NOT NULL constraint rows +SELECT 'local_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_local'::regclass +GROUP BY contype +ORDER BY contype; + +SELECT 'dist_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_dist'::regclass +GROUP BY contype +ORDER BY contype; + +-- Our helper view should exclude NOT NULL +SELECT 'table_checks_local_count' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_local'::regclass; + +SELECT 'table_checks_dist_count' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_dist'::regclass; + +-- Add a real CHECK to ensure table_checks still reports real checks +ALTER TABLE nn_dist ADD CONSTRAINT nn_dist_check CHECK (b IS DISTINCT FROM 42); + +SELECT 'table_checks_dist_with_real_check' AS label, count(*) +FROM public.table_checks +WHERE relid = 'pg18_nn.nn_dist'::regclass; + +-- === Worker checks === +\c - - - :worker_1_port +SET client_min_messages TO WARNING; +SET search_path TO pg18_nn; + +-- Pick one heap shard of nn_dist in our schema +SELECT format('%I.%I', n.nspname, c.relname) AS shard_regclass +FROM pg_class c +JOIN pg_namespace n ON n.oid = c.relnamespace +WHERE n.nspname = 'pg18_nn' + AND c.relname LIKE 'nn_dist_%' + AND c.relkind = 'r' +ORDER BY c.relname +LIMIT 1 +\gset + +-- Expect: 2 NOT NULL rows (a,c) + 1 CHECK row on the shard +SELECT 'worker_shard_n_count' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = :'shard_regclass'::regclass +GROUP BY contype +ORDER BY contype; + +-- table_checks on shard should hide NOT NULL +SELECT 'table_checks_worker_shard_count' AS label, count(*) +FROM public.table_checks +WHERE relid = :'shard_regclass'::regclass; + +-- Drop one NOT NULL on coordinator; verify propagation +\c - - - :master_port +SET search_path TO pg18_nn; + +ALTER TABLE nn_dist ALTER COLUMN c DROP NOT NULL; + +-- Re-check on worker: NOT NULL count should drop to 1 +\c - - - :worker_1_port +SET search_path TO pg18_nn; + +SELECT 'worker_shard_n_after_drop' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = :'shard_regclass'::regclass +GROUP BY contype +ORDER BY contype; + +-- And on coordinator +\c - - - :master_port +SET search_path TO pg18_nn; + +SELECT 'dist_n_after_drop' AS label, contype, count(*) +FROM pg_constraint +WHERE conrelid = 'pg18_nn.nn_dist'::regclass +GROUP BY contype +ORDER BY contype; + +-- cleanup +RESET client_min_messages; +RESET search_path; +DROP SCHEMA pg18_nn CASCADE; From d2ea4043d44b9cfc66404ff621b3b6d54071bbb6 Mon Sep 17 00:00:00 2001 From: Colm Date: Thu, 18 Sep 2025 13:54:14 +0100 Subject: [PATCH 07/14] Postgres 18: fix 'column does not exist' errors in grouping regress tests. 
(#8199) DESCRIPTION: Fix 'column does not exist' errors in grouping regress tests. Postgres 18's GROUP RTE was being ignored by query pushdown planning when constructing the query tree for the worker subquery. The solution is straightforward - ensure the worker subquery tree has the same groupRTE property as the original query. Postgres ruleutils then does the right thing when generating the pushed down query. Fixes category 1 in #7992. --- src/backend/distributed/planner/query_pushdown_planning.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index 84f94dbb6..f7232e2bb 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -2029,7 +2029,9 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery) pushedDownQuery->setOperations = copyObject(queryTree->setOperations); pushedDownQuery->querySource = queryTree->querySource; pushedDownQuery->hasSubLinks = queryTree->hasSubLinks; - +#if PG_VERSION_NUM >= PG_VERSION_18 + pushedDownQuery->hasGroupRTE = queryTree->hasGroupRTE; +#endif MultiTable *subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery); SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode); From b5e70f56ab1044c3629ca0b64a4853f4197c5e4d Mon Sep 17 00:00:00 2001 From: Colm Date: Mon, 22 Sep 2025 09:06:39 +0100 Subject: [PATCH 08/14] Postgres 18: Fix regress tests caused by GROUP RTE. (#8206) The change in `merge_planner.c` fixes _unrecognized range table entry_ diffs in merge regress tests (category 2 diffs in #7992), the change in `multi_router_planner.c` fixes _column reference ... is ambiguous_ diffs in `multi_insert_select` and `multi_insert_select_window` (category 3 diffs in #7992). Edit to `common.py` enables standalone regress tests with pg18 (e..g `citus_tests/run_test.py merge`). --- .../distributed/planner/merge_planner.c | 11 +++++++---- .../planner/multi_router_planner.c | 19 +++++++++++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index b1c441f92..e848141bc 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -422,10 +422,13 @@ ErrorIfMergeHasUnsupportedTables(Oid targetRelationId, List *rangeTableList) case RTE_VALUES: case RTE_JOIN: case RTE_CTE: - { - /* Skip them as base table(s) will be checked */ - continue; - } +#if PG_VERSION_NUM >= PG_VERSION_18 + case RTE_GROUP: +#endif + { + /* Skip them as base table(s) will be checked */ + continue; + } /* * RTE_NAMEDTUPLESTORE is typically used in ephmeral named relations, diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 43f79f30b..d67dd6885 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -372,6 +372,25 @@ AddPartitionKeyNotNullFilterToSelect(Query *subqery) /* we should have found target partition column */ Assert(targetPartitionColumnVar != NULL); +#if PG_VERSION_NUM >= PG_VERSION_18 + if (subqery->hasGroupRTE) + { + /* if the partition column is a grouped column, we need to flatten it + * to ensure query deparsing works correctly. 
We choose to do this here + * instead of in ruletils.c because we want to keep the flattening logic + * close to the NOT NULL filter injection. + */ + RangeTblEntry *partitionRTE = rt_fetch(targetPartitionColumnVar->varno, + subqery->rtable); + if (partitionRTE->rtekind == RTE_GROUP) + { + targetPartitionColumnVar = (Var *) flatten_group_exprs(NULL, subqery, + (Node *) + targetPartitionColumnVar); + } + } +#endif + /* create expression for partition_column IS NOT NULL */ NullTest *nullTest = makeNode(NullTest); nullTest->nulltesttype = IS_NOT_NULL; From 83b25e1fb1c635f5c7be0d3b32d6ae27cbb9d3bf Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 23 Sep 2025 14:17:51 +0300 Subject: [PATCH 09/14] Fix unexpected column index error for repartitioned merge (#8201) DESCRIPTION: Fixes a bug that causes an unexpected error when executing repartitioned merge. Fixes #8180. This was happening because of a bug in SourceResultPartitionColumnIndex(). And to fix it, this PR avoids using DistributionColumnIndex() in SourceResultPartitionColumnIndex(). Instead, invents FindTargetListEntryWithVarExprAttno(), which finds the index of the target entry in the source query's target list that can be used to repartition the source for a repartitioned merge. In short, to find the source target entry that refences the Var used in ON (..) clause and that references the source rte, we should check the varattno of the underlying expr, which presumably is always a Var for repartitioned merge as we always wrap the source rte with a subquery, where all target entries point to the columns of the original source relation. Using DistributionColumnIndex() prior to 13.0 wasn't causing such an issue because prior to 13.0, the varattno of the underlying expr of the source target entries was almost (*1) always equal to resno of the target entry as we were including all target entries of the source relation. However, starting with #7659, which is merged to main before 13.0, we started using CreateFilteredTargetListForRelation() instead of CreateAllTargetListForRelation() to compute the target entry list for the source rte to fix another bug. So we cannot revert to using CreateAllTargetListForRelation() because otherwise we would re-introduce bug that it helped fixing, so we instead had to find a way to properly deal with the "filtered target list"s, as in this commit. Plus (*1), even before #7659, probably we would still fail when the source relation has dropped attributes or such because that would probably also cause such a mismatch between the varattno of the underlying expr of the target entry and its resno. 
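To make the resno/varattno mismatch concrete, here is a small sketch (the table and column names are illustrative, not taken from the regression tests): dropped attributes leave gaps in attribute numbers, so a target entry's position in a target list stops lining up with the varattno of the Var it wraps.

```sql
-- Illustrative only: src_demo is a made-up table.
CREATE TABLE src_demo (dropped_col int, a int, b int);
ALTER TABLE src_demo DROP COLUMN dropped_col;
-- pg_attribute still reserves attnum 1 for the dropped column, so
-- a has varattno 2 and b has varattno 3, while a target list built
-- over (a, b) numbers them resno 1 and resno 2. Any lookup that
-- assumes resno == varattno now points at the wrong column.
SELECT attname, attnum, attisdropped
FROM pg_attribute
WHERE attrelid = 'src_demo'::regclass AND attnum > 0
ORDER BY attnum;
```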
--- .../distributed/planner/merge_planner.c | 31 +++- .../regress/expected/merge_repartition2.out | 153 ++++++++++++++++-- src/test/regress/sql/merge_repartition2.sql | 125 +++++++++++++- 3 files changed, 298 insertions(+), 11 deletions(-) diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index e848141bc..566ac9c3b 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -41,6 +41,7 @@ static int SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList, CitusTableCacheEntry *targetRelation); +static int FindTargetListEntryWithVarExprAttno(List *targetList, AttrNumber varattno); static Var * ValidateAndReturnVarIfSupported(Node *entryExpr); static DeferredErrorMessage * DeferErrorIfTargetHasFalseClause(Oid targetRelationId, PlannerRestrictionContext * @@ -1414,7 +1415,8 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList, Assert(sourceRepartitionVar); int sourceResultRepartitionColumnIndex = - DistributionColumnIndex(sourceTargetList, sourceRepartitionVar); + FindTargetListEntryWithVarExprAttno(sourceTargetList, + sourceRepartitionVar->varattno); if (sourceResultRepartitionColumnIndex == -1) { @@ -1565,6 +1567,33 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query) } +/* + * FindTargetListEntryWithVarExprAttno finds the index of the target + * entry whose expr is a Var that points to input varattno. + * + * If no such target entry is found, it returns -1. + */ +static int +FindTargetListEntryWithVarExprAttno(List *targetList, AttrNumber varattno) +{ + int targetEntryIndex = 0; + + TargetEntry *targetEntry = NULL; + foreach_declared_ptr(targetEntry, targetList) + { + if (IsA(targetEntry->expr, Var) && + ((Var *) targetEntry->expr)->varattno == varattno) + { + return targetEntryIndex; + } + + targetEntryIndex++; + } + + return -1; +} + + /* * IsLocalTableModification returns true if the table modified is a Postgres table. 
* We do not support recursive planning for MERGE yet, so we could have a join diff --git a/src/test/regress/expected/merge_repartition2.out b/src/test/regress/expected/merge_repartition2.out index 524ae84f7..99cb8fbba 100644 --- a/src/test/regress/expected/merge_repartition2.out +++ b/src/test/regress/expected/merge_repartition2.out @@ -193,13 +193,148 @@ SQL function "compare_data" statement 2 (1 row) +---- https://github.com/citusdata/citus/issues/8180 ---- +CREATE TABLE dist_1 (a int, b int, c int); +CREATE TABLE dist_2 (a int, b int, c int); +CREATE TABLE dist_different_order_1 (b int, a int, c int); +SELECT create_distributed_table('dist_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_different_order_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +MERGE INTO dist_1 +USING dist_2 +ON (dist_1.a = dist_2.b) +WHEN MATCHED THEN UPDATE SET b = dist_2.b; +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET b = src.b; +MERGE INTO dist_different_order_1 +USING dist_1 +ON (dist_different_order_1.a = dist_1.b) +WHEN MATCHED THEN UPDATE SET b = dist_1.b; +CREATE TABLE dist_1_cast (a int, b int); +CREATE TABLE dist_2_cast (a int, b numeric); +SELECT create_distributed_table('dist_1_cast', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_2_cast', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +MERGE INTO dist_1_cast +USING dist_2_cast +ON (dist_1_cast.a = dist_2_cast.b) +WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b; +ERROR: In the MERGE ON clause, there is a datatype mismatch between target's distribution column and the expression originating from the source. 
+DETAIL: If the types are different, Citus uses different hash functions for the two column types, which might lead to incorrect repartitioning of the result data +MERGE INTO dist_1_cast +USING (SELECT a, b::int as b FROM dist_2_cast) dist_2_cast +ON (dist_1_cast.a = dist_2_cast.b) +WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b; +-- a more sophisticated example +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +SELECT create_distributed_table('dist_source', 'tstamp_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_target', 'int_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; +-- execute the query on distributed tables +MERGE INTO dist_target target_alias +USING dist_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); +-- execute the same query on local tables, everything is the same except table names behind the aliases +MERGE INTO local_target target_alias +USING local_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + tstamp_col = source_alias.tstamp_col + interval '3 
day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); +-- compare both targets +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + targets_match +--------------------------------------------------------------------- + t +(1 row) + +SET client_min_messages TO WARNING; DROP SCHEMA merge_repartition2_schema CASCADE; -NOTICE: drop cascades to 8 other objects -DETAIL: drop cascades to table pg_target -drop cascades to table pg_source -drop cascades to function cleanup_data() -drop cascades to function setup_data() -drop cascades to function check_data(text,text,text,text) -drop cascades to function compare_data() -drop cascades to table citus_target -drop cascades to table citus_source diff --git a/src/test/regress/sql/merge_repartition2.sql b/src/test/regress/sql/merge_repartition2.sql index 354f0605b..6da816bb5 100644 --- a/src/test/regress/sql/merge_repartition2.sql +++ b/src/test/regress/sql/merge_repartition2.sql @@ -126,5 +126,128 @@ WHEN NOT MATCHED THEN SELECT compare_data(); -DROP SCHEMA merge_repartition2_schema CASCADE; +---- https://github.com/citusdata/citus/issues/8180 ---- +CREATE TABLE dist_1 (a int, b int, c int); +CREATE TABLE dist_2 (a int, b int, c int); +CREATE TABLE dist_different_order_1 (b int, a int, c int); + +SELECT create_distributed_table('dist_1', 'a'); +SELECT create_distributed_table('dist_2', 'a'); +SELECT create_distributed_table('dist_different_order_1', 'a'); + +MERGE INTO dist_1 +USING dist_2 +ON (dist_1.a = dist_2.b) +WHEN MATCHED THEN UPDATE SET b = dist_2.b; + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET b = src.b; + +MERGE INTO dist_different_order_1 +USING dist_1 +ON (dist_different_order_1.a = dist_1.b) +WHEN MATCHED THEN UPDATE SET b = dist_1.b; + +CREATE TABLE dist_1_cast (a int, b int); +CREATE TABLE dist_2_cast (a int, b numeric); + +SELECT create_distributed_table('dist_1_cast', 'a'); +SELECT create_distributed_table('dist_2_cast', 'a'); + +MERGE INTO dist_1_cast +USING dist_2_cast +ON (dist_1_cast.a = dist_2_cast.b) +WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b; + +MERGE INTO dist_1_cast +USING (SELECT a, b::int as b FROM dist_2_cast) dist_2_cast +ON (dist_1_cast.a = dist_2_cast.b) +WHEN MATCHED THEN UPDATE SET b = dist_2_cast.b; + +-- a more sophisticated example +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +SELECT create_distributed_table('dist_source', 'tstamp_col'); +SELECT create_distributed_table('dist_target', 'int_col'); + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, 
(i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; + +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; + +-- execute the query on distributed tables +MERGE INTO dist_target target_alias +USING dist_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); + +-- execute the same query on local tables, everything is the same except table names behind the aliases +MERGE INTO local_target target_alias +USING local_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); + +-- compare both targets + +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + +SET client_min_messages TO WARNING; +DROP SCHEMA merge_repartition2_schema CASCADE; From 80945212ae967f54763782065cfb6b040b83dc80 Mon Sep 17 00:00:00 2001 From: Colm Date: Fri, 26 Sep 2025 13:19:47 +0100 Subject: [PATCH 10/14] PG18 regress sanity: update pg18 ruleutils with fix #7675 (#8216) Fix deparsing of UPDATE statements with indirection (#7675) involved changing ruleutils of our supported Postgres versions. It means that when integrating a new Postgres version we need to update its ruleutils with the relevant parts of #7675; basically PG ruleutils needs to call the `citus_ruleutils.c` functions added by #7675. 
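For reference, a sketch of the statement shape that #7675 deals with (the table and column names here are made up): when several assignments target elements of the same array column, the parser folds them into a single target entry, and the deparser has to expand that entry back into per-element assignments before shipping the query to the workers — which is what the `ExpandMergedSubscriptingRefEntries` call above is for.

```sql
-- Illustrative only; items_demo is a made-up table.
CREATE TABLE items_demo (item_id int, qty int[]);
-- Both assignments are merged onto the qty target entry; deparsing
-- must reproduce them as two subscripted SET clauses, not one.
UPDATE items_demo SET qty[1] = 10, qty[2] = 20 WHERE item_id = 1;
```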
--- src/backend/distributed/deparser/ruleutils_18.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/backend/distributed/deparser/ruleutils_18.c b/src/backend/distributed/deparser/ruleutils_18.c index bd044f5e7..44e4c8d38 100644 --- a/src/backend/distributed/deparser/ruleutils_18.c +++ b/src/backend/distributed/deparser/ruleutils_18.c @@ -3804,6 +3804,8 @@ get_update_query_targetlist_def(Query *query, List *targetList, SubLink *cur_ma_sublink; List *ma_sublinks; + targetList = ExpandMergedSubscriptingRefEntries(targetList); + /* * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks * into a list. We expect them to appear, in ID order, in resjunk tlist @@ -3827,6 +3829,8 @@ get_update_query_targetlist_def(Query *query, List *targetList, } } } + + ensure_update_targetlist_in_param_order(targetList); } next_ma_cell = list_head(ma_sublinks); cur_ma_sublink = NULL; From 81776fe1900d6d6a25524b651f2e792f3a3a403e Mon Sep 17 00:00:00 2001 From: Colm McHugh Date: Wed, 24 Sep 2025 08:20:54 +0000 Subject: [PATCH 11/14] Fix crash in Range Table identity check. The range table entry array created by the Postgres planner for each SELECT in a query may have NULL entries as of PG18. Add a NULL check to skip over these when looking for matches in rte identities. --- .../planner/relation_restriction_equivalence.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 94c99ef20..c657c7c03 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -2431,7 +2431,7 @@ FilterJoinRestrictionContext(JoinRestrictionContext *joinRestrictionContext, Rel /* * RangeTableArrayContainsAnyRTEIdentities returns true if any of the range table entries - * int rangeTableEntries array is an range table relation specified in queryRteIdentities. + * in rangeTableEntries array is a range table relation specified in queryRteIdentities. */ static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int @@ -2444,6 +2444,18 @@ RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int List *rangeTableRelationList = NULL; ListCell *rteRelationCell = NULL; +#if PG_VERSION_NUM >= PG_VERSION_18 + + /* + * In PG18+, planner array simple_rte_array may contain NULL entries + * for "dead relations". See PG commits 5f6f951 and e9a20e4 for details. + */ + if (rangeTableEntry == NULL) + { + continue; + } +#endif + /* * Get list of all RTE_RELATIONs in the given range table entry * (i.e.,rangeTableEntry could be a subquery where we're interested From de045402f301594ba923c7c93ecc736f7cb40105 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Fri, 26 Sep 2025 18:04:34 +0300 Subject: [PATCH 12/14] PG18 - register snapshot where needed (#8196) Register and push snapshots as needed per the relevant PG18 commits https://github.com/postgres/postgres/commit/8076c00592e40e8dbd1fce7a98b20d4bf075e4ba https://github.com/postgres/postgres/commit/706054b `citus_split_shard_columnar_partitioned`, `multi_partitioning` tests are handled. 
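For orientation, a rough sketch of statements that reach the two patched code paths (all names below are made up; this is not the exact repro from the affected tests): scanning a columnar table reads stripe metadata through StripesForRelfilelocator(), and CREATE INDEX CONCURRENTLY on a distributed table goes through PostprocessIndexStmt() / MarkIndexValid(), each of which fetches a transaction snapshot that PG18 now expects to be registered or active.

```sql
-- Illustrative only; assumes a Citus cluster with the columnar AM.
CREATE TABLE events_columnar (id bigint, payload text) USING columnar;
INSERT INTO events_columnar SELECT i, 'x' FROM generate_series(1, 1000) i;
SELECT count(*) FROM events_columnar;  -- reads stripe metadata under a snapshot

CREATE TABLE events_dist (id bigint, payload text);
SELECT create_distributed_table('events_dist', 'id');
CREATE INDEX CONCURRENTLY events_dist_idx ON events_dist (id);  -- updates index state flags
```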
Fixes #8195 --- src/backend/columnar/columnar_metadata.c | 16 +++++++++++++++- src/backend/distributed/commands/index.c | 6 ++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index e3264311c..f699553b6 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -1267,7 +1267,21 @@ StripesForRelfilelocator(RelFileLocator relfilelocator) { uint64 storageId = LookupStorageId(relfilelocator); - return ReadDataFileStripeList(storageId, GetTransactionSnapshot()); + /* + * PG18 requires snapshot to be active or registered before it's used + * Without this, we hit + * Assert(snapshot->regd_count > 0 || snapshot->active_count > 0); + * when reading columnar stripes. + * Relevant PG18 commit: + * 8076c00592e40e8dbd1fce7a98b20d4bf075e4ba + */ + Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot()); + + List *readDataFileStripeList = ReadDataFileStripeList(storageId, snapshot); + + UnregisterSnapshot(snapshot); + + return readDataFileStripeList; } diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 1401da0a6..d95c53fb5 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -854,8 +854,11 @@ PostprocessIndexStmt(Node *node, const char *queryString) table_close(relation, NoLock); index_close(indexRelation, NoLock); + PushActiveSnapshot(GetTransactionSnapshot()); + /* mark index as invalid, in-place (cannot be rolled back) */ index_set_state_flags(indexRelationId, INDEX_DROP_CLEAR_VALID); + PopActiveSnapshot(); /* re-open a transaction command from here on out */ CommitTransactionCommand(); @@ -1370,8 +1373,11 @@ MarkIndexValid(IndexStmt *indexStmt) schemaId); Relation indexRelation = index_open(indexRelationId, RowExclusiveLock); + PushActiveSnapshot(GetTransactionSnapshot()); + /* mark index as valid, in-place (cannot be rolled back) */ index_set_state_flags(indexRelationId, INDEX_CREATE_SET_VALID); + PopActiveSnapshot(); table_close(relation, NoLock); index_close(indexRelation, NoLock); From 5eb1d93be17c9aa70b45d325ac69e1caecb5e4f7 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 30 Sep 2025 13:13:47 +0300 Subject: [PATCH 13/14] Properly detect no-op shard-key updates via UPDATE / MERGE (#8214) DESCRIPTION: Fixes a bug that causes allowing UPDATE / MERGE queries that may change the distribution column value. Fixes: #8087. Probably as of #769, we were not properly checking if UPDATE may change the distribution column. In #769, we had these checks: ```c if (targetEntry->resno != column->varattno) { /* target entry of the form SET some_other_col = */ isColumnValueChanged = false; } else if (IsA(setExpr, Var)) { Var *newValue = (Var *) setExpr; if (newValue->varattno == column->varattno) { /* target entry of the form SET col = table.col */ isColumnValueChanged = false; } } ``` However, what we check in "if" and in the "else if" are not so different in the sense they both attempt to verify if SET expr of the target entry points to the attno of given column. So, in #5220, we even removed the first check because it was redundant. Also see this PR comment from #5220: https://github.com/citusdata/citus/pull/5220#discussion_r699230597. In #769, probably we actually wanted to first check whether both SET expr of the target entry and given variable are pointing to the same range var entry, but this wasn't what the "if" was checking, so removed. 
As a result, in the cases that are mentioned in the linked issue, we were incorrectly concluding that the SET expr of the target entry won't change the given column just because it's pointing to the same attno as the given variable, regardless of which range var entries the column and the SET expr are pointing to. Then we also started using the same function to check for such cases for the update action of MERGE, so we have the same bug there as well.

So with this PR, we properly check for such cases by comparing varno as well in TargetEntryChangesValue(). However, then some of the existing tests started failing where the SET expr doesn't directly assign the column to itself but the "where" clause could actually imply that the distribution column won't change. Even before, we were not attempting to verify whether the "where" clause quals could imply a no-op assignment for the SET expr in such cases, but that was not a problem. This is because, in most cases, we were always qualifying such SET expressions as a no-op update as long as the SET expr's attno was the same as the given column's. For this reason, to prevent regressions, this PR also adds some extra logic to understand if the "where" clause quals imply that the SET expr for the distribution key is a no-op.

Ideally, we should instead use the "relation restriction equivalence" mechanism to understand if the "where" clause implies a no-op update. This is because, for instance, right now we're not able to deduce that the update is a no-op when the "where" clause transitively implies a no-op update, as in the case where we're setting "column a" to "column c" and the where clause looks like: "column a = column b AND column b = column c". If this means a regression for some users, we can consider doing it that way. Until then, as a workaround, we can suggest adding additional quals to the "where" clause that directly imply equivalence.

Also, after fixing TargetEntryChangesValue(), we started successfully deducing that the update action is a no-op for such MERGE queries:

```sql
MERGE INTO dist_1
USING dist_1 src
ON (dist_1.a = src.b)
WHEN MATCHED THEN UPDATE SET a = src.b;
```

However, we then started seeing the below error for the above query even though the update is now qualified as a no-op update:

```
ERROR: Unexpected column index of the source list
```

This was because of #8180, and #8201 fixed that.

In summary, with this PR:

* We disallow such queries,

```sql
-- attno for dist_1.a, dist_1.b: 1, 2
-- attno for dist_different_order_1.a, dist_different_order_1.b: 2, 1
UPDATE dist_1
SET a = dist_different_order_1.b
FROM dist_different_order_1
WHERE dist_1.a = dist_different_order_1.a;

-- attno for dist_1.a, dist_1.b: 1, 2
-- but ON (..) doesn't imply a no-op update for SET expr
MERGE INTO dist_1
USING dist_1 src
ON (dist_1.a = src.b)
WHEN MATCHED THEN UPDATE SET a = src.a;
```

* ..
and allow such queries, ```sql MERGE INTO dist_1 USING dist_1 src ON (dist_1.a = src.b) WHEN MATCHED THEN UPDATE SET a = src.b; ``` --- .../distributed/planner/merge_planner.c | 16 + .../planner/multi_physical_planner.c | 58 ++- .../planner/multi_router_planner.c | 51 ++- .../distributed/multi_physical_planner.h | 3 +- .../regress/expected/mixed_relkind_tests.out | 4 +- .../regress/expected/multi_modifications.out | 401 +++++++++++++++++- src/test/regress/sql/multi_modifications.sql | 399 ++++++++++++++++- 7 files changed, 873 insertions(+), 59 deletions(-) diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 566ac9c3b..c456fa341 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -632,6 +632,22 @@ MergeQualAndTargetListFunctionsSupported(Oid resultRelationId, Query *query, } } + /* + * joinTree->quals, retrieved by GetMergeJoinTree() - either from + * mergeJoinCondition (PG >= 17) or jointree->quals (PG < 17), + * only contains the quals that present in "ON (..)" clause. Action + * quals that can be specified for each specific action, as in + * "WHEN AND THEN "", are + * saved into "qual" field of the corresponding action's entry in + * mergeActionList, see + * https://github.com/postgres/postgres/blob/e6da68a6e1d60a037b63a9c9ed36e5ef0a996769/src/backend/parser/parse_merge.c#L285-L293. + * + * For this reason, even if TargetEntryChangesValue() could prove that + * an action's quals ensure that the action cannot change the distribution + * key, this is not the case as we don't provide action quals to + * TargetEntryChangesValue(), but just joinTree, which only contains + * the "ON (..)" clause quals. + */ if (targetEntryDistributionColumn && TargetEntryChangesValue(targetEntry, distributionColumn, joinTree)) { diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 2c0a17d16..3584246ef 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -3173,16 +3173,25 @@ BuildBaseConstraint(Var *column) /* - * MakeOpExpression builds an operator expression node. This operator expression - * implements the operator clause as defined by the variable and the strategy - * number. + * MakeOpExpressionExtended builds an operator expression node that's of + * the form "Var Expr", where, Expr must either be a Const or a Var + * (*1). + * + * This operator expression implements the operator clause as defined by + * the variable and the strategy number. */ OpExpr * -MakeOpExpression(Var *variable, int16 strategyNumber) +MakeOpExpressionExtended(Var *leftVar, Expr *rightArg, int16 strategyNumber) { - Oid typeId = variable->vartype; - Oid typeModId = variable->vartypmod; - Oid collationId = variable->varcollid; + /* + * Other types of expressions are probably also fine to be used, but + * none of the callers need support for them for now, so we haven't + * tested them (*1). 
+ */ + Assert(IsA(rightArg, Const) || IsA(rightArg, Var)); + + Oid typeId = leftVar->vartype; + Oid collationId = leftVar->varcollid; Oid accessMethodId = BTREE_AM_OID; @@ -3200,18 +3209,16 @@ MakeOpExpression(Var *variable, int16 strategyNumber) */ if (operatorClassInputType != typeId && typeType != TYPTYPE_PSEUDO) { - variable = (Var *) makeRelabelType((Expr *) variable, operatorClassInputType, - -1, collationId, COERCE_IMPLICIT_CAST); + leftVar = (Var *) makeRelabelType((Expr *) leftVar, operatorClassInputType, + -1, collationId, COERCE_IMPLICIT_CAST); } - Const *constantValue = makeNullConst(operatorClassInputType, typeModId, collationId); - /* Now make the expression with the given variable and a null constant */ OpExpr *expression = (OpExpr *) make_opclause(operatorId, InvalidOid, /* no result type yet */ false, /* no return set */ - (Expr *) variable, - (Expr *) constantValue, + (Expr *) leftVar, + rightArg, InvalidOid, collationId); /* Set implementing function id and result type */ @@ -3222,6 +3229,31 @@ MakeOpExpression(Var *variable, int16 strategyNumber) } +/* + * MakeOpExpression is a wrapper around MakeOpExpressionExtended + * that creates a null constant of the appropriate type (the operator + * class input type) for the right-hand side. As a result, it builds an + * operator expression node that's of the form "Var <op> NULL". + */ +OpExpr * +MakeOpExpression(Var *leftVar, int16 strategyNumber) +{ + Oid typeId = leftVar->vartype; + Oid typeModId = leftVar->vartypmod; + Oid collationId = leftVar->varcollid; + + Oid accessMethodId = BTREE_AM_OID; + + OperatorCacheEntry *operatorCacheEntry = LookupOperatorByType(typeId, accessMethodId, + strategyNumber); + Oid operatorClassInputType = operatorCacheEntry->operatorClassInputType; + + Const *constantValue = makeNullConst(operatorClassInputType, typeModId, collationId); + + return MakeOpExpressionExtended(leftVar, (Expr *) constantValue, strategyNumber); +} + + /* * LookupOperatorByType is a wrapper around GetOperatorByType(), * operatorClassInputType() and get_typtype() functions that uses a cache to avoid diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index d67dd6885..017dceef6 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -1628,10 +1628,19 @@ MasterIrreducibleExpressionFunctionChecker(Oid func_id, void *context) /* * TargetEntryChangesValue determines whether the given target entry may - change the value in a given column, given a join tree. The result is - true unless the expression refers directly to the column, or the - expression is a value that is implied by the qualifiers of the join - tree, or the target entry sets a different column. + change the value given a column and a join tree. + * + * The function assumes that the "targetEntry" references the given "column" + * Var via its "resname" and is used as part of a modify query. This means + * that, for example, for an update query, the input "targetEntry" constructs + * the following assignment operation as part of the SET clause: + * "col_a = expr_a", where "col_a" refers to the input "column" Var (via + * "resname") as per the assumption written above. And we want to understand + * if "expr_a" (which is pointed to by targetEntry->expr) refers directly to + * the "column" Var, or "expr_a" is a value that is implied to be equal + * to the "column" Var by the qualifiers of the join tree.
If so, we know that + * the value of "col_a" effectively cannot be changed by this assignment + * operation. */ bool TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTree) @@ -1642,11 +1651,36 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre if (IsA(setExpr, Var)) { Var *newValue = (Var *) setExpr; - if (newValue->varattno == column->varattno) + if (column->varno == newValue->varno && + column->varattno == newValue->varattno) { - /* target entry of the form SET col = table.col */ + /* + * Target entry is of the form "SET col_a = foo.col_b", + * where foo also points to the same range table entry + * and col_a and col_b are the same. So, effectively + * they're literally referring to the same column. + */ isColumnValueChanged = false; } + else + { + List *restrictClauseList = WhereClauseList(joinTree); + OpExpr *equalityExpr = MakeOpExpressionExtended(column, (Expr *) newValue, + BTEqualStrategyNumber); + + bool predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), + restrictClauseList, false); + if (predicateIsImplied) + { + /* + * Target entry is of the form + * "SET col_a = foo.col_b WHERE col_a = foo.col_b (AND (...))", + * where foo points to a different relation or it points + * to the same relation but col_a is not the same column as col_b. + */ + isColumnValueChanged = false; + } + } } else if (IsA(setExpr, Const)) { @@ -1667,7 +1701,10 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre restrictClauseList, false); if (predicateIsImplied) { - /* target entry of the form SET col = WHERE col = AND ... */ + /* + * Target entry is of the form + * "SET col_a = const_a WHERE col_a = const_a (AND (...))". + */ isColumnValueChanged = false; } } diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index 1040b4149..25ca24ec7 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ -586,7 +586,8 @@ extern DistributedPlan * CreatePhysicalDistributedPlan(MultiTreeRoot *multiTree, plannerRestrictionContext); extern Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType, char *queryString); - +extern OpExpr * MakeOpExpressionExtended(Var *leftVar, Expr *rightArg, + int16 strategyNumber); extern OpExpr * MakeOpExpression(Var *variable, int16 strategyNumber); extern Node * WrapUngroupedVarsInAnyValueAggregate(Node *expression, List *groupClauseList, diff --git a/src/test/regress/expected/mixed_relkind_tests.out b/src/test/regress/expected/mixed_relkind_tests.out index b168cd7be..b2c30d1e4 100644 --- a/src/test/regress/expected/mixed_relkind_tests.out +++ b/src/test/regress/expected/mixed_relkind_tests.out @@ -394,9 +394,9 @@ DEBUG: Wrapping relation "mat_view_on_part_dist" "foo" to a subquery DEBUG: generating subplan XXX_1 for subquery SELECT a FROM mixed_relkind_tests.mat_view_on_part_dist foo WHERE true DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE mixed_relkind_tests.partitioned_distributed_table SET a = foo.a FROM (SELECT foo_1.a, NULL::integer AS b FROM (SELECT intermediate_result.a FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer)) foo_1) foo WHERE (foo.a OPERATOR(pg_catalog.=) partitioned_distributed_table.a) UPDATE partitioned_distributed_table SET a = foo.a FROM partitioned_distributed_table AS foo WHERE foo.a < partitioned_distributed_table.a; -ERROR: complex joins are 
only supported when all distributed tables are co-located and joined on their distribution columns +ERROR: modifying the partition value of rows is not allowed UPDATE partitioned_distributed_table SET a = foo.a FROM distributed_table AS foo WHERE foo.a < partitioned_distributed_table.a; -ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +ERROR: modifying the partition value of rows is not allowed -- should work UPDATE partitioned_distributed_table SET a = foo.a FROM partitioned_distributed_table AS foo WHERE foo.a = partitioned_distributed_table.a; UPDATE partitioned_distributed_table SET a = foo.a FROM view_on_part_dist AS foo WHERE foo.a = partitioned_distributed_table.a; diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index cebef0526..748ce7728 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -2,6 +2,7 @@ SET citus.shard_count TO 32; SET citus.next_shard_id TO 750000; SET citus.next_placement_id TO 750000; CREATE SCHEMA multi_modifications; +SET search_path TO multi_modifications; -- some failure messages that comes from the worker nodes -- might change due to parallel executions, so suppress those -- using \set VERBOSITY terse @@ -31,8 +32,12 @@ SELECT create_distributed_table('limit_orders', 'id', 'hash'); (1 row) -SELECT create_distributed_table('multiple_hash', 'id', 'hash'); -ERROR: column "id" of relation "multiple_hash" does not exist +SELECT create_distributed_table('multiple_hash', 'category', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + SELECT create_distributed_table('range_partitioned', 'id', 'range'); create_distributed_table --------------------------------------------------------------------- @@ -344,22 +349,26 @@ ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001 -- Test that shards which miss a modification are marked unhealthy -- First: Connect to the second worker node \c - - - :worker_2_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- Fourth: Perform an INSERT on the remaining node -- the whole transaction should fail \set VERBOSITY terse INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -ERROR: relation "public.limit_orders_750000" does not exist +ERROR: relation "multi_modifications.limit_orders_750000" does not exist -- set the shard name back \c - - - :worker_2_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Verify the insert failed and both placements are healthy -- or the insert succeeded and placement marked unhealthy \c - - - :worker_1_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders_750000 WHERE id = 276; count --------------------------------------------------------------------- @@ -367,6 +376,7 @@ SELECT count(*) FROM limit_orders_750000 WHERE id = 276; (1 row) \c - - - :worker_2_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders_750000 WHERE id = 276; count 
--------------------------------------------------------------------- @@ -374,6 +384,7 @@ SELECT count(*) FROM limit_orders_750000 WHERE id = 276; (1 row) \c - - - :master_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders WHERE id = 276; count --------------------------------------------------------------------- @@ -394,14 +405,16 @@ AND s.logicalrelid = 'limit_orders'::regclass; -- Test that if all shards miss a modification, no state change occurs -- First: Connect to the first worker node \c - - - :worker_1_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- Fourth: Perform an INSERT on the remaining node \set VERBOSITY terse INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -ERROR: relation "public.limit_orders_750000" does not exist +ERROR: relation "multi_modifications.limit_orders_750000" does not exist \set VERBOSITY DEFAULT -- Last: Verify worker is still healthy SELECT count(*) @@ -420,10 +433,12 @@ AND s.logicalrelid = 'limit_orders'::regclass; -- Undo our change... -- First: Connect to the first worker node \c - - - :worker_1_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- attempting to change the partition key is unsupported UPDATE limit_orders SET id = 0 WHERE id = 246; ERROR: modifying the partition value of rows is not allowed @@ -433,6 +448,368 @@ ERROR: modifying the partition value of rows is not allowed UPDATE limit_orders SET id = 246 WHERE id = 246; UPDATE limit_orders SET id = 246 WHERE id = 246 AND symbol = 'GM'; UPDATE limit_orders SET id = limit_orders.id WHERE id = 246; +CREATE TABLE dist_1 (a int, b int, c int); +CREATE TABLE dist_2 (a int, b int, c int); +CREATE TABLE dist_non_colocated (a int, b int, c int); +CREATE TABLE dist_different_order_1 (b int, a int, c int); +SELECT create_distributed_table('dist_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_non_colocated', 'a', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_different_order_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- +-- https://github.com/citusdata/citus/issues/8087 +-- +---- update: should work ---- +-- setting shard key to itself -- +UPDATE dist_1 SET a = dist_1.a; +UPDATE dist_1 SET a = dist_1.a WHERE dist_1.a > dist_1.b AND dist_1.b > 10; +UPDATE dist_1 SET a = dist_1.a FROM dist_2 WHERE dist_1.a = dist_2.a; +-- setting shard key to another var that's implied to be equal to shard key -- +UPDATE dist_1 SET a = b WHERE a = b; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a AND dist_1.b = dist_2.c AND (dist_2.c > 5 OR dist_2.c 
< 0); +with cte as ( +select a, b from dist_1 +) +update dist_1 set a = cte.a from cte where dist_1.a = cte.a; +with cte as ( +select a as x, b as y from (select a, b from dist_1 limit 100) dt where b > 100 +) +update dist_1 set a = cte.x from cte where dist_1.a = cte.x; +with cte as ( +select d2.a as x, d1.b as y +from dist_1 d1, dist_different_order_1 d2 +where d1.a=d2.a) +update dist_1 set a = cte.x from cte where y != 0 and dist_1.a = cte.x; +with cte as ( +select * from (select a as x, b as y from dist_2 limit 100) q +) +update dist_1 set a = cte.x from cte where b = cte.y and cte.y = a and a = cte.x; +-- supported although the where clause will certainly eval to false +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a AND dist_1.a = 5 AND dist_2.a = 7; +-- setting shard key to another var that's implied to be equal to shard key, repeat with dist_different_order_1 -- +UPDATE dist_1 SET a = dist_different_order_1.a FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a; +-- test with extra quals +UPDATE dist_1 SET a = dist_different_order_1.a FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a AND dist_1.b = dist_different_order_1.c AND (dist_different_order_1.c > 5 OR dist_different_order_1.c < 0); +---- update: errors in router planner ---- +-- different column of the same relation, which is not implied to be equal to shard key -- +UPDATE dist_1 SET a = dist_1.b; +ERROR: modifying the partition value of rows is not allowed +-- another range table entry's column with the same attno, which is not implied to be equal to shard key -- +UPDATE dist_1 SET a = dist_2.a FROM dist_2; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a != dist_2.a; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a >= dist_2.a; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a OR dist_1.a > dist_2.a; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = dist_different_order_1.b FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = foo.a FROM dist_1 foo; +ERROR: modifying the partition value of rows is not allowed +UPDATE dist_1 SET a = foo.a FROM dist_1 foo WHERE dist_1.a != foo.a; +ERROR: modifying the partition value of rows is not allowed +-- (*1) Would normally expect this to not throw an error because +-- dist_1.a = dist_2.b AND dist_2.b = dist_2.a, +-- so dist_1.a = dist_2.a, so we should be able to deduce +-- that (dist_1.)a = dist_2.a, but seems predicate_implied_by() +-- is not that smart. 
+UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.b AND dist_2.b = dist_2.a; +ERROR: modifying the partition value of rows is not allowed +-- and same here +with cte as ( +select * from (select a as x, b as y from dist_different_order_1 limit 100) q +) +update dist_1 set a = cte.x from cte where a = cte.y and cte.y = b and b = cte.x; +ERROR: modifying the partition value of rows is not allowed +---- update: errors later (in logical or physical planner) ---- +-- setting shard key to itself -- +UPDATE dist_1 SET a = dist_1.a FROM dist_1 foo; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +UPDATE dist_1 SET a = dist_1.a FROM dist_2 foo; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +-- setting shard key to another var that's implied to be equal to shard key -- +UPDATE dist_1 SET a = dist_non_colocated.a FROM dist_non_colocated WHERE dist_1.a = dist_non_colocated.a; +ERROR: cannot push down this subquery +DETAIL: dist_1 and dist_non_colocated are not colocated +UPDATE dist_1 SET a = dist_2.b FROM dist_2 WHERE dist_1.a = dist_2.b; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +---- update: a more sophisticated example ---- +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +SELECT create_distributed_table('dist_source', 'int_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_target', 'int_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; +-- execute the query 
on distributed tables +UPDATE dist_target target_alias +SET int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +FROM dist_source source_alias +WHERE target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col; +-- execute the same query on local tables, everything is the same except table names behind the aliases +UPDATE local_target target_alias +SET int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +FROM local_source source_alias +WHERE target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col; +-- compare both targets +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + targets_match +--------------------------------------------------------------------- + t +(1 row) + +---- merge: should work ---- +-- setting shard key to itself -- +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.a; +-- We don't care about action quals when deciding if the update +-- could change the shard key, but still add some action quals for +-- testing. See the comments written on top of the line we call +-- TargetEntryChangesValue() in MergeQualAndTargetListFunctionsSupported(). 
+MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED AND dist_1.a > dist_1.b AND dist_1.b > 10 THEN UPDATE SET a = dist_1.a; +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.a; +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; +-- setting shard key to another var that's implied to be equal to shard key -- +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = dist_1.b) +WHEN MATCHED THEN UPDATE SET a = dist_1.b; +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; +-- test with extra quals +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b AND (dist_1.b > 1000 OR (dist_1.b < 500))) +WHEN MATCHED THEN UPDATE SET a = src.b; +-- setting shard key to another var that's implied to be equal to shard key, repeat with dist_different_order_1 -- +MERGE INTO dist_1 +USING dist_different_order_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; +---- merge: errors in router planner ---- +-- different column of the same relation, which is not implied to be equal to shard key -- +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.b; +ERROR: updating the distribution column is not allowed in MERGE actions +-- another range table entry's column with the same attno, which is not implied to be equal to shard key -- +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.a; +ERROR: updating the distribution column is not allowed in MERGE actions +-- as in (*1), this is not supported +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b AND src.b = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; +ERROR: updating the distribution column is not allowed in MERGE actions +MERGE INTO dist_1 +USING dist_2 src +ON (true) +WHEN MATCHED THEN UPDATE SET a = src.a; +ERROR: updating the distribution column is not allowed in MERGE actions +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a <= src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; +ERROR: updating the distribution column is not allowed in MERGE actions +---- merge: a more sophisticated example ---- +DROP TABLE dist_source, dist_target, local_source, local_target; +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); +SELECT create_distributed_table('dist_source', 'tstamp_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_target', 'int_col'); + create_distributed_table 
+--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; +-- execute the query on distributed tables +MERGE INTO dist_target target_alias +USING dist_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); +-- execute the same query on local tables, everything is the same except table names behind the aliases +MERGE INTO local_target target_alias +USING local_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); +-- compare both targets +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + targets_match +--------------------------------------------------------------------- + t +(1 row) + -- UPDATEs with a FROM clause are supported even with local tables UPDATE limit_orders SET limit_price = 0.00 FROM bidders WHERE limit_orders.id = 246 AND @@ -1353,19 +1730,5 @@ CREATE TABLE multi_modifications.local (a int default 1, b int); INSERT INTO multi_modifications.local VALUES (default, (SELECT 
min(id) FROM summary_table)); ERROR: subqueries are not supported within INSERT queries HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax. -DROP TABLE insufficient_shards; -DROP TABLE raw_table; -DROP TABLE summary_table; -DROP TABLE reference_raw_table; -DROP TABLE reference_summary_table; -DROP TABLE limit_orders; -DROP TABLE multiple_hash; -DROP TABLE range_partitioned; -DROP TABLE append_partitioned; -DROP TABLE bidders; -DROP FUNCTION stable_append; -DROP FUNCTION immutable_append; -DROP FUNCTION temp_strict_func; -DROP TYPE order_side; +SET client_min_messages TO WARNING; DROP SCHEMA multi_modifications CASCADE; -NOTICE: drop cascades to table multi_modifications.local diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql index 958791e44..ada5707bb 100644 --- a/src/test/regress/sql/multi_modifications.sql +++ b/src/test/regress/sql/multi_modifications.sql @@ -3,6 +3,7 @@ SET citus.next_shard_id TO 750000; SET citus.next_placement_id TO 750000; CREATE SCHEMA multi_modifications; +SET search_path TO multi_modifications; -- some failure messages that comes from the worker nodes -- might change due to parallel executions, so suppress those @@ -36,7 +37,7 @@ CREATE TABLE append_partitioned ( LIKE limit_orders ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders', 'id', 'hash'); -SELECT create_distributed_table('multiple_hash', 'id', 'hash'); +SELECT create_distributed_table('multiple_hash', 'category', 'hash'); SELECT create_distributed_table('range_partitioned', 'id', 'range'); SELECT create_distributed_table('append_partitioned', 'id', 'append'); @@ -245,12 +246,14 @@ INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', -- First: Connect to the second worker node \c - - - :worker_2_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- Fourth: Perform an INSERT on the remaining node -- the whole transaction should fail @@ -259,6 +262,7 @@ INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', -- set the shard name back \c - - - :worker_2_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; @@ -266,12 +270,15 @@ ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Verify the insert failed and both placements are healthy -- or the insert succeeded and placement marked unhealthy \c - - - :worker_1_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders_750000 WHERE id = 276; \c - - - :worker_2_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders_750000 WHERE id = 276; \c - - - :master_port +SET search_path TO multi_modifications; SELECT count(*) FROM limit_orders WHERE id = 276; @@ -286,12 +293,14 @@ AND s.logicalrelid = 'limit_orders'::regclass; -- First: Connect to the first worker node \c - - - :worker_1_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- Fourth: Perform an INSERT on the remaining node \set VERBOSITY terse @@ 
-312,12 +321,14 @@ AND s.logicalrelid = 'limit_orders'::regclass; -- First: Connect to the first worker node \c - - - :worker_1_port +SET search_path TO multi_modifications; -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Third: Connect back to master node \c - - - :master_port +SET search_path TO multi_modifications; -- attempting to change the partition key is unsupported UPDATE limit_orders SET id = 0 WHERE id = 246; @@ -328,6 +339,375 @@ UPDATE limit_orders SET id = 246 WHERE id = 246; UPDATE limit_orders SET id = 246 WHERE id = 246 AND symbol = 'GM'; UPDATE limit_orders SET id = limit_orders.id WHERE id = 246; +CREATE TABLE dist_1 (a int, b int, c int); +CREATE TABLE dist_2 (a int, b int, c int); +CREATE TABLE dist_non_colocated (a int, b int, c int); +CREATE TABLE dist_different_order_1 (b int, a int, c int); + +SELECT create_distributed_table('dist_1', 'a'); +SELECT create_distributed_table('dist_2', 'a'); +SELECT create_distributed_table('dist_non_colocated', 'a', colocate_with=>'none'); +SELECT create_distributed_table('dist_different_order_1', 'a'); + +-- +-- https://github.com/citusdata/citus/issues/8087 +-- + +---- update: should work ---- + +-- setting shard key to itself -- + +UPDATE dist_1 SET a = dist_1.a; +UPDATE dist_1 SET a = dist_1.a WHERE dist_1.a > dist_1.b AND dist_1.b > 10; +UPDATE dist_1 SET a = dist_1.a FROM dist_2 WHERE dist_1.a = dist_2.a; + +-- setting shard key to another var that's implied to be equal to shard key -- + +UPDATE dist_1 SET a = b WHERE a = b; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a AND dist_1.b = dist_2.c AND (dist_2.c > 5 OR dist_2.c < 0); + +with cte as ( +select a, b from dist_1 +) +update dist_1 set a = cte.a from cte where dist_1.a = cte.a; + +with cte as ( +select a as x, b as y from (select a, b from dist_1 limit 100) dt where b > 100 +) +update dist_1 set a = cte.x from cte where dist_1.a = cte.x; + +with cte as ( +select d2.a as x, d1.b as y +from dist_1 d1, dist_different_order_1 d2 +where d1.a=d2.a) +update dist_1 set a = cte.x from cte where y != 0 and dist_1.a = cte.x; + +with cte as ( +select * from (select a as x, b as y from dist_2 limit 100) q +) +update dist_1 set a = cte.x from cte where b = cte.y and cte.y = a and a = cte.x; + +-- supported although the where clause will certainly eval to false +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a AND dist_1.a = 5 AND dist_2.a = 7; + +-- setting shard key to another var that's implied to be equal to shard key, repeat with dist_different_order_1 -- + +UPDATE dist_1 SET a = dist_different_order_1.a FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a; + +-- test with extra quals +UPDATE dist_1 SET a = dist_different_order_1.a FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a AND dist_1.b = dist_different_order_1.c AND (dist_different_order_1.c > 5 OR dist_different_order_1.c < 0); + +---- update: errors in router planner ---- + +-- different column of the same relation, which is not implied to be equal to shard key -- + +UPDATE dist_1 SET a = dist_1.b; + +-- another range table entry's column with the same attno, which is not implied to be equal to shard key -- + +UPDATE dist_1 SET a = dist_2.a FROM dist_2; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a != dist_2.a; +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a >= dist_2.a; 
+UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.a OR dist_1.a > dist_2.a; +UPDATE dist_1 SET a = dist_different_order_1.b FROM dist_different_order_1 WHERE dist_1.a = dist_different_order_1.a; + +UPDATE dist_1 SET a = foo.a FROM dist_1 foo; +UPDATE dist_1 SET a = foo.a FROM dist_1 foo WHERE dist_1.a != foo.a; + +-- (*1) Would normally expect this to not throw an error because +-- dist_1.a = dist_2.b AND dist_2.b = dist_2.a, +-- so dist_1.a = dist_2.a, so we should be able to deduce +-- that (dist_1.)a = dist_2.a, but seems predicate_implied_by() +-- is not that smart. +UPDATE dist_1 SET a = dist_2.a FROM dist_2 WHERE dist_1.a = dist_2.b AND dist_2.b = dist_2.a; + +-- and same here +with cte as ( +select * from (select a as x, b as y from dist_different_order_1 limit 100) q +) +update dist_1 set a = cte.x from cte where a = cte.y and cte.y = b and b = cte.x; + +---- update: errors later (in logical or physical planner) ---- + +-- setting shard key to itself -- + +UPDATE dist_1 SET a = dist_1.a FROM dist_1 foo; +UPDATE dist_1 SET a = dist_1.a FROM dist_2 foo; + +-- setting shard key to another var that's implied to be equal to shard key -- + +UPDATE dist_1 SET a = dist_non_colocated.a FROM dist_non_colocated WHERE dist_1.a = dist_non_colocated.a; +UPDATE dist_1 SET a = dist_2.b FROM dist_2 WHERE dist_1.a = dist_2.b; + +---- update: a more sophisticated example ---- +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +SELECT create_distributed_table('dist_source', 'int_col'); +SELECT create_distributed_table('dist_target', 'int_col'); + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; + +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; + +-- execute the query on distributed tables +UPDATE dist_target target_alias +SET int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = 
array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +FROM dist_source source_alias +WHERE target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col; + +-- execute the same query on local tables, everything is the same except table names behind the aliases +UPDATE local_target target_alias +SET int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +FROM local_source source_alias +WHERE target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col; + +-- compare both targets + +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + +---- merge: should work ---- + +-- setting shard key to itself -- + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.a; + +-- We don't care about action quals when deciding if the update +-- could change the shard key, but still add some action quals for +-- testing. See the comments written on top of the line we call +-- TargetEntryChangesValue() in MergeQualAndTargetListFunctionsSupported(). +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED AND dist_1.a > dist_1.b AND dist_1.b > 10 THEN UPDATE SET a = dist_1.a; + +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.a; + +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; + +-- setting shard key to another var that's implied to be equal to shard key -- + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = dist_1.b) +WHEN MATCHED THEN UPDATE SET a = dist_1.b; + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; + +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; + +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; + +-- test with extra quals +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b AND (dist_1.b > 1000 OR (dist_1.b < 500))) +WHEN MATCHED THEN UPDATE SET a = src.b; + +-- setting shard key to another var that's implied to be equal to shard key, repeat with dist_different_order_1 -- + +MERGE INTO dist_1 +USING dist_different_order_1 src +ON (dist_1.a = src.a AND dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.b; + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; + +---- merge: errors in router planner ---- + +-- different column of the same relation, which is not implied to be equal to shard key -- + +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.a) +WHEN MATCHED THEN UPDATE SET a = dist_1.b; + +-- another range table entry's column with the same attno, which is not implied to be equal to shard key -- + +MERGE INTO 
dist_1 +USING dist_1 src +ON (dist_1.a = src.b) +WHEN MATCHED THEN UPDATE SET a = src.a; + +-- as in (*1), this is not supported +MERGE INTO dist_1 +USING dist_1 src +ON (dist_1.a = src.b AND src.b = src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; + +MERGE INTO dist_1 +USING dist_2 src +ON (true) +WHEN MATCHED THEN UPDATE SET a = src.a; + +MERGE INTO dist_1 +USING dist_2 src +ON (dist_1.a <= src.a) +WHEN MATCHED THEN UPDATE SET a = src.a; + +---- merge: a more sophisticated example ---- +DROP TABLE dist_source, dist_target, local_source, local_target; +CREATE TABLE dist_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE dist_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +CREATE TABLE local_source (tstamp_col timestamp, int_col int, text_arr_col text[], text_col text, json_col jsonb); +CREATE TABLE local_target (text_col text, tstamp_col timestamp, json_col jsonb, text_arr_col text[], int_col int); + +SELECT create_distributed_table('dist_source', 'tstamp_col'); +SELECT create_distributed_table('dist_target', 'int_col'); + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(1001, 2000) i; + +INSERT INTO dist_source (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[i::text, (i+1)::text, (i+2)::text], + 'source_' || i, + ('{"a": ' || i || ', "b": ' || i+1 || '}')::jsonb +FROM generate_series(901, 1000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1501, 2000) i; + +INSERT INTO dist_target (tstamp_col, int_col, text_arr_col, text_col, json_col) +SELECT TIMESTAMP '2025-01-01 00:00:00' + (i || ' days')::interval, + i, + ARRAY[(i-1)::text, (i)::text, (i+1)::text], + 'source_' || i-1, + ('{"a": ' || i*5 || ', "b": ' || i+20 || '}')::jsonb +FROM generate_series(1401, 1500) i; + +INSERT INTO local_source SELECT * FROM dist_source; +INSERT INTO local_target SELECT * FROM dist_target; + +-- execute the query on distributed tables +MERGE INTO dist_target target_alias +USING dist_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + int_col = source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); + +-- execute the same query on local tables, everything is the same except table names behind the aliases +MERGE INTO local_target target_alias +USING local_source source_alias +ON (target_alias.text_col = source_alias.text_col AND target_alias.int_col = source_alias.int_col) +WHEN MATCHED THEN UPDATE SET + int_col = 
source_alias.int_col, + tstamp_col = source_alias.tstamp_col + interval '3 day', + text_arr_col = array_append(source_alias.text_arr_col, 'updated_' || source_alias.text_col), + json_col = ('{"a": "' || replace(source_alias.text_col, '"', '\"') || '"}')::jsonb, + text_col = source_alias.json_col->>'a' +WHEN NOT MATCHED THEN + INSERT VALUES (source_alias.text_col, source_alias.tstamp_col, source_alias.json_col, source_alias.text_arr_col, source_alias.int_col ); + +-- compare both targets + +SELECT COUNT(*) = 0 AS targets_match +FROM ( + SELECT * FROM dist_target + EXCEPT + SELECT * FROM local_target + UNION ALL + SELECT * FROM local_target + EXCEPT + SELECT * FROM dist_target +) q; + -- UPDATEs with a FROM clause are supported even with local tables UPDATE limit_orders SET limit_price = 0.00 FROM bidders WHERE limit_orders.id = 246 AND @@ -914,20 +1294,5 @@ DELETE FROM summary_table WHERE id < ( CREATE TABLE multi_modifications.local (a int default 1, b int); INSERT INTO multi_modifications.local VALUES (default, (SELECT min(id) FROM summary_table)); -DROP TABLE insufficient_shards; -DROP TABLE raw_table; -DROP TABLE summary_table; -DROP TABLE reference_raw_table; -DROP TABLE reference_summary_table; -DROP TABLE limit_orders; -DROP TABLE multiple_hash; -DROP TABLE range_partitioned; -DROP TABLE append_partitioned; -DROP TABLE bidders; - -DROP FUNCTION stable_append; -DROP FUNCTION immutable_append; -DROP FUNCTION temp_strict_func; -DROP TYPE order_side; - +SET client_min_messages TO WARNING; DROP SCHEMA multi_modifications CASCADE;

From bb840e58a79f902f7c3b45a28815d6613da325f4 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Wed, 1 Oct 2025 00:09:11 +0300
Subject: [PATCH 14/14] Fix crash on create statistics with non-RangeVar type
 (#8213)

This crash has been there for a while but wasn't tested before PG18. PG18 added this test:

CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo;

which tries to create statistics on a derived-on-the-fly table (which is not allowed). However, Citus assumes we always have a valid table when intercepting the CREATE STATISTICS command to check for Citus tables. Added a check to return early if needed.
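For reference, the statement above now fails cleanly with PostgreSQL's own error instead of crashing; the exact message differs by server version, as the expected files in the diff below show:

```sql
CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo;
-- PG18:     ERROR:  cannot create statistics on the specified relation
-- pre-PG18: ERROR:  only a single relation is allowed in CREATE STATISTICS
```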
pg18 commit: https://github.com/postgres/postgres/commit/3eea4dc2c Fixes #8212 --- src/backend/distributed/commands/statistics.c | 10 +++++++++- src/test/regress/expected/pg18.out | 8 +++++++- src/test/regress/expected/pg18_0.out | 5 +++++ src/test/regress/sql/pg18.sql | 7 ++++++- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index b43f6335e..7a77b6b3d 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -69,7 +69,15 @@ PreprocessCreateStatisticsStmt(Node *node, const char *queryString, { CreateStatsStmt *stmt = castNode(CreateStatsStmt, node); - RangeVar *relation = (RangeVar *) linitial(stmt->relations); + Node *relationNode = (Node *) linitial(stmt->relations); + + if (!IsA(relationNode, RangeVar)) + { + return NIL; + } + + RangeVar *relation = (RangeVar *) relationNode; + Oid relationId = RangeVarGetRelid(relation, ShareUpdateExclusiveLock, false); if (!IsCitusTable(relationId) || !ShouldPropagate()) diff --git a/src/test/regress/expected/pg18.out b/src/test/regress/expected/pg18.out index f5d35a47e..fd42f4070 100644 --- a/src/test/regress/expected/pg18.out +++ b/src/test/regress/expected/pg18.out @@ -4,11 +4,17 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset +-- test invalid statistics +-- behavior is same among PG versions, error message differs +-- relevant PG18 commit: 3eea4dc2c7 +CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. \if :server_version_ge_18 \else \q \endif --- PG17-specific tests go here. +-- PG18-specific tests go here. -- -- Purpose: Verify PG18 behavior that NOT NULL constraints are materialized -- as pg_constraint rows with contype = 'n' on both coordinator and diff --git a/src/test/regress/expected/pg18_0.out b/src/test/regress/expected/pg18_0.out index b682ea190..8d8c55727 100644 --- a/src/test/regress/expected/pg18_0.out +++ b/src/test/regress/expected/pg18_0.out @@ -4,6 +4,11 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset +-- test invalid statistics +-- behavior is same among PG versions, error message differs +-- relevant PG18 commit: 3eea4dc2c7 +CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; +ERROR: only a single relation is allowed in CREATE STATISTICS \if :server_version_ge_18 \else \q diff --git a/src/test/regress/sql/pg18.sql b/src/test/regress/sql/pg18.sql index e18e7455b..94c0ad997 100644 --- a/src/test/regress/sql/pg18.sql +++ b/src/test/regress/sql/pg18.sql @@ -5,12 +5,17 @@ SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset +-- test invalid statistics +-- behavior is same among PG versions, error message differs +-- relevant PG18 commit: 3eea4dc2c7 +CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; + \if :server_version_ge_18 \else \q \endif --- PG17-specific tests go here. +-- PG18-specific tests go here. -- -- Purpose: Verify PG18 behavior that NOT NULL constraints are materialized