From 458299035bd2e492cebad12136c2fa868748e416 Mon Sep 17 00:00:00 2001
From: Colm
Date: Thu, 30 Oct 2025 11:49:28 +0000
Subject: [PATCH 01/10] PG18: fix regress test failures in
 subquery_in_targetlist.

The failing queries all have a GROUP BY, and the fix teaches the Citus
recursive planner how to handle a PG18 GROUP range table in the outer
query:

- In recursive query planning, don't recurse into subquery expressions
  in a GROUP BY clause
- Flatten references to a GROUP RTE before creating the worker subquery
  in pushdown planning
- If a PARAM node points to a GROUP RTE, tunnel through to the
  underlying expression

Fixes #8296.
---
 .../planner/insert_select_planner.c           | 23 ++++---------
 .../planner/multi_logical_planner.c           | 13 ++++++-
 .../planner/query_pushdown_planning.c         | 33 +++++++++++++++++-
 .../distributed/planner/recursive_planning.c  | 13 +++----
 .../relation_restriction_equivalence.c        | 34 +++++++++++++++++++
 .../distributed/query_pushdown_planning.h     |  2 +-
 src/test/regress/citus_tests/run_test.py      |  3 ++
 7 files changed, 92 insertions(+), 29 deletions(-)

diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c
index 1cd4161b6..554ac631e 100644
--- a/src/backend/distributed/planner/insert_select_planner.c
+++ b/src/backend/distributed/planner/insert_select_planner.c
@@ -512,23 +512,12 @@ PrepareInsertSelectForCitusPlanner(Query *insertSelectQuery)
 
 	bool isWrapped = false;
 
-#if PG_VERSION_NUM >= PG_VERSION_18
-
-/*
- * PG18 is stricter about GroupRTE/GroupVar. For INSERT … SELECT with a GROUP BY,
- * flatten the SELECT’s targetList and havingQual so Vars point to base RTEs and
- * avoid Unrecognized range table id.
- */
-	if (selectRte->subquery->hasGroupRTE)
-	{
-		Query *selectQuery = selectRte->subquery;
-		selectQuery->targetList = (List *)
-			flatten_group_exprs(NULL, selectQuery,
-								(Node *) selectQuery->targetList);
-		selectQuery->havingQual =
-			flatten_group_exprs(NULL, selectQuery, selectQuery->havingQual);
-	}
-#endif
+	/*
+	 * PG18 is stricter about GroupRTE/GroupVar. For INSERT … SELECT with a GROUP BY,
+	 * flatten the SELECT’s targetList and havingQual so Vars point to base RTEs and
+	 * avoid Unrecognized range table id.
+	 */
+	FlattenGroupExprs(selectRte->subquery);
 
 	if (selectRte->subquery->setOperations != NULL)
 	{
diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c
index e4141644f..8b50efc1e 100644
--- a/src/backend/distributed/planner/multi_logical_planner.c
+++ b/src/backend/distributed/planner/multi_logical_planner.c
@@ -297,8 +297,19 @@ TargetListOnPartitionColumn(Query *query, List *targetEntryList)
 bool
 FindNodeMatchingCheckFunctionInRangeTableList(List *rtable, CheckNodeFunc checker)
 {
+	int rtWalkFlags = QTW_EXAMINE_RTES_BEFORE;
+
+#if PG_VERSION_NUM >= PG_VERSION_18
+
+	/*
+	 * PG18+: Do not descend into subqueries in GROUP BY expressions; they
+	 * have already been visited, as recursive planning is depth-first.
+	 */
+	rtWalkFlags |= QTW_IGNORE_GROUPEXPRS;
+#endif
+
 	return range_table_walker(rtable, FindNodeMatchingCheckFunction, checker,
-							  QTW_EXAMINE_RTES_BEFORE);
+							  rtWalkFlags);
 }
 
 
diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c
index f7232e2bb..b94412f2b 100644
--- a/src/backend/distributed/planner/query_pushdown_planning.c
+++ b/src/backend/distributed/planner/query_pushdown_planning.c
@@ -333,7 +333,9 @@ WhereOrHavingClauseContainsSubquery(Query *query)
 bool
 TargetListContainsSubquery(List *targetList)
 {
-	return FindNodeMatchingCheckFunction((Node *) targetList, IsNodeSubquery);
+	bool hasSubquery = FindNodeMatchingCheckFunction((Node *) targetList, IsNodeSubquery);
+
+	return hasSubquery;
 }
 
 
@@ -1093,6 +1095,28 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
 }
 
 
+/*
+ * FlattenGroupExprs flattens the GROUP BY expressions in the query tree
+ * by replacing VAR nodes referencing the GROUP range table with the actual
+ * GROUP BY expression. This is used by Citus planning to ensure correctness
+ * when analysing and building the distributed plan.
+ */
+void
+FlattenGroupExprs(Query *queryTree)
+{
+#if PG_VERSION_NUM >= PG_VERSION_18
+	if (queryTree->hasGroupRTE)
+	{
+		queryTree->targetList = (List *)
+			flatten_group_exprs(NULL, queryTree,
+								(Node *) queryTree->targetList);
+		queryTree->havingQual =
+			flatten_group_exprs(NULL, queryTree, queryTree->havingQual);
+	}
+#endif
+}
+
+
 /*
  * DeferErrorIfSubqueryRequiresMerge returns a deferred error if the subquery
  * requires a merge step on the coordinator (e.g. limit, group by non-distribution
@@ -1953,6 +1977,13 @@ static MultiNode *
 SubqueryPushdownMultiNodeTree(Query *originalQuery)
 {
 	Query *queryTree = copyObject(originalQuery);
+
+	/*
+	 * PG18+ needs to flatten GROUP BY expressions to ensure correct processing
+	 * later on, such as identification of partition columns in GROUP BY.
+	 */
+	FlattenGroupExprs(queryTree);
+
 	List *targetEntryList = queryTree->targetList;
 	MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect);
 
diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c
index 07aa442b0..139b30231 100644
--- a/src/backend/distributed/planner/recursive_planning.c
+++ b/src/backend/distributed/planner/recursive_planning.c
@@ -261,7 +261,6 @@ GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
 	 */
 	context.allDistributionKeysInQueryAreEqual =
 		AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext);
-
 	DeferredErrorMessage *error = RecursivelyPlanSubqueriesAndCTEs(originalQuery,
 																   &context);
 	if (error != NULL)
@@ -1123,14 +1122,10 @@ ExtractSublinkWalker(Node *node, List **sublinkList)
 static bool
 ShouldRecursivelyPlanSublinks(Query *query)
 {
-	if (FindNodeMatchingCheckFunctionInRangeTableList(query->rtable,
-													  IsDistributedTableRTE))
-	{
-		/* there is a distributed table in the FROM clause */
-		return false;
-	}
-
-	return true;
+	bool hasDistributedTable = (FindNodeMatchingCheckFunctionInRangeTableList(
+									query->rtable,
+									IsDistributedTableRTE));
+	return !hasDistributedTable;
 }
 
 
diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c
index c657c7c03..5a63503f0 100644
--- a/src/backend/distributed/planner/relation_restriction_equivalence.c
+++ b/src/backend/distributed/planner/relation_restriction_equivalence.c
@@ -971,6 +971,40 @@ GetVarFromAssignedParam(List *outerPlanParamsList, Param *plannerParam,
 		}
 	}
 
+#if PG_VERSION_NUM >= PG_VERSION_18
+
+	/*
+	 * In PG18+, the dereferenced PARAM node could be a GroupVar if the
+	 * query has a GROUP BY. In that case, we need to make an extra
+	 * hop to get the underlying Var from the grouping expressions.
+	 */
+	if (assignedVar != NULL)
+	{
+		Query *parse = (*rootContainingVar)->parse;
+		if (parse->hasGroupRTE)
+		{
+			RangeTblEntry *rte = rt_fetch(assignedVar->varno, parse->rtable);
+			if (rte->rtekind == RTE_GROUP)
+			{
+				Assert(assignedVar->varattno >= 1 &&
+					   assignedVar->varattno <= list_length(rte->groupexprs));
+				Node *groupVar = list_nth(rte->groupexprs, assignedVar->varattno - 1);
+				if (IsA(groupVar, Var))
+				{
+					assignedVar = (Var *) groupVar;
+				}
+				else
+				{
+					/* todo: handle PlaceHolderVar case if needed */
+					ereport(DEBUG2, (errmsg(
+						"GroupVar maps to non-Var group expr; bailing out")));
+					assignedVar = NULL;
+				}
+			}
+		}
+	}
+#endif
+
 	return assignedVar;
 }
diff --git a/src/include/distributed/query_pushdown_planning.h b/src/include/distributed/query_pushdown_planning.h
index ba92a0462..287c8da62 100644
--- a/src/include/distributed/query_pushdown_planning.h
+++ b/src/include/distributed/query_pushdown_planning.h
@@ -49,6 +49,6 @@ extern DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subquery
 extern DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *queryTree);
 extern bool IsJsonTableRTE(RangeTblEntry *rte);
 extern bool IsOuterJoinExpr(Node *node);
-
+extern void FlattenGroupExprs(Query *query);
 
 #endif /* QUERY_PUSHDOWN_PLANNING_H */
diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py
index e5a567af8..03384ac28 100755
--- a/src/test/regress/citus_tests/run_test.py
+++ b/src/test/regress/citus_tests/run_test.py
@@ -265,6 +265,9 @@ DEPS = {
     "subquery_in_where": TestDeps(
         "minimal_schedule", ["multi_behavioral_analytics_create_table"]
     ),
+    "subquery_in_targetlist": TestDeps(
+        "minimal_schedule", ["multi_behavioral_analytics_create_table"]
+    ),
 }

From 86010de73398646b4ae879261792199bea094f43 Mon Sep 17 00:00:00 2001
From: Vinod Sridharan <14185211+visridha@users.noreply.github.com>
Date: Fri, 31 Oct 2025 00:21:58 -0700
Subject: [PATCH 02/10] Update GUC setting to not crash with ASAN (#8301)

The GUC configuration for SkipAdvisoryLockPermissionChecks had
misconfigured the settings, passing GUC_SUPERUSER_ONLY where the
PGC_SUSET context belonged - when PostgreSQL is running with ASAN, this
fails when querying pg_settings due to exceeding the size of the array
GucContext_Names.

Fix up this GUC declaration to not crash with ASAN.
---
 src/backend/distributed/shared_library_init.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index c3bd09f17..ac8b0a7d6 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -2510,8 +2510,8 @@ RegisterCitusConfigVariables(void)
 		NULL,
 		&SkipAdvisoryLockPermissionChecks,
 		false,
-		GUC_SUPERUSER_ONLY,
-		GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
+		PGC_SUSET,
+		GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
 		NULL, NULL, NULL);
 
 	DefineCustomBoolVariable(

From 503a2aba7305d2cd63b9af3707e321f251258682 Mon Sep 17 00:00:00 2001
From: Ivan Kush
Date: Fri, 31 Oct 2025 21:20:12 +0300
Subject: [PATCH 03/10] Move PushActiveSnapshot outside a for loop (#8253)

PR [8142](https://github.com/citusdata/citus/pull/8142) added
`PushActiveSnapshot`. This commit moves it outside the for loop, as
taking a snapshot is a resource-heavy operation.
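
The shape of the change, condensed into a sketch (`tupleCount` and
`InsertOneTuple` are placeholders for the real per-tuple work in
`SaveStripeSkipList`, shown in the diff below):

```c
bool pushedSnapshot = false;

/* push one snapshot for the whole batch instead of one per tuple */
if (!ActiveSnapshotSet())
{
	PushActiveSnapshot(GetTransactionSnapshot());
	pushedSnapshot = true;
}

for (int i = 0; i < tupleCount; i++)
{
	InsertOneTuple(i);	/* placeholder for InsertTupleAndEnforceConstraints() */
}

/* pop only what we pushed; leave a caller-provided snapshot in place */
if (pushedSnapshot)
{
	PopActiveSnapshot();
}
```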
---
 src/backend/columnar/columnar_metadata.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c
index 0b4f2400c..159c126f7 100644
--- a/src/backend/columnar/columnar_metadata.c
+++ b/src/backend/columnar/columnar_metadata.c
@@ -618,7 +618,13 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
 	Oid columnarChunkOid = ColumnarChunkRelationId();
 	Relation columnarChunk = table_open(columnarChunkOid, RowExclusiveLock);
 	ModifyState *modifyState = StartModifyRelation(columnarChunk);
+	bool pushed_snapshot = false;
 
+	if (!ActiveSnapshotSet())
+	{
+		PushActiveSnapshot(GetTransactionSnapshot());
+		pushed_snapshot = true;
+	}
 	for (columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		for (chunkIndex = 0; chunkIndex < chunkList->chunkCount; chunkIndex++)
@@ -659,11 +665,13 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
 				nulls[Anum_columnar_chunk_minimum_value - 1] = true;
 				nulls[Anum_columnar_chunk_maximum_value - 1] = true;
 			}
-			PushActiveSnapshot(GetTransactionSnapshot());
 			InsertTupleAndEnforceConstraints(modifyState, values, nulls);
-			PopActiveSnapshot();
 		}
 	}
+	if (pushed_snapshot)
+	{
+		PopActiveSnapshot();
+	}
 
 	FinishModifyRelation(modifyState);
 
 	table_close(columnarChunk, RowExclusiveLock);

From e0570baad66ca0bd0ea6bc266e7a1653f990c198 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Sat, 1 Nov 2025 22:36:16 +0300
Subject: [PATCH 04/10] PG18 - Remove REINDEX SYSTEM test because of aio debug
 lines (#8305)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes #8268

PG18 added core async I/O infrastructure.
Relevant PG commit:
https://github.com/postgres/postgres/commit/da72269

In the `pg16.sql` test, we tested the `REINDEX SYSTEM` command, which
would later cause I/O debug lines in a command of the
`multi_insert_select.sql` test, which runs _after_ the `pg16.sql` test.

"This is expected because the I/O subsystem is repopulating its internal
callback pool after heavy catalog I/O churn caused by REINDEX SYSTEM."
- chatgpt (seems right 😄)

This PR removes the `REINDEX SYSTEM` test as it's not crucial anyway.
`REINDEX DATABASE` is sufficient for the purpose presented in the
pg16.sql test.
---
 src/test/regress/expected/pg16.out | 10 +---------
 src/test/regress/sql/pg16.sql      |  5 +----
 2 files changed, 2 insertions(+), 13 deletions(-)

diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out
index 80c2bb3df..a172c54c0 100644
--- a/src/test/regress/expected/pg16.out
+++ b/src/test/regress/expected/pg16.out
@@ -878,6 +878,7 @@ SET search_path TO pg16;
 -- We already don't propagate these commands automatically
 -- Testing here with run_command_on_workers
 -- Relevant PG commit: https://github.com/postgres/postgres/commit/2cbc3c1
+-- Testing only DATABASE here as SYSTEM produces aio output in PG18
 REINDEX DATABASE;
 SELECT result FROM run_command_on_workers
 ($$REINDEX DATABASE$$);
  result
---------------------------------------------------------------------
 REINDEX
 REINDEX
(2 rows)
 
-REINDEX SYSTEM;
-SELECT result FROM run_command_on_workers
-($$REINDEX SYSTEM$$);
- result
----------------------------------------------------------------------
- REINDEX
- REINDEX
-(2 rows)
-
 --
 -- random_normal() to provide normally-distributed random numbers
 -- adding here the same tests as the ones with random() in aggregate_support.sql
diff --git a/src/test/regress/sql/pg16.sql b/src/test/regress/sql/pg16.sql
index a57c4c5b4..bada3649c 100644
--- a/src/test/regress/sql/pg16.sql
+++ b/src/test/regress/sql/pg16.sql
@@ -521,15 +521,12 @@ SET search_path TO pg16;
 -- We already don't propagate these commands automatically
 -- Testing here with run_command_on_workers
 -- Relevant PG commit: https://github.com/postgres/postgres/commit/2cbc3c1
+-- Testing only DATABASE here as SYSTEM produces aio output in PG18
 REINDEX DATABASE;
 SELECT result FROM run_command_on_workers
 ($$REINDEX DATABASE$$);
 
-REINDEX SYSTEM;
-SELECT result FROM run_command_on_workers
-($$REINDEX SYSTEM$$);
-
 --
 -- random_normal() to provide normally-distributed random numbers
 -- adding here the same tests as the ones with random() in aggregate_support.sql

From 6251eab9b7a9bdde60b6a63c9ce949c817e0827f Mon Sep 17 00:00:00 2001
From: Mehmet YILMAZ
Date: Mon, 3 Nov 2025 14:51:39 +0300
Subject: [PATCH 05/10] PG18: Make SSL tests resilient & validate TLSv1.3
 cipher config (#8298)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fixes #8277

https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=45188c2ea

PostgreSQL 18 + newer OpenSSL builds surface `ssl_ciphers` as a **rule
string** (e.g., `HIGH:MEDIUM:+3DES:!aNULL`) instead of an expanded
cipher list. Our tests hard-pinned the literal list and started failing
on PG18. Also, with TLS 1.3 in the picture, we need to assert that
cipher configuration is sane without coupling to OpenSSL’s expansion.

**What changed**

* **sql/ssl_by_default.sql**
  * Replace brittle `SHOW ssl_ciphers` string matching with invariant checks:
    * non-empty ciphers: `current_setting('ssl_ciphers') <> ''`
    * looks like a rule/list: `position(':' in current_setting('ssl_ciphers')) > 0`
  * Run the same checks on **workers** via `run_command_on_workers`.
  * Keep existing validations for `ssl=on`, `sslmode=require` in
    `citus.node_conninfo`, and `pg_stat_ssl.ssl = true`.
* **expected/ssl_by_default.out**
  * Update expected output to booleans for the new checks (less
    diff-prone across PG/SSL variants).
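
For reference, a standalone sketch (not part of the change; assumes
OpenSSL 1.1+ headers and linking with `-lssl -lcrypto`) of how such a
rule string expands into concrete cipher names — that expansion varies
across OpenSSL builds, which is exactly why the test no longer pins it:

```c
#include <stdio.h>
#include <openssl/ssl.h>

int
main(void)
{
	/* expand an OpenSSL cipher rule string into concrete cipher names */
	SSL_CTX *ctx = SSL_CTX_new(TLS_method());
	SSL_CTX_set_cipher_list(ctx, "HIGH:MEDIUM:+3DES:!aNULL");

	SSL *ssl = SSL_new(ctx);
	STACK_OF(SSL_CIPHER) *ciphers = SSL_get_ciphers(ssl);

	for (int i = 0; i < sk_SSL_CIPHER_num(ciphers); i++)
	{
		/* each entry is one cipher the rule string selects on this build */
		printf("%s\n", SSL_CIPHER_get_name(sk_SSL_CIPHER_value(ciphers, i)));
	}

	SSL_free(ssl);
	SSL_CTX_free(ctx);
	return 0;
}
```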
---
 src/test/regress/expected/ssl_by_default.out | 52 ++++++++++++++------
 src/test/regress/sql/ssl_by_default.sql      | 30 ++++++++---
 2 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/src/test/regress/expected/ssl_by_default.out b/src/test/regress/expected/ssl_by_default.out
index 9a0357143..4738c2e5e 100644
--- a/src/test/regress/expected/ssl_by_default.out
+++ b/src/test/regress/expected/ssl_by_default.out
@@ -1,16 +1,21 @@
 -- Citus uses ssl by default now. It does so by turning on ssl and if needed will generate
 -- self-signed certificates.
--- To test this we will verify that SSL is set to ON for all machines, and we will make
--- sure connections to workers use SSL by having it required in citus.conn_nodeinfo and
--- lastly we will inspect the ssl state for connections to the workers
--- ssl can only be enabled by default on installations that are OpenSSL-enabled.
+--
+-- This test verifies:
+-- 1) ssl=on on coordinator and workers
+-- 2) coordinator->workers connections use SSL (pg_stat_ssl true)
+-- 3) ssl_ciphers is non-empty and has a colon-separated rule/list on both coordinator and workers
+--    (PG18/OpenSSL may report a rule string like HIGH:MEDIUM:+3DES:!aNULL instead of an expanded list)
+-- 0) Is this an OpenSSL-enabled build? (if not, ssl_ciphers is 'none')
+--    Keep the “hasssl” signal but don’t rely on the literal cipher list value.
 SHOW ssl_ciphers \gset
-SELECT :'ssl_ciphers' != 'none' AS hasssl;
+SELECT :'ssl_ciphers' <> 'none' AS hasssl;
  hasssl
---------------------------------------------------------------------
 t
(1 row)
 
+-- 1) ssl must be on (coordinator + workers)
 SHOW ssl;
  ssl
---------------------------------------------------------------------
 on
(1 row)
 
@@ -26,6 +31,7 @@ $$);
 (localhost,57638,t,on)
(2 rows)
 
+-- 2) connections to workers carry sslmode=require
 SHOW citus.node_conninfo;
  citus.node_conninfo
---------------------------------------------------------------------
 sslmode=require
(1 row)
 
@@ -41,6 +47,7 @@ $$);
 (localhost,57638,t,sslmode=require)
(2 rows)
 
+-- 3) pg_stat_ssl says SSL is active on each worker connection
 SELECT run_command_on_workers($$
 SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid();
 $$);
@@ -50,18 +57,35 @@ $$);
 (localhost,57638,t,t)
(2 rows)
 
-SHOW ssl_ciphers;
- ssl_ciphers
---------------------------------------------------------------------
- ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384
-(1 row)
+-- 4) ssl_ciphers checks (coordinator): non-empty and contains at least one ':'
+SELECT current_setting('ssl_ciphers') <> '' AS has_ssl_ciphers;
+ has_ssl_ciphers
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT position(':' in current_setting('ssl_ciphers')) > 0 AS has_colon;
+ has_colon
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- 5) ssl_ciphers checks (workers)
+SELECT run_command_on_workers($$
+    SELECT current_setting('ssl_ciphers') <> '' AS has_ssl_ciphers
+$$);
+ run_command_on_workers
+---------------------------------------------------------------------
+ (localhost,57637,t,t)
+ (localhost,57638,t,t)
+(2 rows)
+
 SELECT run_command_on_workers($$
-    SHOW ssl_ciphers;
+    SELECT position(':' in current_setting('ssl_ciphers')) > 0 AS has_at_least_two_ciphers
 $$);
  run_command_on_workers
---------------------------------------------------------------------
- (localhost,57637,t,ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384)
- (localhost,57638,t,ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384)
+ (localhost,57637,t,t)
+ (localhost,57638,t,t)
(2 rows)
 
diff --git a/src/test/regress/sql/ssl_by_default.sql b/src/test/regress/sql/ssl_by_default.sql
index 564b5f778..a7f2c8657 100644
--- a/src/test/regress/sql/ssl_by_default.sql
+++ b/src/test/regress/sql/ssl_by_default.sql
@@ -1,29 +1,43 @@
 -- Citus uses ssl by default now. It does so by turning on ssl and if needed will generate
 -- self-signed certificates.
+--
+-- This test verifies:
+-- 1) ssl=on on coordinator and workers
+-- 2) coordinator->workers connections use SSL (pg_stat_ssl true)
+-- 3) ssl_ciphers is non-empty and has a colon-separated rule/list on both coordinator and workers
+--    (PG18/OpenSSL may report a rule string like HIGH:MEDIUM:+3DES:!aNULL instead of an expanded list)
 
--- To test this we will verify that SSL is set to ON for all machines, and we will make
--- sure connections to workers use SSL by having it required in citus.conn_nodeinfo and
--- lastly we will inspect the ssl state for connections to the workers
-
--- ssl can only be enabled by default on installations that are OpenSSL-enabled.
+-- 0) Is this an OpenSSL-enabled build? (if not, ssl_ciphers is 'none')
+--    Keep the “hasssl” signal but don’t rely on the literal cipher list value.
 SHOW ssl_ciphers \gset
-SELECT :'ssl_ciphers' != 'none' AS hasssl;
+SELECT :'ssl_ciphers' <> 'none' AS hasssl;
 
+-- 1) ssl must be on (coordinator + workers)
 SHOW ssl;
 SELECT run_command_on_workers($$
 SHOW ssl;
 $$);
 
+-- 2) connections to workers carry sslmode=require
 SHOW citus.node_conninfo;
 SELECT run_command_on_workers($$
 SHOW citus.node_conninfo;
 $$);
 
+-- 3) pg_stat_ssl says SSL is active on each worker connection
 SELECT run_command_on_workers($$
 SELECT ssl FROM pg_stat_ssl WHERE pid = pg_backend_pid();
 $$);
 
-SHOW ssl_ciphers;
+-- 4) ssl_ciphers checks (coordinator): non-empty and contains at least one ':'
+SELECT current_setting('ssl_ciphers') <> '' AS has_ssl_ciphers;
+SELECT position(':' in current_setting('ssl_ciphers')) > 0 AS has_colon;
+
+-- 5) ssl_ciphers checks (workers)
 SELECT run_command_on_workers($$
-    SHOW ssl_ciphers;
+    SELECT current_setting('ssl_ciphers') <> '' AS has_ssl_ciphers
+$$);
+
+SELECT run_command_on_workers($$
+    SELECT position(':' in current_setting('ssl_ciphers')) > 0 AS has_at_least_two_ciphers
 $$);

From 61b491f0f43e29041ef750ea3d6e02d8268b73c9 Mon Sep 17 00:00:00 2001
From: Mehmet YILMAZ
Date: Mon, 3 Nov 2025 15:34:28 +0300
Subject: [PATCH 06/10] PG18: Add _plan_json() test helper and switch columnar
 index tests to JSON plan checks (#8299)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fixes #8263

* Introduces `columnar_test_helpers._plan_json(q text) -> jsonb`, which
  runs `EXPLAIN (FORMAT JSON, COSTS OFF, ANALYZE OFF)` and returns the
  plan as `jsonb`. This lets us assert on plan structure instead of
  text matching.
* Updates `columnar_indexes.sql` tests to detect whether any
  index-based scan is used by searching the JSON plan’s `"Node Type"`
  (e.g., `Index Scan`, `Index Only Scan`, `Bitmap Index Scan`).
## Notable changes

* New helper in `src/test/regress/sql/columnar_test_helpers.sql`:
  * `EXECUTE format('EXPLAIN (FORMAT JSON, COSTS OFF, ANALYZE OFF) %s', q) INTO j;`
  * Returns the `jsonb` plan for downstream assertions.

```sql
SELECT NOT jsonb_path_exists(
         columnar_test_helpers._plan_json('SELECT b FROM columnar_table WHERE b = 30000'),
         '$[*].Plan.** ? (@."Node Type" like_regex "^(Index|Bitmap Index).*Scan$")'
       ) AS uses_no_index_scan;
```

to verify no index scan occurs at the partial index boundary
(`b = 30000`) and that an index scan is used where expected
(`b = 30001`).

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 src/test/regress/expected/columnar_indexes.out      | 13 ++++++++-----
 src/test/regress/expected/columnar_test_helpers.out |  8 ++++++++
 src/test/regress/sql/columnar_indexes.sql           |  6 +++++-
 src/test/regress/sql/columnar_test_helpers.sql      |  9 +++++++++
 4 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/src/test/regress/expected/columnar_indexes.out b/src/test/regress/expected/columnar_indexes.out
index d5e4b1cbb..341dc6b63 100644
--- a/src/test/regress/expected/columnar_indexes.out
+++ b/src/test/regress/expected/columnar_indexes.out
@@ -176,12 +176,15 @@ SELECT pg_total_relation_size('columnar_table_b_idx') * 5 <
(1 row)
 
 -- can't use index scan due to partial index boundaries
-EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30000;
- QUERY PLAN
---------------------------------------------------------------------
- Seq Scan on columnar_table
-   Filter: (b = 30000)
-(2 rows)
+SELECT NOT jsonb_path_exists(
+         columnar_test_helpers._plan_json('SELECT b FROM columnar_table WHERE b = 30000'),
+         -- Regex matches any index-based scan: "Index Scan", "Index Only Scan", "Bitmap Index Scan".
+         '$[*].Plan.** ? (@."Node Type" like_regex "^(Index|Bitmap Index).*Scan$")'
+       ) AS uses_no_index_scan; -- expect: t
+ uses_no_index_scan
+---------------------------------------------------------------------
+ t
+(1 row)
 
 -- can use index scan
 EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30001;
diff --git a/src/test/regress/expected/columnar_test_helpers.out b/src/test/regress/expected/columnar_test_helpers.out
index f4f179e55..a0061688d 100644
--- a/src/test/regress/expected/columnar_test_helpers.out
+++ b/src/test/regress/expected/columnar_test_helpers.out
@@ -146,3 +146,11 @@ BEGIN
     RETURN NEXT;
   END LOOP;
 END; $$ language plpgsql;
+CREATE OR REPLACE FUNCTION _plan_json(q text)
+RETURNS jsonb
+LANGUAGE plpgsql AS $$
+DECLARE j jsonb;
+BEGIN
+  EXECUTE format('EXPLAIN (FORMAT JSON, COSTS OFF, ANALYZE OFF) %s', q) INTO j;
+  RETURN j;
+END $$;
diff --git a/src/test/regress/sql/columnar_indexes.sql b/src/test/regress/sql/columnar_indexes.sql
index afb56e01c..6e54b8591 100644
--- a/src/test/regress/sql/columnar_indexes.sql
+++ b/src/test/regress/sql/columnar_indexes.sql
@@ -114,7 +114,11 @@ SELECT pg_total_relation_size('columnar_table_b_idx') * 5 <
        pg_total_relation_size('columnar_table_a_idx');
 
 -- can't use index scan due to partial index boundaries
-EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30000;
+SELECT NOT jsonb_path_exists(
+         columnar_test_helpers._plan_json('SELECT b FROM columnar_table WHERE b = 30000'),
+         -- Regex matches any index-based scan: "Index Scan", "Index Only Scan", "Bitmap Index Scan".
+         '$[*].Plan.** ? (@."Node Type" like_regex "^(Index|Bitmap Index).*Scan$")'
+       ) AS uses_no_index_scan; -- expect: t
 
 -- can use index scan
 EXPLAIN (COSTS OFF) SELECT b FROM columnar_table WHERE b = 30001;
diff --git a/src/test/regress/sql/columnar_test_helpers.sql b/src/test/regress/sql/columnar_test_helpers.sql
index 9cff79bbe..09105c0e7 100644
--- a/src/test/regress/sql/columnar_test_helpers.sql
+++ b/src/test/regress/sql/columnar_test_helpers.sql
@@ -158,3 +158,12 @@ BEGIN
     RETURN NEXT;
   END LOOP;
 END; $$ language plpgsql;
+
+CREATE OR REPLACE FUNCTION _plan_json(q text)
+RETURNS jsonb
+LANGUAGE plpgsql AS $$
+DECLARE j jsonb;
+BEGIN
+  EXECUTE format('EXPLAIN (FORMAT JSON, COSTS OFF, ANALYZE OFF) %s', q) INTO j;
+  RETURN j;
+END $$;

From be2fcda071fc79d42c49c28259e0f9ab767abef6 Mon Sep 17 00:00:00 2001
From: Mehmet YILMAZ
Date: Mon, 3 Nov 2025 15:59:00 +0300
Subject: =?UTF-8?q?PG18=20-=20Normalize=20PG18=20EXPLAIN:=20?=
 =?UTF-8?q?hide=20=E2=80=9CStorage=20=E2=80=A6=20Maximum=20Storage=20?=
 =?UTF-8?q?=E2=80=A6=E2=80=9D=20line=20(#8292)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fixes #8267

* Extend `src/test/regress/bin/normalize.sed` to drop the new PG18
  EXPLAIN instrumentation line:

```
Storage: Maximum Storage:
```

which appears under `Materialize`, some `CTE Scan`s, etc. when
`ANALYZE` is on.

**Why**

* PG18 added storage usage reporting for materialization/tuplestore
  nodes. It’s useful for humans but creates noisy, non-semantic diffs
  in regression output. There’s no EXPLAIN flag to suppress it, so we
  normalize in tests instead. This PR wires that normalization into our
  sed pipeline.

https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=1eff8279d

**How**

* Add a narrowly scoped sed rule that matches only lines starting with
  `Storage:` (keeping `Sort Method`, `Hash`, `Buffers`, etc. intact).
  Use ERE compatible with `sed -Ef` and Python `re` (no POSIX character
  classes), e.g.:

```
/^[ \t]*Storage:[ \t].*$/d
```
---
 src/test/regress/bin/normalize.sed | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed
index 06729bd16..e3df83fa4 100644
--- a/src/test/regress/bin/normalize.sed
+++ b/src/test/regress/bin/normalize.sed
@@ -378,3 +378,7 @@ s/\/is still referenced from table/g
 
 # PG18: EXPLAIN ANALYZE prints "Index Searches: N" for index scans — remove it
 /^\s*Index Searches:\s*\d+\s*$/d
+
+# EXPLAIN (PG18+): hide Materialize storage instrumentation
+# this rule can be removed when PG18 is the minimum supported version
+/^[ \t]*Storage:[ \t].*$/d

From 94653c1f4e84353978d5bd5571109f69edb83f86 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Mon, 3 Nov 2025 16:38:56 +0300
Subject: [PATCH 08/10] PG18 - Exclude child fk constraints from output (#8307)

Fixes #8280
Similar to https://github.com/citusdata/citus/commit/432b69e
---
 .../expected/citus_local_tables_mx.out        | 104 +++++++++---------
 .../regress/sql/citus_local_tables_mx.sql     |  26 +++--
 2 files changed, 71 insertions(+), 59 deletions(-)

diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out
index 98c03fea3..29df22fb6 100644
--- a/src/test/regress/expected/citus_local_tables_mx.out
+++ b/src/test/regress/expected/citus_local_tables_mx.out
@@ -371,11 +371,11 @@ CREATE TABLE cas_1 (a INT UNIQUE);
 CREATE TABLE cas_par (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par_1 PARTITION OF cas_par FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par_2 PARTITION OF cas_par FOR VALUES FROM (5) TO (8);
-ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
 CREATE TABLE cas_par2 (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par2_1 PARTITION OF cas_par2 FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par2_2 PARTITION OF cas_par2 FOR VALUES FROM (5) TO (8);
-ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
 CREATE TABLE cas_par3 (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par3_1 PARTITION OF cas_par3 FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par3_2 PARTITION OF cas_par3 FOR VALUES FROM (5) TO (8);
@@ -390,11 +390,11 @@ ERROR: cannot cascade operation via foreign keys as partition table citus_local
 SELECT citus_add_local_table_to_metadata('cas_par2', cascade_via_foreign_keys=>true);
 ERROR: cannot cascade operation via foreign keys as partition table citus_local_tables_mx.cas_par2_1 involved in a foreign key relationship that is not inherited from its parent table
 -- drop the foreign keys and establish them again using the parent table
-ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_1;
-ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_2;
-ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
-ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
-ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_3 FOREIGN KEY (a) REFERENCES cas_par(a);
+ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_first;
+ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_second;
+ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_third FOREIGN KEY (a) REFERENCES cas_par(a);
 -- this should error out as cascade_via_foreign_keys is not set to true
 SELECT citus_add_local_table_to_metadata('cas_par2');
 ERROR: relation citus_local_tables_mx.cas_par2 is involved in a foreign key relationship with another table
@@ -414,27 +414,29 @@ select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass o
(2 rows)
 
 -- verify the fkeys + fkeys with shard ids are created
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
- conname
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
+ conname
---------------------------------------------------------------------
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_1_1330008
- fkey_cas_test_1_1330008
- fkey_cas_test_1_1330008
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_2_1330006
- fkey_cas_test_2_1330006
- fkey_cas_test_2_1330006
- fkey_cas_test_3
- fkey_cas_test_3
- fkey_cas_test_3
- fkey_cas_test_3_1330013
- fkey_cas_test_3_1330013
- fkey_cas_test_3_1330013
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_first_1330008
+ fkey_cas_test_first_1330008
+ fkey_cas_test_first_1330008
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_second_1330006
+ fkey_cas_test_second_1330006
+ fkey_cas_test_second_1330006
+ fkey_cas_test_third
+ fkey_cas_test_third
+ fkey_cas_test_third
+ fkey_cas_test_third_1330013
+ fkey_cas_test_third_1330013
+ fkey_cas_test_third_1330013
(18 rows)
 
 -- when all partitions are converted, there should be 40 tables and indexes
@@ -457,18 +459,20 @@ SELECT count(*) FROM pg_class WHERE relname LIKE 'cas\_%' AND relnamespace IN
(1 row)
 
 -- verify that the shell foreign keys are created on the worker as well
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
- conname
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
+ conname
---------------------------------------------------------------------
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_3
- fkey_cas_test_3
- fkey_cas_test_3
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_third
+ fkey_cas_test_third
+ fkey_cas_test_third
(9 rows)
 
 \c - - - :master_port
@@ -494,18 +498,20 @@ select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass o
(2 rows)
 
 -- verify that the foreign keys with shard ids are gone, due to undistribution
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
- conname
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
+ conname
---------------------------------------------------------------------
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_1
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_2
- fkey_cas_test_3
- fkey_cas_test_3
- fkey_cas_test_3
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_first
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_second
+ fkey_cas_test_third
+ fkey_cas_test_third
+ fkey_cas_test_third
(9 rows)
 
 -- add a non-inherited fkey and verify it fails when trying to convert
diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql
index 2f7c76d6e..f4d0f0ffb 100644
--- a/src/test/regress/sql/citus_local_tables_mx.sql
+++ b/src/test/regress/sql/citus_local_tables_mx.sql
@@ -237,11 +237,11 @@ CREATE TABLE cas_1 (a INT UNIQUE);
 CREATE TABLE cas_par (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par_1 PARTITION OF cas_par FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par_2 PARTITION OF cas_par FOR VALUES FROM (5) TO (8);
-ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
 CREATE TABLE cas_par2 (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par2_1 PARTITION OF cas_par2 FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par2_2 PARTITION OF cas_par2 FOR VALUES FROM (5) TO (8);
-ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
 CREATE TABLE cas_par3 (a INT UNIQUE) PARTITION BY RANGE(a);
 CREATE TABLE cas_par3_1 PARTITION OF cas_par3 FOR VALUES FROM (1) TO (4);
 CREATE TABLE cas_par3_2 PARTITION OF cas_par3 FOR VALUES FROM (5) TO (8);
@@ -252,11 +252,11 @@ SELECT citus_add_local_table_to_metadata('cas_par2_2', cascade_via_foreign_keys=
 SELECT citus_add_local_table_to_metadata('cas_par2');
 SELECT citus_add_local_table_to_metadata('cas_par2', cascade_via_foreign_keys=>true);
 -- drop the foreign keys and establish them again using the parent table
-ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_1;
-ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_2;
-ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_1 FOREIGN KEY (a) REFERENCES cas_1(a);
-ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_2 FOREIGN KEY (a) REFERENCES cas_1(a);
-ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_3 FOREIGN KEY (a) REFERENCES cas_par(a);
+ALTER TABLE cas_par_1 DROP CONSTRAINT fkey_cas_test_first;
+ALTER TABLE cas_par2_1 DROP CONSTRAINT fkey_cas_test_second;
+ALTER TABLE cas_par ADD CONSTRAINT fkey_cas_test_first FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par2 ADD CONSTRAINT fkey_cas_test_second FOREIGN KEY (a) REFERENCES cas_1(a);
+ALTER TABLE cas_par3 ADD CONSTRAINT fkey_cas_test_third FOREIGN KEY (a) REFERENCES cas_par(a);
 -- this should error out as cascade_via_foreign_keys is not set to true
 SELECT citus_add_local_table_to_metadata('cas_par2');
 -- this should work
 SELECT citus_add_local_table_to_metadata('cas_par2', cascade_via_foreign_keys=>true);
 -- verify the partitioning hierarchy is preserved
 select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass order by 1;
 -- verify the fkeys + fkeys with shard ids are created
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
 -- when all partitions are converted, there should be 40 tables and indexes
 -- the individual names are not much relevant, so we only print the count
 SELECT count(*) FROM pg_class WHERE relname LIKE 'cas\_%' AND relnamespace IN
@@ -275,7 +277,9 @@ SELECT count(*) FROM pg_class WHERE relname LIKE 'cas\_%' AND relnamespace IN
 SELECT count(*) FROM pg_class WHERE relname LIKE 'cas\_%' AND relnamespace IN
 (SELECT oid FROM pg_namespace WHERE nspname = 'citus_local_tables_mx');
 -- verify that the shell foreign keys are created on the worker as well
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
 \c - - - :master_port
 SET search_path TO citus_local_tables_mx;
 -- undistribute table
@@ -287,7 +291,9 @@ SELECT undistribute_table('cas_par2', cascade_via_foreign_keys=>true);
 -- verify the partitioning hierarchy is preserved
 select inhrelid::regclass from pg_inherits where inhparent='cas_par'::regclass order by 1;
 -- verify that the foreign keys with shard ids are gone, due to undistribution
-select conname from pg_constraint where conname like 'fkey_cas_test%' order by conname;
+select conname from pg_constraint
+where conname like 'fkey_cas_test%' and conname not like '%_1' and conname not like '%_2'
+order by conname;
 -- add a non-inherited fkey and verify it fails when trying to convert
 ALTER TABLE cas_par2_1 ADD CONSTRAINT fkey_cas_test_3 FOREIGN KEY (a) REFERENCES cas_1(a);
 SELECT citus_add_local_table_to_metadata('cas_par2', cascade_via_foreign_keys=>true);

From fa7ca79c6f4076c61275a71e2e250b84b5c9f7f8 Mon Sep 17 00:00:00 2001
From: Naisila Puka <37271756+naisila@users.noreply.github.com>
Date: Mon, 3 Nov 2025 21:39:11 +0300
Subject: [PATCH 09/10] Change tupledesc->attrs[n] to TupleDescAttr(tupledesc,
 n) (#8240)

TupleDescAttr is available since PG11.
https://github.com/postgres/postgres/commit/2cd7084

PG18 simply forces you to use it, but there is no need to guard it with
a PG18 version check.
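
Illustration (a sketch, not code from this change — `attname_at` is a
hypothetical helper):

```c
#include "postgres.h"
#include "access/tupdesc.h"

/* fetch an attribute's name portably across PG versions */
static const char *
attname_at(TupleDesc tupdesc, int colno)
{
	/*
	 * TupleDescAttr() has existed since PG11; PG18 drops the direct
	 * tupdesc->attrs[colno] array access that older code relied on.
	 */
	Form_pg_attribute attr = TupleDescAttr(tupdesc, colno);

	return NameStr(attr->attname);
}
```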
---
 src/backend/columnar/columnar_metadata.c       | 10 ++++++----
 src/backend/columnar/columnar_tableam.c        |  6 +++---
 src/include/columnar/columnar_version_compat.h |  8 --------
 3 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c
index 159c126f7..52b1082d7 100644
--- a/src/backend/columnar/columnar_metadata.c
+++ b/src/backend/columnar/columnar_metadata.c
@@ -655,10 +655,12 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
 			{
 				values[Anum_columnar_chunk_minimum_value - 1] =
 					PointerGetDatum(DatumToBytea(chunk->minimumValue,
-												 Attr(tupleDescriptor, columnIndex)));
+												 TupleDescAttr(tupleDescriptor,
+															   columnIndex)));
 				values[Anum_columnar_chunk_maximum_value - 1] =
 					PointerGetDatum(DatumToBytea(chunk->maximumValue,
-												 Attr(tupleDescriptor, columnIndex)));
+												 TupleDescAttr(tupleDescriptor,
+															   columnIndex)));
 			}
 			else
 			{
@@ -816,9 +818,9 @@ ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
 				datumArray[Anum_columnar_chunk_maximum_value - 1]);
 
 			chunk->minimumValue =
-				ByteaToDatum(minValue, Attr(tupleDescriptor, columnIndex));
+				ByteaToDatum(minValue, TupleDescAttr(tupleDescriptor, columnIndex));
 			chunk->maximumValue =
-				ByteaToDatum(maxValue, Attr(tupleDescriptor, columnIndex));
+				ByteaToDatum(maxValue, TupleDescAttr(tupleDescriptor, columnIndex));
 
 			chunk->hasMinMax = true;
 		}
diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c
index 1158005c8..8e3796934 100644
--- a/src/backend/columnar/columnar_tableam.c
+++ b/src/backend/columnar/columnar_tableam.c
@@ -1012,7 +1012,7 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
 
 	for (int i = 0; i < tupdesc->natts; i++)
 	{
-		if (Attr(tupdesc, i)->attisdropped)
+		if (TupleDescAttr(tupdesc, i)->attisdropped)
 		{
 			continue;
 		}
@@ -1255,7 +1255,7 @@ LogRelationStats(Relation rel, int elevel)
 
 	for (uint32 column = 0; column < skiplist->columnCount; column++)
 	{
-		bool attrDropped = Attr(tupdesc, column)->attisdropped;
+		bool attrDropped = TupleDescAttr(tupdesc, column)->attisdropped;
 		for (uint32 chunk = 0; chunk < skiplist->chunkCount; chunk++)
 		{
 			ColumnChunkSkipNode *skipnode =
@@ -2638,7 +2638,7 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull)
 
 	for (int i = 0; i < tupleDesc->natts; i++)
 	{
-		if (!isnull[i] && Attr(tupleDesc, i)->attlen == -1 &&
+		if (!isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1 &&
 			VARATT_IS_EXTENDED(values[i]))
 		{
 			/* make a copy */
diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h
index 0aaff6409..be5c3e379 100644
--- a/src/include/columnar/columnar_version_compat.h
+++ b/src/include/columnar/columnar_version_compat.h
@@ -25,12 +25,4 @@
 #define ExplainPropertyLong(qlabel, value, es) \
 	ExplainPropertyInteger(qlabel, NULL, value, es)
 
-
-/* tuple-descriptor attributes moved in PostgreSQL 18: */
-#if PG_VERSION_NUM >= PG_VERSION_18
-#define Attr(tupdesc, colno) TupleDescAttr((tupdesc), (colno))
-#else
-#define Attr(tupdesc, colno) (&((tupdesc)->attrs[(colno)]))
-#endif
-
 #endif /* COLUMNAR_COMPAT_H */

From 5a71f0d1cac8e205a0630a97d882a3ae940dbad7 Mon Sep 17 00:00:00 2001
From: Colm
Date: Mon, 3 Nov 2025 18:56:17 +0000
Subject: [PATCH 10/10] PG18: Print names in order in tables are not colocated
 error detail. (#8319)

Fixes #8275 by printing the names in order, so that in every message
`DETAIL: x and y are not colocated` x precedes (or is lexicographically
less than) y.
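
The trick, reduced to a sketch (hypothetical helper; the real code
orders the `get_rel_name()` results the same way, as the diff below
shows):

```c
#include <stdio.h>
#include <string.h>

/*
 * Print the two relation names in lexicographic order, so the error
 * detail is stable regardless of which table the planner saw first.
 */
static void
print_not_colocated(const char *a, const char *b)
{
	int cmp = strcmp(a, b);
	const char *first = (cmp > 0) ? b : a;
	const char *second = (cmp > 0) ? a : b;

	printf("DETAIL: %s and %s are not colocated\n", first, second);
}
```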
---
 .../distributed/planner/multi_physical_planner.c       | 9 +++++++--
 .../regress/expected/multi_shard_update_delete.out     | 2 +-
 .../regress/expected/query_single_shard_table.out      | 14 +++++++-------
 .../regress/expected/recurring_join_pushdown.out       | 4 ++--
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c
index 3584246ef..336a0140c 100644
--- a/src/backend/distributed/planner/multi_physical_planner.c
+++ b/src/backend/distributed/planner/multi_physical_planner.c
@@ -2501,11 +2501,16 @@ ErrorIfUnsupportedShardDistribution(Query *query)
 															 currentRelationId);
 		if (!coPartitionedTables)
 		{
+			char *firstRelName = get_rel_name(firstTableRelationId);
+			char *currentRelName = get_rel_name(currentRelationId);
+			int compareResult = strcmp(firstRelName, currentRelName);
+
 			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 							errmsg("cannot push down this subquery"),
 							errdetail("%s and %s are not colocated",
-									  get_rel_name(firstTableRelationId),
-									  get_rel_name(currentRelationId))));
+									  (compareResult > 0 ? currentRelName : firstRelName),
+									  (compareResult > 0 ? firstRelName :
+									   currentRelName))));
 		}
 	}
 }
diff --git a/src/test/regress/expected/multi_shard_update_delete.out b/src/test/regress/expected/multi_shard_update_delete.out
index a42f90475..55f8139f1 100644
--- a/src/test/regress/expected/multi_shard_update_delete.out
+++ b/src/test/regress/expected/multi_shard_update_delete.out
@@ -725,7 +725,7 @@ SET value_2 = 5
 FROM events_test_table_2
 WHERE users_test_table.user_id = events_test_table_2.user_id;
 ERROR: cannot push down this subquery
-DETAIL: users_test_table and events_test_table_2 are not colocated
+DETAIL: events_test_table_2 and users_test_table are not colocated
 -- Should error out due to multiple row return from subquery, but we can not get this information within
 -- subquery pushdown planner. This query will be sent to worker with recursive planner.
 \set VERBOSITY terse
diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out
index 74e499066..1b3b0f753 100644
--- a/src/test/regress/expected/query_single_shard_table.out
+++ b/src/test/regress/expected/query_single_shard_table.out
@@ -549,21 +549,21 @@ WHERE EXISTS (
 );
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 ERROR: cannot push down this subquery
-DETAIL: nullkey_c2_t2 and nullkey_c1_t1 are not colocated
+DETAIL: nullkey_c1_t1 and nullkey_c2_t2 are not colocated
 SELECT COUNT(*) FROM nullkey_c1_t1 t1
 WHERE t1.b IN (
     SELECT b+1 FROM nullkey_c2_t2 t2 WHERE t2.b = t1.a
 );
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 ERROR: cannot push down this subquery
-DETAIL: nullkey_c2_t2 and nullkey_c1_t1 are not colocated
+DETAIL: nullkey_c1_t1 and nullkey_c2_t2 are not colocated
 SELECT COUNT(*) FROM nullkey_c1_t1 t1
 WHERE t1.b NOT IN (
     SELECT a FROM nullkey_c2_t2 t2 WHERE t2.b > t1.a
 );
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 ERROR: cannot push down this subquery
-DETAIL: nullkey_c2_t2 and nullkey_c1_t1 are not colocated
+DETAIL: nullkey_c1_t1 and nullkey_c2_t2 are not colocated
 -- join with a reference table
 SELECT COUNT(*) FROM nullkey_c1_t1, reference_table WHERE nullkey_c1_t1.a = reference_table.a;
 DEBUG: Creating router plan
@@ -3009,14 +3009,14 @@ ORDER BY 1,2 LIMIT 1;
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 DEBUG: push down of limit count: 1
 ERROR: cannot push down this subquery
-DETAIL: users_table and non_colocated_events_table are not colocated
+DETAIL: non_colocated_events_table and users_table are not colocated
 SELECT event_type, (SELECT max(time) FROM users_table WHERE user_id = e.value_2)
 FROM non_colocated_events_table e
 ORDER BY 1,2 LIMIT 1;
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 DEBUG: push down of limit count: 1
 ERROR: cannot push down this subquery
-DETAIL: users_table and non_colocated_events_table are not colocated
+DETAIL: non_colocated_events_table and users_table are not colocated
 SELECT event_type, (SELECT max(time) FROM users_table)
 FROM non_colocated_events_table e
 ORDER BY 1,2 LIMIT 1;
@@ -3118,7 +3118,7 @@ ORDER BY 1 LIMIT 3;
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 DEBUG: push down of limit count: 3
 ERROR: cannot push down this subquery
-DETAIL: users_table and non_colocated_events_table are not colocated
+DETAIL: non_colocated_events_table and users_table are not colocated
 SELECT (SELECT (SELECT e.user_id + user_id) FROM users_reference_table WHERE user_id = e.user_id GROUP BY user_id)
 FROM non_colocated_events_table e
 GROUP BY 1
@@ -3169,7 +3169,7 @@ ORDER BY 1 LIMIT 3;
 DEBUG: router planner does not support queries that reference non-colocated distributed tables
 DEBUG: push down of limit count: 3
 ERROR: cannot push down this subquery
-DETAIL: users_table and non_colocated_events_table are not colocated
+DETAIL: non_colocated_events_table and users_table are not colocated
 SELECT
   user_id, count(*)
 FROM
diff --git a/src/test/regress/expected/recurring_join_pushdown.out b/src/test/regress/expected/recurring_join_pushdown.out
index 4bc19fcea..530f25e02 100644
--- a/src/test/regress/expected/recurring_join_pushdown.out
+++ b/src/test/regress/expected/recurring_join_pushdown.out
@@ -120,11 +120,11 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
 -- Test that the join is not pushed down when we have non-colocated tables in the RHS
 SELECT count(*) FROM r1 LEFT JOIN (SELECT d1.a, d3_not_colocated.b FROM d3_not_colocated FULL JOIN d1 ON d3_not_colocated.a = d1.a) AS t1 USING (a);
 ERROR: cannot push down this subquery
-DETAIL: d3_not_colocated and d1 are not colocated
+DETAIL: d1 and d3_not_colocated are not colocated
 -- The same error with its RIGHT JOIN variant
 SELECT count(*) FROM r1 LEFT JOIN (SELECT d1.a, d3_not_colocated.b FROM d3_not_colocated JOIN d1 ON d3_not_colocated.a = d1.a) AS t1 USING (a);
 ERROR: cannot push down this subquery
-DETAIL: d3_not_colocated and d1 are not colocated
+DETAIL: d1 and d3_not_colocated are not colocated
 -- Basic test cases with ON syntax
 -- Test that the join is pushed down to the worker nodes, using "on" syntax
 SET client_min_messages TO DEBUG3;