Drops PG15 support (#8372)

DESCRIPTION: Drops PG15 support

Steps involved:

- Remove pg15 from configure
- Remove PG_VERSION_15 lines
- Delete ruleutils_15.c
- Remove `PG_VERSION_NUM >= PG_VERSION_16` and `PG_VERSION_NUM < PG_VERSION_16` guards (see the sketch after this list)
- Remove the `>= PG_VERSION_16` entries from pg_version_compat.h
- Clean up server_version_ge_16 from tests
- Remove pg15 CI tests
- Remove normalize rules specific to PG15 diffs
- Rename `pg_get_object_address_13_14_15.c` to
`pg_get_object_address_16_17_18.c`
- Remove obsolete PG15 comments
- Remove PG15 from the pseudoconstant-qual error message
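
Most of the ~15k deleted lines are the same mechanical change: each PG16 version guard collapses to its PG16+ branch. A minimal sketch of the pattern, adapted from the COPY permission-check hunk in this diff (fragment only, not compilable on its own):

```c
/* before: PG15 still needed the pre-RTEPermissionInfo code path */
#if PG_VERSION_NUM >= PG_VERSION_16
perminfo->selectedCols = bms_add_member(perminfo->selectedCols, attno);
#else
rte->selectedCols = bms_add_member(rte->selectedCols, attno);
#endif

/* after: PG16 is the minimum, so the guard and the PG15 branch are deleted */
perminfo->selectedCols = bms_add_member(perminfo->selectedCols, attno);
```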

Fixes #8351 

Sister PR https://github.com/citusdata/the-process/pull/178
Naisila Puka 2025-12-15 14:38:51 +03:00 committed by GitHub
parent 0e110ee5a9
commit 62a9190667
102 changed files with 130 additions and 15001 deletions


@ -72,18 +72,6 @@ ENV PATH="/home/citus/.pgenv/pgsql/bin:${PATH}"
USER citus
# build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
FROM base AS pg15
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.14
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
# create a staging directory with all files we want to copy from our pgenv build
# we will copy the contents of the staged folder into the final image at once
RUN mkdir .pgenv-staging/
RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg16
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.10
RUN rm .pgenv/src/*.tar*
@ -198,7 +186,6 @@ RUN git clone https://github.com/so-fancy/diff-so-fancy.git \
COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/
COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg17 /home/citus/.pgenv-staging/ /home/citus/.pgenv/

.gitattributes vendored

@ -25,7 +25,6 @@ configure -whitespace
# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
src/backend/distributed/deparser/ruleutils_16.c -citus-style
src/backend/distributed/deparser/ruleutils_17.c -citus-style
src/backend/distributed/deparser/ruleutils_18.c -citus-style


@ -32,11 +32,10 @@ jobs:
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
style_checker_tools_version: "0.8.33"
sql_snapshot_pg_version: "17.6"
image_suffix: "-ve4d3aa0"
pg15_version: '{ "major": "15", "full": "15.14" }'
image_suffix: "-v9555df2"
pg16_version: '{ "major": "16", "full": "16.10" }'
pg17_version: '{ "major": "17", "full": "17.6" }'
upgrade_pg_versions: "15.14-16.10-17.6"
upgrade_pg_versions: "16.10-17.6"
steps:
# Since GHA jobs need at least one step we use a noop step here.
- name: Set up parameters
@ -110,7 +109,6 @@ jobs:
image_suffix:
- ${{ needs.params.outputs.image_suffix}}
pg_version:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
runs-on: ubuntu-latest
@ -141,7 +139,6 @@ jobs:
image_name:
- ${{ needs.params.outputs.test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
make:
@ -162,10 +159,6 @@ jobs:
- check-enterprise-isolation-logicalrep-2
- check-enterprise-isolation-logicalrep-3
include:
- make: check-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
@ -174,10 +167,6 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
@ -186,10 +175,6 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
@ -198,10 +183,6 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg15_version }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
@ -210,10 +191,6 @@ jobs:
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg17_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
@ -268,7 +245,6 @@ jobs:
image_name:
- ${{ needs.params.outputs.fail_test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
@ -315,12 +291,8 @@ jobs:
fail-fast: false
matrix:
include:
- old_pg_major: 15
new_pg_major: 16
- old_pg_major: 16
new_pg_major: 17
- old_pg_major: 15
new_pg_major: 17
env:
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
@ -376,7 +348,6 @@ jobs:
fail-fast: false
matrix:
pg_version:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
steps:
- uses: actions/checkout@v4


@ -25,7 +25,7 @@ jobs:
name: Build Citus
runs-on: ubuntu-latest
container:
image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
image: ${{ vars.build_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }}
options: --user root
steps:
- uses: actions/checkout@v4
@ -55,7 +55,7 @@ jobs:
name: Test flakyness
runs-on: ubuntu-latest
container:
image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
image: ${{ vars.fail_test_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }}
options: --user root
needs:
[build, prepare_parallelization_matrix]

configure vendored

@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
elif test "$version_num" != '16' -a "$version_num" != '17'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5


@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
elif test "$version_num" != '16' -a "$version_num" != '17'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])


@ -25,9 +25,7 @@
#include <lz4.h>
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
#include "varatt.h"
#endif
#if HAVE_LIBZSTD
#include <zstd.h>


@ -39,11 +39,10 @@
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#endif
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/ruleutils.h"
@ -140,9 +139,7 @@ static List * set_deparse_context_planstate(List *dpcontext, Node *node,
/* other helpers */
static List * ColumnarVarNeeded(ColumnarScanState *columnarScanState);
static Bitmapset * ColumnarAttrNeeded(ScanState *ss);
#if PG_VERSION_NUM >= PG_VERSION_16
static Bitmapset * fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns);
#endif
/* saved hook value in case of unload */
static set_rel_pathlist_hook_type PreviousSetRelPathlistHook = NULL;
@ -551,7 +548,7 @@ ColumnarIndexScanAdditionalCost(PlannerInfo *root, RelOptInfo *rel,
* "anti-correlated" (-1) since both help us avoiding from reading the
* same stripe again and again.
*/
double absIndexCorrelation = float_abs(indexCorrelation);
double absIndexCorrelation = fabs(indexCorrelation);
/*
* To estimate the number of stripes that we need to read, we do linear
@ -670,7 +667,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation
* If the Var is not highly correlated, then the chunk's min/max bounds
* will be nearly useless.
*/
if (float_abs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold)
if (fabs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold)
{
if (absVarCorrelation)
{
@ -678,7 +675,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation
* Report absVarCorrelation if caller wants to know why given
* var is rejected.
*/
*absVarCorrelation = float_abs(varCorrelation);
*absVarCorrelation = fabs(varCorrelation);
}
return false;
}
@ -1063,9 +1060,7 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses)
* For the relevant PG16 commit requiring this addition:
* postgres/postgres@2489d76
*/
#if PG_VERSION_NUM >= PG_VERSION_16
candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels);
#endif
return candidateRelids;
}
@ -1394,7 +1389,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
}
int numberOfColumnsRead = 0;
#if PG_VERSION_NUM >= PG_VERSION_16
if (rte->perminfoindex > 0)
{
/*
@ -1426,9 +1420,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
perminfo->
selectedCols));
}
#else
numberOfColumnsRead = bms_num_members(rte->selectedCols);
#endif
int numberOfClausesPushed = list_length(allClauses);
@ -1449,8 +1440,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
}
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* fixup_inherited_columns
*
@ -1509,9 +1498,6 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
}
#endif
/*
* CostColumnarScan calculates the cost of scanning the columnar table. The
* cost is estimated by using all stripe metadata to estimate based on the


@ -43,15 +43,18 @@
#include "executor/spi.h"
#include "lib/stringinfo.h"
#include "nodes/execnodes.h"
#include "parser/parse_relation.h"
#include "storage/fd.h"
#include "storage/lmgr.h"
#include "storage/procarray.h"
#include "storage/relfilelocator.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenumbermap.h"
#include "citus_version.h"
#include "pg_version_constants.h"
@ -62,14 +65,6 @@
#include "distributed/listutils.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "parser/parse_relation.h"
#include "storage/relfilelocator.h"
#include "utils/relfilenumbermap.h"
#else
#include "utils/relfilenodemap.h"
#endif
#define COLUMNAR_RELOPTION_NAMESPACE "columnar"
#define SLOW_METADATA_ACCESS_WARNING \
"Metadata index %s is not available, this might mean slower read/writes " \
@ -730,7 +725,7 @@ ReadStripeSkipList(Relation rel, uint64 stripe,
ScanKeyData scanKey[2];
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
RelationPhysicalIdentifier_compat(rel));
rel->rd_locator);
Oid columnarChunkOid = ColumnarChunkRelationId();
Relation columnarChunk = table_open(columnarChunkOid, AccessShareLock);
@ -1277,7 +1272,7 @@ List *
StripesForRelfilelocator(Relation rel)
{
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
RelationPhysicalIdentifier_compat(rel));
rel->rd_locator);
/*
* PG18 requires snapshot to be active or registered before it's used
@ -1309,7 +1304,7 @@ uint64
GetHighestUsedAddress(Relation rel)
{
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
RelationPhysicalIdentifier_compat(rel));
rel->rd_locator);
uint64 highestUsedAddress = 0;
uint64 highestUsedId = 0;
@ -1330,10 +1325,8 @@ GetHighestUsedAddress(Relation rel)
Oid
ColumnarRelationId(Oid relid, RelFileLocator relfilelocator)
{
return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat
(relfilelocator),
RelationPhysicalIdentifierNumber_compat
(relfilelocator));
return OidIsValid(relid) ? relid : RelidByRelfilenumber(relfilelocator.spcOid,
relfilelocator.relNumber);
}
@ -1624,7 +1617,7 @@ DeleteMetadataRows(Relation rel)
}
uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel),
RelationPhysicalIdentifier_compat(rel));
rel->rd_locator);
DeleteStorageFromColumnarMetadataTable(ColumnarStripeRelationId(),
Anum_columnar_stripe_storageid,
@ -1789,10 +1782,8 @@ create_estate_for_relation(Relation rel)
rte->rellockmode = AccessShareLock;
/* Prepare permission info on PG 16+ */
#if PG_VERSION_NUM >= PG_VERSION_16
List *perminfos = NIL;
addRTEPermissionInfo(&perminfos, rte);
#endif
/* Initialize the range table, with the right signature for each PG version */
#if PG_VERSION_NUM >= PG_VERSION_18
@ -1804,7 +1795,7 @@ create_estate_for_relation(Relation rel)
perminfos,
NULL /* unpruned_relids: not used by columnar */
);
#elif PG_VERSION_NUM >= PG_VERSION_16
#else
/* PG 16/17: three-arg signature (permInfos) */
ExecInitRangeTable(
@ -1812,13 +1803,6 @@ create_estate_for_relation(Relation rel)
list_make1(rte),
perminfos
);
#else
/* PG 15: two-arg signature */
ExecInitRangeTable(
estate,
list_make1(rte)
);
#endif
estate->es_output_cid = GetCurrentCommandId(true);
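
The `rd_locator` rewrites in this file and the next few are the other recurring pattern: PG16 renamed `RelFileNode`/`rd_node` to `RelFileLocator`/`rd_locator`, and the now-deleted pg_version_compat.h macros bridged that rename, presumably along these lines (assumed shape, not quoted from the deleted code):

```c
#if PG_VERSION_NUM >= PG_VERSION_16
#define RelationPhysicalIdentifier_compat(rel) ((rel)->rd_locator)
#define RelationPhysicalIdentifierNumber_compat(locator) ((locator).relNumber)
#else
#define RelationPhysicalIdentifier_compat(rel) ((rel)->rd_node)
#define RelationPhysicalIdentifierNumber_compat(locator) ((locator).relNode)
#endif
```

With PG16 as the floor only the first branch is live, so call sites now spell `rel->rd_locator` and `.relNumber` directly.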


@ -255,8 +255,7 @@ ColumnarReadFlushPendingWrites(ColumnarReadState *readState)
{
Assert(!readState->snapshotRegisteredByUs);
RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
RelationPhysicalIdentifier_compat(readState->relation));
RelFileNumber relfilenumber = readState->relation->rd_locator.relNumber;
FlushWriteStateForRelfilenumber(relfilenumber, GetCurrentSubTransactionId());
if (readState->snapshot == InvalidSnapshot || !IsMVCCSnapshot(readState->snapshot))


@ -169,11 +169,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
}
/* create two pages */
#if PG_VERSION_NUM >= PG_VERSION_16
PGIOAlignedBlock block;
#else
PGAlignedBlock block;
#endif
Page page = block.data;
/* write metapage */
@ -192,7 +188,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
(char *) &metapage, sizeof(ColumnarMetapage));
phdr->pd_lower += sizeof(ColumnarMetapage);
log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM,
log_newpage(&srel->smgr_rlocator.locator, MAIN_FORKNUM,
COLUMNAR_METAPAGE_BLOCKNO, page, true);
PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO);
smgrextend(srel, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true);
@ -200,7 +196,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
/* write empty page */
PageInit(page, BLCKSZ, 0);
log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM,
log_newpage(&srel->smgr_rlocator.locator, MAIN_FORKNUM,
COLUMNAR_EMPTY_BLOCKNO, page, true);
PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO);
smgrextend(srel, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true);


@ -208,8 +208,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
uint32 flags, Bitmapset *attr_needed, List *scanQual)
{
CheckCitusColumnarVersion(ERROR);
RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
RelationPhysicalIdentifier_compat(relation));
RelFileNumber relfilenumber = relation->rd_locator.relNumber;
/*
* A memory context to use for scan-wide data, including the lazily
@ -435,8 +434,7 @@ columnar_index_fetch_begin(Relation rel)
{
CheckCitusColumnarVersion(ERROR);
RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
RelationPhysicalIdentifier_compat(rel));
RelFileNumber relfilenumber = rel->rd_locator.relNumber;
if (PendingWritesInUpperTransactions(relfilenumber, GetCurrentSubTransactionId()))
{
/* XXX: maybe we can just flush the data and continue */
@ -865,11 +863,9 @@ columnar_relation_set_new_filelocator(Relation rel,
* state. If they are equal, this is a new relation object and we don't
* need to clean anything.
*/
if (RelationPhysicalIdentifierNumber_compat(RelationPhysicalIdentifier_compat(rel)) !=
RelationPhysicalIdentifierNumberPtr_compat(newrlocator))
if (rel->rd_locator.relNumber != newrlocator->relNumber)
{
MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(
RelationPhysicalIdentifier_compat(rel)),
MarkRelfilenumberDropped(rel->rd_locator.relNumber,
GetCurrentSubTransactionId());
DeleteMetadataRows(rel);
@ -892,9 +888,9 @@ static void
columnar_relation_nontransactional_truncate(Relation rel)
{
CheckCitusColumnarVersion(ERROR);
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
RelFileLocator relfilelocator = rel->rd_locator;
NonTransactionDropWriteState(RelationPhysicalIdentifierNumber_compat(relfilelocator));
NonTransactionDropWriteState(relfilelocator.relNumber);
/* Delete old relfilenode metadata */
DeleteMetadataRows(rel);
@ -1098,7 +1094,6 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
List *indexList = RelationGetIndexList(rel);
int nindexes = list_length(indexList);
#if PG_VERSION_NUM >= PG_VERSION_16
struct VacuumCutoffs cutoffs;
vacuum_get_cutoffs(rel, params, &cutoffs);
@ -1140,68 +1135,6 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
false);
#endif
#else
TransactionId oldestXmin;
TransactionId freezeLimit;
MultiXactId multiXactCutoff;
/* initialize xids */
#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
MultiXactId oldestMxact;
vacuum_set_xid_limits(rel,
params->freeze_min_age,
params->freeze_table_age,
params->multixact_freeze_min_age,
params->multixact_freeze_table_age,
&oldestXmin, &oldestMxact,
&freezeLimit, &multiXactCutoff);
Assert(MultiXactIdPrecedesOrEquals(multiXactCutoff, oldestMxact));
#else
TransactionId xidFullScanLimit;
MultiXactId mxactFullScanLimit;
vacuum_set_xid_limits(rel,
params->freeze_min_age,
params->freeze_table_age,
params->multixact_freeze_min_age,
params->multixact_freeze_table_age,
&oldestXmin, &freezeLimit, &xidFullScanLimit,
&multiXactCutoff, &mxactFullScanLimit);
#endif
Assert(TransactionIdPrecedesOrEquals(freezeLimit, oldestXmin));
/*
* Columnar storage doesn't hold any transaction IDs, so we can always
* just advance to the most aggressive value.
*/
TransactionId newRelFrozenXid = oldestXmin;
#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
MultiXactId newRelminMxid = oldestMxact;
#else
MultiXactId newRelminMxid = multiXactCutoff;
#endif
double new_live_tuples = ColumnarTableTupleCount(rel);
/* all visible pages are always 0 */
BlockNumber new_rel_allvisible = 0;
#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
bool frozenxid_updated;
bool minmulti_updated;
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, nindexes > 0,
newRelFrozenXid, newRelminMxid,
&frozenxid_updated, &minmulti_updated, false);
#else
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, nindexes > 0,
newRelFrozenXid, newRelminMxid, false);
#endif
#endif
#if PG_VERSION_NUM >= PG_VERSION_18
pgstat_report_vacuum(RelationGetRelid(rel),
rel->rd_rel->relisshared,
@ -1906,8 +1839,8 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt
Datum *abbrev = NULL;
Datum tsDatum;
bool tsDatumIsNull;
if (!tuplesort_getdatum_compat(tupleSort, forwardDirection, false,
&tsDatum, &tsDatumIsNull, abbrev))
if (!tuplesort_getdatum(tupleSort, forwardDirection, false,
&tsDatum, &tsDatumIsNull, abbrev))
{
ItemPointerSetInvalid(&tsItemPointerData);
break;
@ -2148,12 +2081,12 @@ ColumnarTableDropHook(Oid relid)
* tableam tables storage is managed by postgres.
*/
Relation rel = table_open(relid, AccessExclusiveLock);
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
RelFileLocator relfilelocator = rel->rd_locator;
DeleteMetadataRows(rel);
DeleteColumnarTableOptions(rel->rd_id, true);
MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(relfilelocator),
MarkRelfilenumberDropped(relfilelocator.relNumber,
GetCurrentSubTransactionId());
/* keep the lock since we did physical changes to the relation */
@ -2572,11 +2505,7 @@ static const TableAmRoutine columnar_am_methods = {
.tuple_lock = columnar_tuple_lock,
.finish_bulk_insert = columnar_finish_bulk_insert,
#if PG_VERSION_NUM >= PG_VERSION_16
.relation_set_new_filelocator = columnar_relation_set_new_filelocator,
#else
.relation_set_new_filenode = columnar_relation_set_new_filelocator,
#endif
.relation_nontransactional_truncate = columnar_relation_nontransactional_truncate,
.relation_copy_data = columnar_relation_copy_data,
.relation_copy_for_cluster = columnar_relation_copy_for_cluster,
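
The `tuplesort_getdatum_compat` unwrapping earlier in this file follows from PG16 adding a `copy` flag to `tuplesort_getdatum()` (pre-16 it always returned a copy); the retired shim presumably just dropped that argument on PG15 (assumed shape):

```c
#if PG_VERSION_NUM >= PG_VERSION_16
#define tuplesort_getdatum_compat(state, forward, copy, val, isNull, abbrev) \
	tuplesort_getdatum(state, forward, copy, val, isNull, abbrev)
#else
#define tuplesort_getdatum_compat(state, forward, copy, val, isNull, abbrev) \
	tuplesort_getdatum(state, forward, val, isNull, abbrev)
#endif
```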


@ -23,10 +23,12 @@
#include "access/nbtree.h"
#include "catalog/pg_am.h"
#include "storage/fd.h"
#include "storage/relfilelocator.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenumbermap.h"
#include "pg_version_compat.h"
#include "pg_version_constants.h"
@ -35,13 +37,6 @@
#include "columnar/columnar_storage.h"
#include "columnar/columnar_version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "storage/relfilelocator.h"
#include "utils/relfilenumbermap.h"
#else
#include "utils/relfilenodemap.h"
#endif
struct ColumnarWriteState
{
TupleDesc tupleDescriptor;
@ -103,7 +98,7 @@ ColumnarBeginWrite(Relation rel,
ColumnarOptions options,
TupleDesc tupleDescriptor)
{
RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
RelFileLocator relfilelocator = rel->rd_locator;
/* get comparison function pointers for each of the columns */
uint32 columnCount = tupleDescriptor->natts;


@ -146,9 +146,7 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
}
WriteStateMapEntry *hashEntry = hash_search(WriteStateMap,
&RelationPhysicalIdentifierNumber_compat(
RelationPhysicalIdentifier_compat(
relation)),
&(relation->rd_locator.relNumber),
HASH_ENTER, &found);
if (!found)
{


@ -1476,20 +1476,10 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId,
static void
FinalizeCitusLocalTableCreation(Oid relationId)
{
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* PG16+ supports truncate triggers on foreign tables
*/
if (RegularTable(relationId) || IsForeignTable(relationId))
#else
/*
* If it is a foreign table, then skip creating citus truncate trigger
* as foreign tables do not support truncate triggers.
*/
if (RegularTable(relationId))
#endif
{
CreateTruncateTrigger(relationId);
}


@ -161,7 +161,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
pfree(collctype);
}
#if PG_VERSION_NUM >= PG_VERSION_16
char *collicurules = NULL;
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);
if (!isnull)
@ -170,7 +169,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
appendStringInfo(&collationNameDef, ", rules = %s",
quote_literal_cstr(collicurules));
}
#endif
if (!collisdeterministic)
{
appendStringInfoString(&collationNameDef, ", deterministic = false");


@ -1272,17 +1272,10 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
colocationId, citusTableParams.replicationModel,
autoConverted);
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* PG16+ supports truncate triggers on foreign tables
*/
if (RegularTable(relationId) || IsForeignTable(relationId))
#else
/* foreign tables do not support TRUNCATE trigger */
if (RegularTable(relationId))
#endif
{
CreateTruncateTrigger(relationId);
}


@ -81,10 +81,7 @@ typedef struct DatabaseCollationInfo
char *datctype;
char *daticulocale;
char *datcollversion;
#if PG_VERSION_NUM >= PG_VERSION_16
char *daticurules;
#endif
} DatabaseCollationInfo;
static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database
@ -853,14 +850,12 @@ GetDatabaseCollation(Oid dbOid)
info.datcollversion = TextDatumGetCString(collverDatum);
}
#if PG_VERSION_NUM >= PG_VERSION_16
Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc,
&isNull);
if (!isNull)
{
info.daticurules = TextDatumGetCString(icurulesDatum);
}
#endif
table_close(rel, AccessShareLock);
heap_freetuple(tup);
@ -954,13 +949,11 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
quote_identifier(GetLocaleProviderString(
databaseForm->datlocprovider)));
#if PG_VERSION_NUM >= PG_VERSION_16
if (collInfo.daticurules != NULL)
{
appendStringInfo(&str, " ICU_RULES = %s", quote_identifier(
collInfo.daticurules));
}
#endif
return str.data;
}


@ -64,8 +64,8 @@ CreateDomainStmt *
RecreateDomainStmt(Oid domainOid)
{
CreateDomainStmt *stmt = makeNode(CreateDomainStmt);
stmt->domainname = stringToQualifiedNameList_compat(format_type_be_qualified(
domainOid));
stmt->domainname = stringToQualifiedNameList(format_type_be_qualified(domainOid),
NULL);
HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(domainOid));
if (!HeapTupleIsValid(tup))


@ -19,6 +19,7 @@
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "catalog/pg_namespace.h"
#include "commands/defrem.h"
#include "commands/tablecmds.h"
#include "lib/stringinfo.h"
@ -53,10 +54,6 @@
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "catalog/pg_namespace.h"
#endif
/* Local functions forward declarations for helper functions */
static void ErrorIfCreateIndexHasTooManyColumns(IndexStmt *createIndexStatement);


@ -100,6 +100,7 @@
#include "distributed/multi_router_planner.h"
#include "distributed/placement_connection.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/relation_utils.h"
#include "distributed/remote_commands.h"
#include "distributed/remote_transaction.h"
#include "distributed/replication_origin_session_utils.h"
@ -111,10 +112,6 @@
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "distributed/relation_utils.h"
#endif
/* constant used in binary protocol */
static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
@ -3253,12 +3250,8 @@ CheckCopyPermissions(CopyStmt *copyStatement)
RangeTblEntry *rte = (RangeTblEntry*) linitial(range_table);
tupDesc = RelationGetDescr(rel);
#if PG_VERSION_NUM >= PG_VERSION_16
/* create permission info for rte */
RTEPermissionInfo *perminfo = GetFilledPermissionInfo(rel->rd_id, rte->inh, required_access);
#else
rte->requiredPerms = required_access;
#endif
attnums = CopyGetAttnums(tupDesc, rel, copyStatement->attlist);
foreach(cur, attnums)
@ -3267,29 +3260,17 @@ CheckCopyPermissions(CopyStmt *copyStatement)
if (is_from)
{
#if PG_VERSION_NUM >= PG_VERSION_16
perminfo->insertedCols = bms_add_member(perminfo->insertedCols, attno);
#else
rte->insertedCols = bms_add_member(rte->insertedCols, attno);
#endif
}
else
{
#if PG_VERSION_NUM >= PG_VERSION_16
perminfo->selectedCols = bms_add_member(perminfo->selectedCols, attno);
#else
rte->selectedCols = bms_add_member(rte->selectedCols, attno);
#endif
}
}
#if PG_VERSION_NUM >= PG_VERSION_16
/* link rte to its permission info then check permissions */
rte->perminfoindex = 1;
ExecCheckPermissions(list_make1(rte), list_make1(perminfo), true);
#else
ExecCheckRTPerms(range_table, true);
#endif
/* TODO: Perform RLS checks once supported */


@ -734,7 +734,7 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue)
* using this function
*/
int gucCount = 0;
struct config_generic **gucVariables = get_guc_variables_compat(&gucCount);
struct config_generic **gucVariables = get_guc_variables(&gucCount);
struct config_generic **matchingConfig =
(struct config_generic **) SafeBsearch((void *) &key,
@ -851,12 +851,8 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
if (strcmp(option->defname, "adminmembers") == 0)
{
#if PG_VERSION_NUM >= PG_VERSION_16
DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1);
grantRoleStmt->opt = list_make1(opt);
#else
grantRoleStmt->admin_opt = true;
#endif
}
stmts = lappend(stmts, grantRoleStmt);
@ -916,8 +912,6 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
grantorRole->rolename = GetUserNameFromId(membership->grantor, false);
grantRoleStmt->grantor = grantorRole;
#if PG_VERSION_NUM >= PG_VERSION_16
/* inherit option is always included */
DefElem *inherit_opt;
if (membership->inherit_option)
@ -943,9 +937,6 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
DefElem *set_opt = makeDefElem("set", (Node *) makeBoolean(false), -1);
grantRoleStmt->opt = lappend(grantRoleStmt->opt, set_opt);
}
#else
grantRoleStmt->admin_opt = membership->admin_option;
#endif
stmts = lappend(stmts, grantRoleStmt);
}
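
The `get_guc_variables_compat` callers in this file and in two later hunks change because PG16 moved the GUC list into a hash table and made `get_guc_variables()` return the array's length through an out parameter; pre-16 the count came from `GetNumConfigOptions()`. The deleted wrapper presumably read (assumed shape):

```c
#if PG_VERSION_NUM >= PG_VERSION_16
#define get_guc_variables_compat(gucCount) get_guc_variables(gucCount)
#else
#define get_guc_variables_compat(gucCount) \
	(*(gucCount) = GetNumConfigOptions(), get_guc_variables())
#endif
```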


@ -184,7 +184,7 @@ truncate_local_data_after_distributing_table(PG_FUNCTION_ARGS)
TruncateStmt *truncateStmt = makeNode(TruncateStmt);
char *relationName = generate_qualified_relation_name(relationId);
List *names = stringToQualifiedNameList_compat(relationName);
List *names = stringToQualifiedNameList(relationName, NULL);
truncateStmt->relations = list_make1(makeRangeVarFromNameList(names));
truncateStmt->restart_seqs = false;
truncateStmt->behavior = DROP_CASCADE;


@ -189,7 +189,7 @@ RecreateCompositeTypeStmt(Oid typeOid)
Assert(get_typtype(typeOid) == TYPTYPE_COMPOSITE);
CompositeTypeStmt *stmt = makeNode(CompositeTypeStmt);
List *names = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid));
List *names = stringToQualifiedNameList(format_type_be_qualified(typeOid), NULL);
stmt->typevar = makeRangeVarFromNameList(names);
stmt->coldeflist = CompositeTypeColumnDefList(typeOid);
@ -254,7 +254,7 @@ RecreateEnumStmt(Oid typeOid)
Assert(get_typtype(typeOid) == TYPTYPE_ENUM);
CreateEnumStmt *stmt = makeNode(CreateEnumStmt);
stmt->typeName = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid));
stmt->typeName = stringToQualifiedNameList(format_type_be_qualified(typeOid), NULL);
stmt->vals = EnumValsList(typeOid);
return stmt;
@ -567,8 +567,8 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
char *
GenerateBackupNameForTypeCollision(const ObjectAddress *address)
{
List *names = stringToQualifiedNameList_compat(format_type_be_qualified(
address->objectId));
List *names = stringToQualifiedNameList(format_type_be_qualified(address->objectId),
NULL);
RangeVar *rel = makeRangeVarFromNameList(names);
char *newName = palloc0(NAMEDATALEN);
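
`stringToQualifiedNameList_compat` here (and `typeStringToTypeName_compat` further down) existed because PG16 added an error-saving context parameter to these parser entry points; the shims presumably just passed the extra argument through on PG16+ and dropped it on PG15 (assumed shape):

```c
#if PG_VERSION_NUM >= PG_VERSION_16
#define stringToQualifiedNameList_compat(str) stringToQualifiedNameList(str, NULL)
#define typeStringToTypeName_compat(str, escontext) typeStringToTypeName(str, escontext)
#else
#define stringToQualifiedNameList_compat(str) stringToQualifiedNameList(str)
#define typeStringToTypeName_compat(str, escontext) typeStringToTypeName(str)
#endif
```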


@ -43,9 +43,7 @@ typedef struct CitusVacuumParams
VacOptValue truncate;
VacOptValue index_cleanup;
int nworkers;
#if PG_VERSION_NUM >= PG_VERSION_16
int ring_size;
#endif
} CitusVacuumParams;
/*
@ -353,19 +351,12 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
}
/* if no flags remain, exit early */
#if PG_VERSION_NUM >= PG_VERSION_16
if (vacuumFlags & VACOPT_PROCESS_TOAST &&
vacuumFlags & VACOPT_PROCESS_MAIN)
{
/* process toast and process main are true by default */
if (((vacuumFlags & ~VACOPT_PROCESS_TOAST) & ~VACOPT_PROCESS_MAIN) == 0 &&
vacuumParams.ring_size == -1 &&
#else
if (vacuumFlags & VACOPT_PROCESS_TOAST)
{
/* process toast is true by default */
if ((vacuumFlags & ~VACOPT_PROCESS_TOAST) == 0 &&
#endif
vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
@ -413,7 +404,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
appendStringInfoString(vacuumPrefix, "PROCESS_TOAST FALSE,");
}
#if PG_VERSION_NUM >= PG_VERSION_16
if (!(vacuumFlags & VACOPT_PROCESS_MAIN))
{
appendStringInfoString(vacuumPrefix, "PROCESS_MAIN FALSE,");
@ -433,7 +423,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
{
appendStringInfo(vacuumPrefix, "BUFFER_USAGE_LIMIT %d,", vacuumParams.ring_size);
}
#endif
if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
{
@ -537,13 +526,10 @@ VacuumStmtParams(VacuumStmt *vacstmt)
bool full = false;
bool disable_page_skipping = false;
bool process_toast = true;
#if PG_VERSION_NUM >= PG_VERSION_16
bool process_main = true;
bool skip_database_stats = false;
bool only_database_stats = false;
params.ring_size = -1;
#endif
/* Set default value */
params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
@ -563,13 +549,11 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
skip_locked = defGetBoolean(opt);
}
#if PG_VERSION_NUM >= PG_VERSION_16
else if (strcmp(opt->defname, "buffer_usage_limit") == 0)
{
char *vac_buffer_size = defGetString(opt);
parse_int(vac_buffer_size, &params.ring_size, GUC_UNIT_KB, NULL);
}
#endif
else if (!vacstmt->is_vacuumcmd)
{
ereport(ERROR,
@ -594,7 +578,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
disable_page_skipping = defGetBoolean(opt);
}
#if PG_VERSION_NUM >= PG_VERSION_16
else if (strcmp(opt->defname, "process_main") == 0)
{
process_main = defGetBoolean(opt);
@ -607,7 +590,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
only_database_stats = defGetBoolean(opt);
}
#endif
else if (strcmp(opt->defname, "process_toast") == 0)
{
process_toast = defGetBoolean(opt);
@ -678,11 +660,9 @@ VacuumStmtParams(VacuumStmt *vacstmt)
(analyze ? VACOPT_ANALYZE : 0) |
(freeze ? VACOPT_FREEZE : 0) |
(full ? VACOPT_FULL : 0) |
#if PG_VERSION_NUM >= PG_VERSION_16
(process_main ? VACOPT_PROCESS_MAIN : 0) |
(skip_database_stats ? VACOPT_SKIP_DATABASE_STATS : 0) |
(only_database_stats ? VACOPT_ONLY_DATABASE_STATS : 0) |
#endif
(process_toast ? VACOPT_PROCESS_TOAST : 0) |
(disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
return params;


@ -400,7 +400,6 @@ DeparseGrantRoleStmt(Node *node)
static void
AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt)
{
#if PG_VERSION_NUM >= PG_VERSION_16
if (!stmt->is_grant)
{
DefElem *opt = NULL;
@ -423,12 +422,6 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt)
}
}
}
#else
if (!stmt->is_grant && stmt->admin_opt)
{
appendStringInfo(buf, "ADMIN OPTION FOR ");
}
#endif
}
@ -437,7 +430,6 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt)
{
if (stmt->is_grant)
{
#if PG_VERSION_NUM >= PG_VERSION_16
int opt_count = 0;
DefElem *opt = NULL;
foreach_declared_ptr(opt, stmt->opt)
@ -463,12 +455,6 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt)
}
}
}
#else
if (stmt->admin_opt)
{
appendStringInfo(buf, " WITH ADMIN OPTION");
}
#endif
}
}


@ -176,12 +176,6 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
appendStringInfo(buf, "%s ", quote_identifier(constraint->conname));
/* postgres version >= PG15
* UNIQUE [ NULLS [ NOT ] DISTINCT ] ( column_name [, ... ] ) [ INCLUDE ( column_name [, ...]) ]
* postgres version < PG15
* UNIQUE ( column_name [, ... ] ) [ INCLUDE ( column_name [, ...]) ]
* PRIMARY KEY ( column_name [, ... ] ) [ INCLUDE ( column_name [, ...]) ]
*/
if (constraint->contype == CONSTR_PRIMARY || constraint->contype == CONSTR_UNIQUE)
{
if (constraint->contype == CONSTR_PRIMARY)

File diff suppressed because it is too large


@ -2680,32 +2680,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
DistributedExecution *execution = workerPool->distributedExecution;
/*
* Although not ideal, there is a slight difference in the implementations
* of PG15+ and others.
*
* Recreating the WaitEventSet even once is prohibitively expensive (almost
* ~7% overhead for select-only pgbench). For all versions, the aim is to
* be able to create the WaitEventSet only once after any new connections
* are added to the execution. That is the main reason behind the implementation
* differences.
*
* For pre-PG15 versions, we leave the waitEventSet recreation to the main
* execution loop. For PG15+, we do it right here.
*
* We require this difference because for PG15+, there is a new type of
* WaitEvent (WL_SOCKET_CLOSED). We can provide this new event at this point,
* and check RemoteSocketClosedForAnySession(). For earlier versions, we have
* to defer the rebuildWaitEventSet as there is no other event to waitFor
* at this point. We could have forced to re-build, but that would mean we try to
* create waitEventSet without any actual events. That has some other implications
* such that we have to avoid certain optimizations of WaitEventSet creation.
*
* Instead, we prefer this slight difference, which in effect has almost no
* difference, but doing things in different points in time.
*/
/* we added new connections, rebuild the waitEventSet */
RebuildWaitEventSetForSessions(execution);


@ -88,10 +88,6 @@
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#if PG_VERSION_NUM < PG_VERSION_16
#include "utils/relfilenodemap.h"
#endif
/* user configuration */
int ReadFromSecondaries = USE_SECONDARY_NODES_NEVER;


@ -3195,7 +3195,7 @@ SignalMetadataSyncDaemon(Oid database, int sig)
int backendCount = pgstat_fetch_stat_numbackends();
for (int backend = 1; backend <= backendCount; backend++)
{
LocalPgBackendStatus *localBeEntry = pgstat_fetch_stat_local_beentry(backend);
LocalPgBackendStatus *localBeEntry = pgstat_get_local_beentry_by_index(backend);
if (!localBeEntry)
{
continue;


@ -29,6 +29,7 @@
#include "catalog/pg_constraint.h"
#include "catalog/pg_extension.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_type.h"
#include "commands/extension.h"
#include "commands/sequence.h"
@ -81,10 +82,6 @@
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "catalog/pg_proc_d.h"
#endif
#define DISK_SPACE_FIELDS 2
/* Local functions forward declarations */


@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
*
* pg_get_object_address_13_14_15.c
* pg_get_object_address_16_17_18.c
*
* Copied functions from Postgres pg_get_object_address with acl/owner check.
* Since we need to use intermediate data types Relation and Node from
@ -96,7 +96,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("name or argument lists may not contain nulls")));
}
typename = typeStringToTypeName_compat(TextDatumGetCString(elems[0]), NULL);
typename = typeStringToTypeName(TextDatumGetCString(elems[0]), NULL);
}
else if (type == OBJECT_LARGEOBJECT)
{
@ -163,8 +163,8 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
errmsg("name or argument lists may not contain nulls")));
}
args = lappend(args,
typeStringToTypeName_compat(TextDatumGetCString(elems[i]),
NULL));
typeStringToTypeName(TextDatumGetCString(elems[i]),
NULL));
}
}
else


@ -2476,7 +2476,7 @@ GetSetCommandListForNewConnections(void)
List *commandList = NIL;
int gucCount = 0;
struct config_generic **guc_vars = get_guc_variables_compat(&gucCount);
struct config_generic **guc_vars = get_guc_variables(&gucCount);
for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
{


@ -610,11 +610,10 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
subquery->jointree = joinTree;
rte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
/* no permission checking for this RTE */
rte->perminfoindex = 0;
#endif
rte->subquery = subquery;
rte->alias = copyObject(rte->eref);
}


@ -29,6 +29,7 @@
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "parser/parse_relation.h"
#include "parser/parse_type.h"
#include "parser/parsetree.h"
#include "utils/builtins.h"
@ -71,10 +72,6 @@
#include "distributed/version_compat.h"
#include "distributed/worker_shard_visibility.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "parser/parse_relation.h"
#endif
static List *plannerRestrictionContextList = NIL;
int MultiTaskQueryLogLevel = CITUS_LOG_LEVEL_OFF; /* multi-task query log level */
@ -1510,7 +1507,6 @@ static void
ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan)
{
mainPlan->rtable = list_concat(mainPlan->rtable, concatPlan->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* concatPlan's range table list is concatenated to mainPlan's range table list
@ -1532,7 +1528,6 @@ ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan)
/* finally, concatenate perminfos as well */
mainPlan->permInfos = list_concat(mainPlan->permInfos, concatPlan->permInfos);
#endif
}
@ -2018,18 +2013,6 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
{
cacheEntry = GetCitusTableCacheEntry(rte->relid);
#if PG_VERSION_NUM == PG_VERSION_15
/*
* Postgres 15.0 had a bug regarding inherited statistics expressions,
* which is fixed in 15.1 via Postgres commit
* 1f1865e9083625239769c26f68b9c2861b8d4b1c.
*
* Hence, we only set this value on exactly PG15.0
*/
relOptInfo->statlist = NIL;
#endif
relationRestrictionContext->allReferenceTables &=
IsCitusTableTypeCacheEntry(cacheEntry, REFERENCE_TABLE);
}


@ -116,24 +116,15 @@ PlannedStmt *
GeneratePlaceHolderPlannedStmt(Query *parse)
{
PlannedStmt *result = makeNode(PlannedStmt);
#if PG_VERSION_NUM >= PG_VERSION_16
SeqScan *scanNode = makeNode(SeqScan);
Plan *plan = &(scanNode->scan.plan);
#else
Scan *scanNode = makeNode(Scan);
Plan *plan = &scanNode->plan;
#endif
FastPathRestrictionContext fprCtxt PG_USED_FOR_ASSERTS_ONLY = { 0 };
Assert(FastPathRouterQuery(parse, &fprCtxt));
/* there is only a single relation rte */
#if PG_VERSION_NUM >= PG_VERSION_16
scanNode->scan.scanrelid = 1;
#else
scanNode->scanrelid = 1;
#endif
plan->targetlist =
copyObject(FetchStatementTargetList((Node *) parse));
@ -149,9 +140,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
result->stmt_len = parse->stmt_len;
result->rtable = copyObject(parse->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16
result->permInfos = copyObject(parse->rteperminfos);
#endif
result->planTree = (Plan *) plan;
result->hasReturning = (parse->returningList != NIL);


@ -624,8 +624,6 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan)
combineQuery->canSetTag = true;
combineQuery->rtable = list_make1(rangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* This part of the code is more of a sanity check for readability,
* it doesn't really do anything.
@ -637,7 +635,6 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan)
Assert(rangeTableEntry->rtekind == RTE_FUNCTION &&
rangeTableEntry->perminfoindex == 0);
combineQuery->rteperminfos = NIL;
#endif
combineQuery->targetList = targetList;
combineQuery->jointree = joinTree;
@ -1601,13 +1598,10 @@ WrapSubquery(Query *subquery)
outerQuery->rtable = list_make1(rte_subq);
#if PG_VERSION_NUM >= PG_VERSION_16
/* Ensure RTE_SUBQUERY has proper permission handling */
Assert(rte_subq->rtekind == RTE_SUBQUERY &&
rte_subq->perminfoindex == 0);
outerQuery->rteperminfos = NIL;
#endif
RangeTblRef *rtref = makeNode(RangeTblRef);
rtref->rtindex = 1; /* Only one RTE, so index is 1 */


@ -135,9 +135,7 @@ typedef struct RangeTableEntryDetails
RangeTblEntry *rangeTableEntry;
List *requiredAttributeNumbers;
bool hasConstantFilterOnUniqueColumn;
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo;
#endif
} RangeTableEntryDetails;
/*
@ -208,17 +206,11 @@ RecursivelyPlanLocalTableJoins(Query *query,
GetPlannerRestrictionContext(context);
List *rangeTableList = query->rtable;
#if PG_VERSION_NUM >= PG_VERSION_16
List *rteperminfos = query->rteperminfos;
#endif
int resultRTEIdentity = ResultRTEIdentity(query);
ConversionCandidates *conversionCandidates =
CreateConversionCandidates(plannerRestrictionContext,
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableList, resultRTEIdentity, rteperminfos);
#else
rangeTableList, resultRTEIdentity, NIL);
#endif
ConversionChoice conversionChoise =
GetConversionChoice(conversionCandidates, plannerRestrictionContext);
@ -333,12 +325,8 @@ ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext
RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry;
List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers;
ReplaceRTERelationWithRteSubquery(rangeTableEntry,
#if PG_VERSION_NUM >= PG_VERSION_16
requiredAttributeNumbers, context,
rangeTableEntryDetails->perminfo);
#else
requiredAttributeNumbers, context, NULL);
#endif
}
}
@ -581,14 +569,12 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
RequiredAttrNumbersForRelation(rangeTableEntry, plannerRestrictionContext);
rangeTableEntryDetails->hasConstantFilterOnUniqueColumn =
HasConstantFilterOnUniqueColumn(rangeTableEntry, relationRestriction);
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableEntryDetails->perminfo = NULL;
if (rangeTableEntry->perminfoindex)
{
rangeTableEntryDetails->perminfo = getRTEPermissionInfo(rteperminfos,
rangeTableEntry);
}
#endif
bool referenceOrDistributedTable =
IsCitusTableType(rangeTableEntry->relid, REFERENCE_TABLE) ||


@ -836,11 +836,9 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte)
Query *cteQuery = (Query *) copyObject(sourceCte->ctequery);
sourceRte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check - sourceRte was RTE_CTE previously so it should have no perminfo */
Assert(sourceRte->perminfoindex == 0);
#endif
/*
* As we are delinking the CTE from main query, we have to walk through the
@ -890,8 +888,6 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
/* we copy the input rteRelation to preserve the rteIdentity */
RangeTblEntry *newRangeTableEntry = copyObject(sourceRte);
sourceResultsQuery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
sourceResultsQuery->rteperminfos = NIL;
if (sourceRte->perminfoindex)
{
@ -903,7 +899,6 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
newRangeTableEntry->perminfoindex = 1;
sourceResultsQuery->rteperminfos = list_make1(perminfo);
}
#endif
/* set the FROM expression to the subquery */
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
@ -930,9 +925,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
/* replace the function with the constructed subquery */
sourceRte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
sourceRte->perminfoindex = 0;
#endif
sourceRte->subquery = sourceResultsQuery;
sourceRte->inh = false;
}


@ -287,13 +287,11 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
void
CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es)
{
#if PG_VERSION_NUM >= PG_VERSION_16
if (es->generic)
{
ereport(ERROR, (errmsg(
"EXPLAIN GENERIC_PLAN is currently not supported for Citus tables")));
}
#endif
CitusScanState *scanState = (CitusScanState *) node;
DistributedPlan *distributedPlan = scanState->distributedPlan;


@ -2260,13 +2260,10 @@ ConvertToQueryOnShard(Query *query, Oid citusTableOid, Oid shardId)
Assert(shardRelationId != InvalidOid);
citusTableRte->relid = shardRelationId;
#if PG_VERSION_NUM >= PG_VERSION_16
/* Change the range table permission oid to that of the shard's (PG16+) */
Assert(list_length(query->rteperminfos) == 1);
RTEPermissionInfo *rtePermInfo = (RTEPermissionInfo *) linitial(query->rteperminfos);
rtePermInfo->relid = shardRelationId;
#endif
return true;
}
@ -2574,18 +2571,6 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query)
continue;
}
#if PG_VERSION_NUM >= 150013 && PG_VERSION_NUM < PG_VERSION_16
if (rangeTableEntry->rtekind == RTE_SUBQUERY && rangeTableEntry->relkind == 0)
{
/*
* In PG15.13 commit https://github.com/postgres/postgres/commit/317aba70e
* relid is retained when converting views to subqueries,
* so we need an extra check identifying those views
*/
continue;
}
#endif
if (rangeTableEntry->relkind == RELKIND_VIEW ||
rangeTableEntry->relkind == RELKIND_MATVIEW)
{


@ -81,16 +81,12 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
* functions (i.e., FilterPlannerRestrictionForQuery()) rely on queries
* not relations.
*/
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo = NULL;
if (anchorRangeTblEntry->perminfoindex)
{
perminfo = getRTEPermissionInfo(subquery->rteperminfos, anchorRangeTblEntry);
}
anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, perminfo);
#else
anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, NULL);
#endif
}
else if (anchorRangeTblEntry->rtekind == RTE_SUBQUERY)
{
@ -133,7 +129,7 @@ static RangeTblEntry *
AnchorRte(Query *subquery)
{
FromExpr *joinTree = subquery->jointree;
Relids joinRelIds = get_relids_in_jointree_compat((Node *) joinTree, false, false);
Relids joinRelIds = get_relids_in_jointree((Node *) joinTree, false, false);
int currentRTEIndex = -1;
RangeTblEntry *anchorRangeTblEntry = NULL;
@ -286,13 +282,11 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
RangeTblEntry *newRangeTableEntry = copyObject(rteRelation);
subquery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
if (perminfo)
{
newRangeTableEntry->perminfoindex = 1;
subquery->rteperminfos = list_make1(perminfo);
}
#endif
/* set the FROM expression to the subquery */
newRangeTableRef = makeNode(RangeTblRef);


@ -2058,9 +2058,7 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
pushedDownQuery->targetList = subqueryTargetEntryList;
pushedDownQuery->jointree = copyObject(queryTree->jointree);
pushedDownQuery->rtable = copyObject(queryTree->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16
pushedDownQuery->rteperminfos = copyObject(queryTree->rteperminfos);
#endif
pushedDownQuery->setOperations = copyObject(queryTree->setOperations);
pushedDownQuery->querySource = queryTree->querySource;
pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;
@ -2194,9 +2192,7 @@ CreateSubqueryTargetListAndAdjustVars(List *columnList)
* the var - is empty. Otherwise, when given the query, the Postgres planner
* may attempt to access a non-existent range table and segfault, as in #7787.
*/
#if PG_VERSION_NUM >= PG_VERSION_16
column->varnullingrels = NULL;
#endif
}
return subqueryTargetEntryList;


@ -513,7 +513,6 @@ ShouldRecursivelyPlanOuterJoins(Query *query, RecursivePlanningContext *context)
if (!EnableOuterJoinsWithPseudoconstantQualsPrePG17 && !hasOuterJoin)
{
/*
* PG15 commit d1ef5631e620f9a5b6480a32bb70124c857af4f1
* PG16 commit 695f5deb7902865901eb2d50a70523af655c3a00
* disallows replacing joins with scans in queries with pseudoconstant quals.
* This commit prevents the set_join_pathlist_hook from being called
@ -529,9 +528,9 @@ ShouldRecursivelyPlanOuterJoins(Query *query, RecursivePlanningContext *context)
FindNodeMatchingCheckFunction((Node *) query->jointree, IsOuterJoinExpr))
{
ereport(ERROR, (errmsg("Distributed queries with outer joins and "
"pseudoconstant quals are not supported in PG15 and PG16."),
"pseudoconstant quals are not supported in PG16."),
errdetail(
"PG15 and PG16 disallow replacing joins with scans when the"
"PG16 disallows replacing joins with scans when the"
" query has pseudoconstant quals"),
errhint("Consider upgrading your PG version to PG17+")));
}
@ -974,7 +973,6 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
List *requiredAttributes =
RequiredAttrNumbersForRelation(distributedRte, restrictionContext);
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo = NULL;
if (distributedRte->perminfoindex)
{
@ -983,10 +981,6 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
recursivePlanningContext, perminfo);
#else
ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
recursivePlanningContext, NULL);
#endif
}
else if (distributedRte->rtekind == RTE_SUBQUERY)
{
@ -1875,9 +1869,7 @@ ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
/* replace the function with the constructed subquery */
rangeTableEntry->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableEntry->perminfoindex = 0;
#endif
rangeTableEntry->subquery = subquery;
/*
@ -1950,13 +1942,10 @@ CreateOuterSubquery(RangeTblEntry *rangeTableEntry, List *outerSubqueryTargetLis
innerSubqueryRTE->eref->colnames = innerSubqueryColNames;
outerSubquery->rtable = list_make1(innerSubqueryRTE);
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check */
Assert(innerSubqueryRTE->rtekind == RTE_SUBQUERY &&
innerSubqueryRTE->perminfoindex == 0);
outerSubquery->rteperminfos = NIL;
#endif
/* set the FROM expression to the subquery */
@ -2132,13 +2121,10 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
/* set the FROM expression to the subquery */
subquery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check */
Assert(newRangeTableEntry->rtekind == RTE_FUNCTION &&
newRangeTableEntry->perminfoindex == 0);
subquery->rteperminfos = NIL;
#endif
newRangeTableRef->rtindex = 1;
subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@ -2462,9 +2448,7 @@ BuildEmptyResultQuery(List *targetEntryList, char *resultId)
valuesQuery->canSetTag = true;
valuesQuery->commandType = CMD_SELECT;
valuesQuery->rtable = list_make1(valuesRangeTable);
#if PG_VERSION_NUM >= PG_VERSION_16
valuesQuery->rteperminfos = NIL;
#endif
valuesQuery->jointree = valuesJoinTree;
valuesQuery->targetList = valueTargetList;
@ -2481,9 +2465,7 @@ BuildEmptyResultQuery(List *targetEntryList, char *resultId)
resultQuery->commandType = CMD_SELECT;
resultQuery->canSetTag = true;
resultQuery->rtable = list_make1(emptyRangeTable);
#if PG_VERSION_NUM >= PG_VERSION_16
resultQuery->rteperminfos = NIL;
#endif
RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
rangeTableRef->rtindex = 1;
@ -2633,9 +2615,7 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList,
Query *resultQuery = makeNode(Query);
resultQuery->commandType = CMD_SELECT;
resultQuery->rtable = list_make1(rangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
resultQuery->rteperminfos = NIL;
#endif
resultQuery->jointree = joinTree;
resultQuery->targetList = targetList;


@ -1510,7 +1510,6 @@ GetTargetSubquery(PlannerInfo *root, RangeTblEntry *rangeTableEntry, Var *varToB
bool
IsRelOptOuterJoin(PlannerInfo *root, int varNo)
{
#if PG_VERSION_NUM >= PG_VERSION_16
if (root->simple_rel_array_size <= varNo)
{
return true;
@ -1522,7 +1521,6 @@ IsRelOptOuterJoin(PlannerInfo *root, int varNo)
/* must be an outer join */
return true;
}
#endif
return false;
}


@ -1189,9 +1189,6 @@ PublicationName(LogicalRepType type, uint32_t nodeId, Oid ownerId)
/*
* ReplicationSlotNameForNodeAndOwnerForOperation returns the name of the
* replication slot for the given node, table owner and operation id.
*
* Note that PG15 introduced a new ReplicationSlotName function that caused name conflicts
* and we renamed this function.
*/
char *
ReplicationSlotNameForNodeAndOwnerForOperation(LogicalRepType type, uint32_t nodeId,
@ -1515,7 +1512,6 @@ CreateSubscriptions(MultiConnection *sourceConnection,
appendStringInfo(createSubscriptionCommand,
"CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s "
"WITH (citus_use_authinfo=true, create_slot=false, "
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* password_required specifies whether connections to the publisher
@ -1529,9 +1525,6 @@ CreateSubscriptions(MultiConnection *sourceConnection,
* it will be ignored anyway
*/
"copy_data=false, enabled=false, slot_name=%s, password_required=false",
#else
"copy_data=false, enabled=false, slot_name=%s",
#endif
quote_identifier(target->subscriptionName),
quote_literal_cstr(conninfo->data),
quote_identifier(target->publication->name),


@ -94,42 +94,6 @@ replication_origin_filter_cb(LogicalDecodingContext *ctx, RepOriginId origin_id)
}
/*
* update_replication_progress is copied from Postgres 15. We use it to send keepalive
* messages when we are filtering out the wal changes resulting from the initial copy.
* If we do not send messages for long enough, the WAL receiver will time out.
* Postgres 16 has refactored this code such that keepalive messages are sent during
* reordering phase which is above change_cb. So we do not need to send keepalive in
* change_cb.
*/
#if (PG_VERSION_NUM < PG_VERSION_16)
static void
update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
{
static int changes_count = 0;
/*
* We don't want to try sending a keepalive message after processing each
* change as that can have overhead. Tests revealed that there is no
* noticeable overhead in doing it after continuously processing 100 or so
* changes.
*/
#define CHANGES_THRESHOLD 100
/*
* After continuously processing CHANGES_THRESHOLD changes, we
* try to send a keepalive message if required.
*/
if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD)
{
OutputPluginUpdateProgress(ctx, skipped_xact);
changes_count = 0;
}
}
#endif
/*
* shard_split_change_cb function emits the incoming tuple change
* to the appropriate destination shard.
@ -148,12 +112,6 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
return;
}
#if (PG_VERSION_NUM < PG_VERSION_16)
/* Send replication keepalive. */
update_replication_progress(ctx, false);
#endif
/* check if the relation is publishable.*/
if (!is_publishable_relation(relation))
{


@ -2803,7 +2803,7 @@ static void
OverridePostgresConfigProperties(void)
{
int gucCount = 0;
struct config_generic **guc_vars = get_guc_variables_compat(&gucCount);
struct config_generic **guc_vars = get_guc_variables(&gucCount);
for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
{
@ -2982,7 +2982,7 @@ ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, GucSource sou
}
char *prefixAscii = pstrdup(appNamePrefix);
pg_clean_ascii_compat(prefixAscii, 0);
pg_clean_ascii(prefixAscii, 0);
if (strcmp(prefixAscii, appNamePrefix) != 0)
{
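Both call sites simply drop the compat wrappers. A minimal sketch of the PG16+ forms they now use directly (loop body illustrative):
int gucCount = 0;
struct config_generic **gucVars = get_guc_variables(&gucCount);  /* count via out-param since PG16 */
for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
{
	struct config_generic *guc = gucVars[gucIndex];
	/* inspect guc->name, guc->source, ... */
}
pg_clean_ascii(prefixAscii, 0);  /* PG16+ takes an extra alloc-flags argument */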


@ -562,11 +562,7 @@ static const TableAmRoutine fake_methods = {
.tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot,
.index_delete_tuples = fake_index_delete_tuples,
#if PG_VERSION_NUM >= PG_VERSION_16
.relation_set_new_filelocator = fake_relation_set_new_filenode,
#else
.relation_set_new_filenode = fake_relation_set_new_filenode,
#endif
.relation_nontransactional_truncate = fake_relation_nontransactional_truncate,
.relation_copy_data = fake_copy_data,
.relation_copy_for_cluster = fake_copy_for_cluster,
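Only the PG16+ member name survives the cleanup. For reference, the callback keeps the signature the PG16 table AM API expects (a sketch based on that API, not quoted from the commit):
static void fake_relation_set_new_filenode(Relation rel,
                                           const RelFileLocator *newrlocator,
                                           char persistence,
                                           TransactionId *freezeXid,
                                           MultiXactId *minmulti);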


@ -740,8 +740,6 @@ UnlockLockData(void)
* We have separate blocks for PG16 and <PG16 because SHM_QUEUE is completely
* removed from PG16
*/
#if PG_VERSION_NUM >= PG_VERSION_16
static void
AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
{
@ -820,86 +818,6 @@ AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remai
}
#else
static void
AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
{
/* the lock for which this process is waiting */
LOCK *waitLock = waitingProc->waitLock;
/* determine the conflict mask for the lock level used by the process */
LockMethod lockMethodTable = GetLocksMethodTable(waitLock);
int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode];
/* iterate through the queue of processes holding the lock */
SHM_QUEUE *procLocks = &waitLock->procLocks;
PROCLOCK *procLock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, lockLink));
while (procLock != NULL)
{
PGPROC *currentProc = procLock->tag.myProc;
/*
* Skip processes from the same lock group, processes that don't conflict,
* and processes that are waiting on safe operations.
*/
if (!IsSameLockGroup(waitingProc, currentProc) &&
IsConflictingLockMask(procLock->holdMask, conflictMask) &&
!IsProcessWaitingForSafeOperations(currentProc))
{
AddWaitEdge(waitGraph, waitingProc, currentProc, remaining);
}
procLock = (PROCLOCK *) SHMQueueNext(procLocks, &procLock->lockLink,
offsetof(PROCLOCK, lockLink));
}
}
static void
AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
{
/* the lock for which this process is waiting */
LOCK *waitLock = waitingProc->waitLock;
/* determine the conflict mask for the lock level used by the process */
LockMethod lockMethodTable = GetLocksMethodTable(waitLock);
int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode];
/* iterate through the wait queue */
PROC_QUEUE *waitQueue = &(waitLock->waitProcs);
int queueSize = waitQueue->size;
PGPROC *currentProc = (PGPROC *) waitQueue->links.next;
/*
* Iterate through the queue from the start until we encounter waitingProc,
* since we only care about processes in front of waitingProc in the queue.
*/
while (queueSize-- > 0 && currentProc != waitingProc)
{
int awaitMask = LOCKBIT_ON(currentProc->waitLockMode);
/*
* Skip processes from the same lock group, processes that don't conflict,
* and processes that are waiting on safe operations.
*/
if (!IsSameLockGroup(waitingProc, currentProc) &&
IsConflictingLockMask(awaitMask, conflictMask) &&
!IsProcessWaitingForSafeOperations(currentProc))
{
AddWaitEdge(waitGraph, waitingProc, currentProc, remaining);
}
currentProc = (PGPROC *) currentProc->links.next;
}
}
#endif
/*
* AddWaitEdge adds a new wait edge to a wait graph. The nodes in the graph are
* transactions and an edge indicates the "waiting" process is blocked on a lock
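The deleted branch above walked SHM_QUEUE/PROC_QUEUE by hand. The surviving PG16+ branch (mostly elided by this diff) traverses the same structures through the dlist/dclist API that replaced SHM_QUEUE in PostgreSQL 16, roughly in this style (an assumption about the retained code, not a quote from it):
dlist_iter iter;
dlist_foreach(iter, &waitLock->procLocks)
{
	PROCLOCK *procLock = dlist_container(PROCLOCK, lockLink, iter.cur);
	PGPROC *currentProc = procLock->tag.myProc;
	/* same lock-group / conflict-mask / safe-operation filtering as before */
}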


@ -25,6 +25,7 @@
#include "storage/fd.h"
#include "utils/datum.h"
#include "utils/guc.h"
#include "utils/guc_tables.h"
#include "utils/hsearch.h"
#include "utils/memutils.h"
@ -807,13 +808,9 @@ AdjustMaxPreparedTransactions(void)
* really check if max_prepared_xacts is configured by the user explicitly,
* so check if it's value is default.
*/
#if PG_VERSION_NUM >= PG_VERSION_16
struct config_generic *gconf = find_option("max_prepared_transactions",
false, false, ERROR);
if (gconf->source == PGC_S_DEFAULT)
#else
if (max_prepared_xacts == 0)
#endif
{
char newvalue[12];
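find_option() and struct config_generic are exposed through utils/guc_tables.h, which is why the include is added above. The default-detection idiom in isolation (sketch, grounded in the hunk):
struct config_generic *gconf = find_option("max_prepared_transactions",
                                           false, false, ERROR);
if (gconf->source == PGC_S_DEFAULT)
{
	/* never set by the user, so Citus may bump it itself */
}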


@ -42,8 +42,8 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
bool missingOK)
{
char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName);
List *qualifiedFunctionNameList = stringToQualifiedNameList_compat(
qualifiedFunctionName);
List *qualifiedFunctionNameList = stringToQualifiedNameList(qualifiedFunctionName,
NULL);
List *argumentList = NIL;
const bool findVariadics = false;
const bool findDefaults = false;
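PG16 added an error-save-context parameter to stringToQualifiedNameList(); passing NULL keeps the old ereport-on-error behavior, so the _compat wrapper can go. Sketch:
/* parses a possibly-qualified name into a List of String nodes;
 * a NULL escontext means parse errors are thrown as before */
List *nameList = stringToQualifiedNameList("myschema.myfunc", NULL);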


@ -12,14 +12,13 @@
#include "postgres.h"
#include "distributed/relation_utils.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "miscadmin.h"
#endif
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "distributed/relation_utils.h"
/*
* RelationGetNamespaceName returns the relation's namespace name.
@ -33,8 +32,6 @@ RelationGetNamespaceName(Relation relation)
}
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* GetFilledPermissionInfo creates RTEPermissionInfo for a given RTE
* and fills it with given data and returns this RTEPermissionInfo object.
@ -56,6 +53,3 @@ GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
perminfo->checkAsUser = GetUserId();
return perminfo;
}
#endif
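With the guard removed, GetFilledPermissionInfo is compiled unconditionally. The body the diff elides amounts to the standard PG16+ fill; a sketch consistent with the visible tail and the RTEPermissionInfo fields (not quoted from the commit):
RTEPermissionInfo *perminfo = makeNode(RTEPermissionInfo);
perminfo->relid = relid;
perminfo->inh = inh;
perminfo->requiredPerms = requiredPerms;
perminfo->checkAsUser = GetUserId();  /* visible in the hunk above */
return perminfo;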


@ -526,8 +526,9 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName)
RenameStmt *stmt = makeNode(RenameStmt);
stmt->renameType = OBJECT_TYPE;
stmt->object = (Node *) stringToQualifiedNameList_compat(format_type_be_qualified(
address->objectId));
stmt->object = (Node *) stringToQualifiedNameList(format_type_be_qualified(address->
objectId),
NULL);
stmt->newname = newName;


@ -19,6 +19,7 @@
#include "nodes/parsenodes.h"
#include "storage/bufpage.h"
#include "storage/lockdefs.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
@ -27,12 +28,6 @@
#include "columnar/columnar_compression.h"
#include "columnar/columnar_metadata.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "storage/relfilelocator.h"
#else
#include "storage/relfilenode.h"
#endif
#define COLUMNAR_AM_NAME "columnar"
#define COLUMNAR_MODULE_NAME "citus_columnar"


@ -14,15 +14,11 @@
#include "postgres.h"
#include "storage/relfilelocator.h"
#include "pg_version_compat.h"
#include "pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "storage/relfilelocator.h"
#else
#include "storage/relfilenode.h"
#endif
/*
* StripeMetadata represents information about a stripe. This information is


@ -38,7 +38,7 @@ typedef struct DeferredErrorMessage
*/
#define DeferredError(code, message, detail, hint) \
DeferredErrorInternal(code, message, detail, hint, __FILE__, __LINE__, \
PG_FUNCNAME_MACRO)
__func__)
DeferredErrorMessage * DeferredErrorInternal(int code, const char *message,
const char *detail, const char *hint,
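PostgreSQL 16 dropped PG_FUNCNAME_MACRO in favor of plain C99 __func__ (the compat header used to re-define it; see its removal below), so the macro can use __func__ directly. Usage is unchanged; a sketch:
DeferredErrorMessage *error =
	DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
	              "example message", NULL, NULL);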


@ -13,16 +13,13 @@
#include "postgres.h"
#include "pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "parser/parse_relation.h"
#endif
#include "utils/relcache.h"
#include "pg_version_constants.h"
extern char * RelationGetNamespaceName(Relation relation);
#if PG_VERSION_NUM >= PG_VERSION_16
extern RTEPermissionInfo * GetFilledPermissionInfo(Oid relid, bool inh,
AclMode requiredPerms);
#endif
#endif /* RELATION_UTILS_H */


@ -180,9 +180,7 @@ IsNodeWideObjectClass(ObjectClass objectClass)
case OCLASS_DATABASE:
case OCLASS_TBLSPACE:
case OCLASS_PARAMETER_ACL:
#if PG_VERSION_NUM >= PG_VERSION_16
case OCLASS_ROLE_MEMBERSHIP:
#endif
{
return true;
}


@ -463,178 +463,6 @@ getStxstattarget_compat(HeapTuple tup)
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
#include "utils/guc_tables.h"
#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a, b)
#define RelationPhysicalIdentifier_compat(a) ((a)->rd_locator)
#define RelationTablespace_compat(a) (a.spcOid)
#define RelationPhysicalIdentifierNumber_compat(a) (a.relNumber)
#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNumber)
#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rlocator.locator)
#define float_abs(a) fabs(a)
#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, c, d, e, f)
static inline struct config_generic **
get_guc_variables_compat(int *gucCount)
{
return get_guc_variables(gucCount);
}
#define PG_FUNCNAME_MACRO __func__
#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a, NULL)
#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a, b)
#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b, c)
#define object_ownercheck(a, b, c) object_ownercheck(a, b, c)
#define object_aclcheck(a, b, c, d) object_aclcheck(a, b, c, d)
#define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a)
#define have_createdb_privilege() have_createdb_privilege()
#else
#include "miscadmin.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_class_d.h"
#include "catalog/pg_database_d.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc_d.h"
#include "storage/relfilenode.h"
#include "utils/guc.h"
#include "utils/guc_tables.h"
#include "utils/syscache.h"
#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a)
#define RelationPhysicalIdentifier_compat(a) ((a)->rd_node)
#define RelationTablespace_compat(a) (a.spcNode)
#define RelationPhysicalIdentifierNumber_compat(a) (a.relNode)
#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNode)
#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rnode.node)
typedef RelFileNode RelFileLocator;
typedef Oid RelFileNumber;
#define RelidByRelfilenumber(a, b) RelidByRelfilenode(a, b)
#define float_abs(a) Abs(a)
#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, d, e, f)
static inline struct config_generic **
get_guc_variables_compat(int *gucCount)
{
*gucCount = GetNumConfigOptions();
return get_guc_variables();
}
#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a)
#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a)
#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b)
static inline bool
object_ownercheck(Oid classid, Oid objectid, Oid roleid)
{
switch (classid)
{
case RelationRelationId:
{
return pg_class_ownercheck(objectid, roleid);
}
case NamespaceRelationId:
{
return pg_namespace_ownercheck(objectid, roleid);
}
case ProcedureRelationId:
{
return pg_proc_ownercheck(objectid, roleid);
}
case DatabaseRelationId:
{
return pg_database_ownercheck(objectid, roleid);
}
default:
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d",
classid)));
}
}
}
static inline AclResult
object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode)
{
switch (classid)
{
case NamespaceRelationId:
{
return pg_namespace_aclcheck(objectid, roleid, mode);
}
case ProcedureRelationId:
{
return pg_proc_aclcheck(objectid, roleid, mode);
}
default:
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d",
classid)));
}
}
}
static inline bool
have_createdb_privilege(void)
{
bool result = false;
HeapTuple utup;
/* Superusers can always do everything */
if (superuser())
{
return true;
}
utup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(GetUserId()));
if (HeapTupleIsValid(utup))
{
result = ((Form_pg_authid) GETSTRUCT(utup))->rolcreatedb;
ReleaseSysCache(utup);
}
return result;
}
typedef bool TU_UpdateIndexes;
/*
* we define RTEPermissionInfo for PG16 compatibility
* There are some functions that need to include RTEPermissionInfo in their signature
* for PG14/PG15 we pass a NULL argument in these functions
*/
typedef RangeTblEntry RTEPermissionInfo;
#endif
#define SetListCellPtr(a, b) ((a)->ptr_value = (b))
#define RangeTableEntryFromNSItem(a) ((a)->p_rte)
#define fcGetArgValue(fc, n) ((fc)->args[n].value)
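Every removed shim above has a direct PG16+ spelling that call sites now use. The pattern, for a few representative entries (all visible in this hunk):
/* compat shim (removed)                      PG16+ call
 * stringToQualifiedNameList_compat(n)     -> stringToQualifiedNameList(n, NULL)
 * pg_clean_ascii_compat(s, 0)             -> pg_clean_ascii(s, 0)
 * get_guc_variables_compat(&count)        -> get_guc_variables(&count)
 * tuplesort_getdatum_compat(a,b,c,d,e,f)  -> tuplesort_getdatum(a,b,c,d,e,f)
 */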


@ -11,7 +11,6 @@
#ifndef PG_VERSION_CONSTANTS
#define PG_VERSION_CONSTANTS
#define PG_VERSION_15 150000
#define PG_VERSION_16 160000
#define PG_VERSION_17 170000
#define PG_VERSION_18 180000


@ -290,30 +290,10 @@ s/\/\*\{"cId":.*\*\///g
# Notice message that contains current columnar version that makes it harder to bump versions
s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION )"[0-9]+\.[0-9]+-[0-9]+"/\1 "x.y-z"/
# pg16 changes
# can be removed when dropping PG14&15 support
#if PG_VERSION_NUM < PG_VERSION_16
# (This is not preprocessor directive, but a reminder for the developer that will drop PG14&15 support )
s/, password_required=false//g
s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g
#pg18 varreturningtype - change needed for PG16, PG17 tests
s/(:varnullingrels \(b\) :varlevelsup 0) (:varnosyn 1)/\1 :varreturningtype 0 \2/g
#pg16 varnullingrels and pg18 varreturningtype - change needed for PG15 tests
s/(:varcollid [0-9]+) :varlevelsup 0/\1 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0/g
s/table_name_for_view\.([_a-z0-9]+)(,| |$)/\1\2/g
s/permission denied to terminate process/must be a superuser to terminate superuser process/g
s/permission denied to cancel query/must be a superuser to cancel superuser query/g
#endif /* PG_VERSION_NUM < PG_VERSION_16 */
# pg17 changes
# can be removed when dropping PG15&16 support
# can be removed when dropping PG16 support
#if PG_VERSION_NUM < PG_VERSION_17
# (This is not preprocessor directive, but a reminder for the developer that will drop PG15&16 support )
# (This is not a preprocessor directive, but a reminder for the developer who will drop PG16 support)
s/COPY DEFAULT only available using COPY FROM/COPY DEFAULT cannot be used with COPY TO/
s/COPY delimiter must not appear in the DEFAULT specification/COPY delimiter character must not appear in the DEFAULT specification/
@ -321,7 +301,7 @@ s/COPY delimiter must not appear in the DEFAULT specification/COPY delimiter cha
#endif /* PG_VERSION_NUM < PG_VERSION_17 */
# PG 17 Removes outer parentheses from CHECK constraints
# we add them back for pg15,pg16 compatibility
# we add them back for pg16 compatibility
# e.g. change CHECK other_col >= 100 to CHECK (other_col >= 100)
s/\| CHECK ([a-zA-Z])(.*)/| CHECK \(\1\2\)/g
@ -403,3 +383,8 @@ s/^[[:space:]]*ERROR:[[:space:]]+could not connect to the publisher:[[:space:]]*
# Output
/^[[:space:]]*Output:/ s/(OVER[[:space:]]+)w[0-9]+/\1(?)/g
# end PG18 window ref normalization
# pg18 varreturningtype - change needed for PG16, PG17 tests
# can be removed when dropping pg17 support
s/(:varnullingrels \(b\) :varlevelsup 0) (:varnosyn 1)/\1 :varreturningtype 0 \2/g
# end pg18 varreturningtype
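The retained pg18 rule splices :varreturningtype into PG16/PG17 plan dumps so they match PG18 output. Applied to an illustrative input line:
#   in:  :varnullingrels (b) :varlevelsup 0 :varnosyn 1
#   out: :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1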


@ -776,15 +776,8 @@ RESET client_min_messages;
create table events (event_id bigserial, event_time timestamptz default now(), payload text);
create index on events (event_id);
insert into events (payload) select 'hello-'||s from generate_series(1,10) s;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
BEGIN;
\if :server_version_ge_16
SET LOCAL debug_parallel_query = regress;
\else
SET LOCAL force_parallel_mode = regress;
\endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;


@ -21,14 +21,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
-- Negative test: try to force a parallel plan with at least two
-- workers, but columnar should reject it and use a non-parallel scan.
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
set debug_parallel_query = regress;
\else
set force_parallel_mode = regress;
\endif
set min_parallel_table_scan_size = 1;
set parallel_tuple_cost = 0;
set max_parallel_workers = 4;
@ -46,11 +39,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
150000 | 1 | 150000 | 75000.500000000000
(1 row)
\if :server_version_ge_16
set debug_parallel_query = default;
\else
set force_parallel_mode = default;
\endif
set min_parallel_table_scan_size to default;
set parallel_tuple_cost to default;
set max_parallel_workers to default;


@ -556,9 +556,6 @@ create table events (event_id bigserial, event_time timestamptz default now(), p
BEGIN;
-- this wouldn't flush any data
insert into events (payload) select 'hello-'||s from generate_series(1, 10) s;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- Since table is large enough, normally postgres would prefer using
-- parallel workers when building the index.
--
@ -570,11 +567,7 @@ BEGIN;
-- by postgres and throws an error. For this reason, here we don't expect
-- following command to fail since we prevent using parallel workers for
-- columnar tables.
\if :server_version_ge_16
SET LOCAL debug_parallel_query = regress;
\else
SET LOCAL force_parallel_mode = regress;
\endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;


@ -20,15 +20,8 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- run parallel plans
\if :server_version_ge_16
SET debug_parallel_query = regress;
\else
SET force_parallel_mode = regress;
\endif
SET min_parallel_table_scan_size = 1;
SET parallel_tuple_cost = 0;
SET max_parallel_workers = 4;
@ -104,11 +97,7 @@ SELECT count(*), sum(i), min(i), max(i) FROM parent;
(1 row)
SET columnar.enable_custom_scan TO DEFAULT;
\if :server_version_ge_16
SET debug_parallel_query TO DEFAULT;
\else
SET force_parallel_mode TO DEFAULT;
\endif
SET min_parallel_table_scan_size TO DEFAULT;
SET parallel_tuple_cost TO DEFAULT;
SET max_parallel_workers TO DEFAULT;


@ -86,13 +86,13 @@ SET search_path TO cpu_priority;
SET citus.log_remote_commands TO ON;
SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
master_move_shard_placement
---------------------------------------------------------------------
@ -101,13 +101,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.cpu_priority_for_logical_replication_senders = 15;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
master_move_shard_placement
---------------------------------------------------------------------
@ -116,13 +116,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.max_high_priority_background_processes = 3;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
master_move_shard_placement
---------------------------------------------------------------------
@ -142,21 +142,21 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
ARRAY['-1500000000'],
ARRAY[:worker_1_node, :worker_2_node],
'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_split_shard_by_split_points
---------------------------------------------------------------------


@ -1,13 +1,3 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
-- create/drop database for pg >= 16
set citus.enable_create_database_propagation=on;
-- test icu_rules


@ -1,9 +0,0 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q


@ -74,9 +74,9 @@ SELECT pg_typeof(:maintenance_daemon_gpid);
\set VERBOSITY terse
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to cancel superuser query
ERROR: permission denied to cancel query
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to terminate superuser process
ERROR: permission denied to terminate process
\set VERBOSITY default
-- we can cancel our own backend
SELECT pg_cancel_backend(citus_backend_gpid());


@ -1287,21 +1287,15 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
-- include varnullingrels for PG16+
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- include varreturningtype for PG18+
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18
\gset
\if :server_version_ge_18
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varreturningtype 0 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\elif :server_version_ge_16
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\else
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\endif
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);


@ -1,18 +1,6 @@
--
-- COMPLEX_COUNT_DISTINCT
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;

File diff suppressed because it is too large


@ -1,22 +1,11 @@
--
-- MULTI_EXPLAIN
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
-- This test file has an alternative output because of the following in PG18:
-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
-- The alternative output can be deleted when we drop support for PG17
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
server_version_ge_18
---------------------------------------------------------------------


@ -1,22 +1,11 @@
--
-- MULTI_EXPLAIN
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
-- This test file has an alternative output because of the following in PG18:
-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
-- The alternative output can be deleted when we drop support for PG17
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
server_version_ge_18
---------------------------------------------------------------------

File diff suppressed because it is too large


@ -58,8 +58,6 @@ CREATE OPERATOR citus_mx_test_schema.=== (
);
SET search_path TO public;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
\gset
\if :server_version_ge_17
@ -67,12 +65,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\elif :server_version_ge_16
\else
-- In PG16, read-only server settings lc_collate and lc_ctype are removed
-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
\endif
CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale);
CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);


@ -471,10 +471,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
test_table_2_1130000
(4 rows)
-- PG16 added one more backend type B_STANDALONE_BACKEND
-- and also alphabetized the backend types, hence the orders changed
-- Relevant PG16 commit:
-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
-- Relevant PG17 commit:
-- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92
-- Relevant PG18 commit:
@ -482,7 +478,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
\if :server_version_ge_18
SELECT 1 AS client_backend \gset
SELECT 5 AS bgworker \gset
@ -491,14 +486,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g
SELECT 1 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 5 AS walsender \gset
\elif :server_version_ge_16
\else
SELECT 4 AS client_backend \gset
SELECT 5 AS bgworker \gset
SELECT 12 AS walsender \gset
\else
SELECT 3 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 9 AS walsender \gset
\endif
-- say, we set it to bgworker
-- the shards and indexes do not show up


@ -472,10 +472,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
test_table_2_1130000
(4 rows)
-- PG16 added one more backend type B_STANDALONE_BACKEND
-- and also alphabetized the backend types, hence the orders changed
-- Relevant PG16 commit:
-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
-- Relevant PG17 commit:
-- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92
-- Relevant PG18 commit:
@ -483,7 +479,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
\if :server_version_ge_18
SELECT 1 AS client_backend \gset
SELECT 5 AS bgworker \gset
@ -492,14 +487,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g
SELECT 1 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 5 AS walsender \gset
\elif :server_version_ge_16
\else
SELECT 4 AS client_backend \gset
SELECT 5 AS bgworker \gset
SELECT 12 AS walsender \gset
\else
SELECT 3 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 9 AS walsender \gset
\endif
-- say, we set it to bgworker
-- the shards and indexes do not show up


@ -2,16 +2,6 @@
--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the
--- varnullingrels field of a VAR node may contain relids of join relations that can make the var
--- NULL; in a rewritten distributed query without a join such relids do not have a meaning.
-- This test has an alternative goldfile because of the following feature in Postgres 16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
CREATE SCHEMA outer_join_columns_testing;
SET search_path to 'outer_join_columns_testing';
SET citus.next_shard_id TO 30070000;


@ -1,422 +0,0 @@
--- Test for verifying that column references (var nodes) in targets that cannot be pushed down
--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the
--- varnullingrels field of a VAR node may contain relids of join relations that can make the var
--- NULL; in a rewritten distributed query without a join such relids do not have a meaning.
-- This test has an alternative goldfile because of the following feature in Postgres 16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
f
(1 row)
CREATE SCHEMA outer_join_columns_testing;
SET search_path to 'outer_join_columns_testing';
SET citus.next_shard_id TO 30070000;
SET citus.shard_replication_factor TO 1;
SET citus.enable_local_execution TO ON;
CREATE TABLE t1 (id INT PRIMARY KEY);
INSERT INTO t1 VALUES (1), (2);
CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id));
INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL);
SELECT create_distributed_table('t1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$outer_join_columns_testing.t1$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('t2', 'account_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$outer_join_columns_testing.t2$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Test the issue seen in #7705; a target expression with
-- a window function that cannot be pushed down because the
-- partition by is not on the distribution column also includes
-- a column from the inner side of a left outer join, which
-- produces a non-empty varnullingrels set in PG 16 (and higher)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 | 10
2 | 20
1 |
(3 rows)
select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 | 10
2 | 20
1 |
(3 rows)
select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id
', true);
explain_filter
---------------------------------------------------------------------
WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 |
1 | 10
2 | 20
(3 rows)
select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
', true);
explain_filter
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.id, (max(remote_scan.max) OVER (?)), remote_scan.worker_column_3
Group Key: remote_scan.id, max(remote_scan.max) OVER (?)
-> WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(25 rows)
CREATE SEQUENCE test_seq START 101;
CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ;
-- Issue #7705 also occurs if a target expression includes a column
-- of a distributed table that is on the inner side of a left outer
-- join and a call to nextval(), because nextval() cannot be pushed
-- down, and must be run on the coordinator
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
id | test_f
---------------------------------------------------------------------
1 | 153
1 |
2 | 165
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
QUERY PLAN
---------------------------------------------------------------------
Result
Output: remote_scan.id, ((remote_scan.test_f + (nextval('test_seq'::regclass))::integer) + 42)
-> Sort
Output: remote_scan.id, remote_scan.test_f
Sort Key: remote_scan.id
-> Custom Scan (Citus Adaptive)
Output: remote_scan.id, remote_scan.test_f
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS test_f FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
id | case
---------------------------------------------------------------------
1 | 10
1 | 1
2 | 20
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
QUERY PLAN
---------------------------------------------------------------------
Result
Output: remote_scan.id, CASE ((nextval('test_seq'::regclass) % '2'::bigint) = 0) WHEN CASE_TEST_EXPR THEN remote_scan."case" ELSE 1 END
-> Sort
Output: remote_scan.id, remote_scan."case"
Sort Key: remote_scan.id
-> Custom Scan (Citus Adaptive)
Output: remote_scan.id, remote_scan."case"
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS "case" FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
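The Issue #7705 comment above turns on nextval() being volatile: Citus keeps volatile function calls out of the pushed-down worker query and evaluates them once per result row on the coordinator, as the Result node at the top of both plans shows. A minimal way to observe this, sketched here and not part of the regression output (the value read back depends on how many rows the queries above produced):
SELECT last_value, is_called FROM test_seq;
-- last_value advances once per coordinator result row, never once per
-- worker task, because nextval() stays out of the worker_subquery text.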
-- Issue #7787: count distinct of a column from the inner side of a
-- left outer join will have a non-empty varnullingrels in the query
-- tree returned by Postgres 16+, so ensure this is not reflected in
-- the worker subquery constructed by Citus; it has just one relation,
-- for the pushed down subquery.
SELECT COUNT(DISTINCT a2)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
count
---------------------------------------------------------------------
2
(1 row)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT COUNT(DISTINCT a2)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS count FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: t2.a2
Group Key: t2.a2
-> Hash Right Join
Output: t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
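varnullingrels is a field PG16+ adds to Var nodes to record which outer joins can null the column. One way to see it, assuming a PG16+ server with access to the server log (a sketch; debug_print_parse writes the Query tree to the log, and the exact relid set depends on range-table numbering):
SET debug_print_parse = on;
SELECT COUNT(DISTINCT a2)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
RESET debug_print_parse;
-- In the logged tree the Var for t2.a2 carries a non-empty :varnullingrels,
-- which Citus strips when building the single-relation worker_subquery above.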
-- Issue #7787 also occurs with a HAVING clause
SELECT 1
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT a2) > 1;
?column?
---------------------------------------------------------------------
1
(1 row)
select public.explain_filter('
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT 1
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT a2) > 1;
', true);
explain_filter
---------------------------------------------------------------------
Aggregate
Output: remote_scan."?column?"
Filter: (count(DISTINCT remote_scan.worker_column_2) > 1)
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan.worker_column_2
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT 1, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: 1, t2.a2
Group Key: t2.a2
-> Hash Right Join
Output: t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(23 rows)
-- Check right outer join
SELECT COUNT(DISTINCT a2)
FROM t2 RIGHT OUTER JOIN t1 ON t2.account_id = t1.id;
count
---------------------------------------------------------------------
2
(1 row)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT COUNT(DISTINCT a2)
FROM t2 RIGHT OUTER JOIN t1 ON t2.account_id = t1.id;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS count FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t2.account_id OPERATOR(pg_catalog.=) t1.id)))) worker_subquery GROUP BY worker_column_1
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: t2.a2
Group Key: t2.a2
-> Hash Right Join
Output: t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(22 rows)
-- Check both count distinct and having clause
SELECT COUNT(DISTINCT a2)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT t2.id) > 1;
count
---------------------------------------------------------------------
2
(1 row)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT COUNT(DISTINCT a2)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
HAVING COUNT(DISTINCT t2.id) > 1;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count)
Filter: (count(DISTINCT remote_scan.worker_column_2) > 1)
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count, remote_scan.worker_column_2
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS count, worker_column_2 FROM (SELECT t2.a2 AS worker_column_1, t2.id AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1, worker_column_2
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: t2.a2, t2.id
Group Key: t2.a2, t2.id
-> Hash Right Join
Output: t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on outer_join_columns_testing.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on outer_join_columns_testing.t1_30070000 t1
Output: t1.id
(23 rows)
--- cleanup
\set VERBOSITY TERSE
DROP SCHEMA outer_join_columns_testing CASCADE;
NOTICE: drop cascades to 4 other objects
RESET all;

View File

@ -348,8 +348,6 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4;
--test COLLATION with schema
SET search_path TO public;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
\gset
\if :server_version_ge_17
@ -357,12 +355,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\elif :server_version_ge_16
\else
-- In PG16, read-only server settings lc_collate and lc_ctype are removed
-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
\endif
CREATE COLLATION test_schema_support.english (LOCALE = :current_locale);
\c - - - :master_port
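With the PG15 branch gone, the \else arm now covers exactly PG16, where lc_collate and lc_ctype stopped being server settings; daticulocale is the PG16 column name that PG17's f696c0cd commit (cited above) renamed to datlocale. The surviving PG16 lookup, reformatted here as a standalone sketch:
SELECT quote_ident(
         (SELECT CASE WHEN datlocprovider = 'i'
                      THEN daticulocale
                      ELSE datcollate END
            FROM pg_database
           WHERE datname = current_database())) AS current_locale;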

View File

@ -1441,7 +1441,7 @@ BEGIN;
SET LOCAL citus.log_remote_commands TO ON;
SET LOCAL citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
SELECT citus_move_shard_placement(980042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_move_shard_placement
---------------------------------------------------------------------
@ -1456,7 +1456,7 @@ SET LOCAL citus.log_remote_commands TO ON;
SET LOCAL citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
SET LOCAL citus.enable_binary_protocol = FALSE;
SELECT citus_move_shard_placement(980042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, password_required=false)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_move_shard_placement
---------------------------------------------------------------------
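The only change to these two NOTICE lines is password_required=false, a subscription option PostgreSQL introduced in PG16; with PG15 support dropped, Citus can emit it unconditionally. A minimal sketch of the syntax outside Citus (subscription name, publication name, and connection string are placeholders):
CREATE SUBSCRIPTION move_sub
    CONNECTION 'host=worker-1 port=5432 dbname=regression'
    PUBLICATION move_pub
    WITH (create_slot = false, copy_data = false, enabled = false,
          password_required = false);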

View File

@ -1,13 +1,6 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
CREATE SCHEMA pg16;
SET search_path TO pg16;
SET citus.next_shard_id TO 950000;

View File

@ -1,9 +0,0 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q

View File

@ -379,7 +379,7 @@ drop cascades to table pg17_corr_subq_folding.users
drop cascades to table pg17_corr_subq_folding.events
drop cascades to table pg17_corr_subq_folding.users_ref
drop cascades to table pg17_corr_subq_folding.users_ref_20240023
-- Queries with outer joins with pseudoconstant quals work only in PG17
-- Queries with outer joins with pseudoconstant quals work only in PG17+
-- Relevant PG17 commit:
-- https://github.com/postgres/postgres/commit/9e9931d2b
CREATE SCHEMA pg17_outerjoin;

View File

@ -334,7 +334,7 @@ drop cascades to table pg17_corr_subq_folding.users
drop cascades to table pg17_corr_subq_folding.events
drop cascades to table pg17_corr_subq_folding.users_ref
drop cascades to table pg17_corr_subq_folding.users_ref_20240023
-- Queries with outer joins with pseudoconstant quals work only in PG17
-- Queries with outer joins with pseudoconstant quals work only in PG17+
-- Relevant PG17 commit:
-- https://github.com/postgres/postgres/commit/9e9931d2b
CREATE SCHEMA pg17_outerjoin;
@ -371,8 +371,8 @@ select * from
(t0 full outer join t3
on (t0.c3 = t3.c26 ))
where (exists (select * from t4)) order by 1, 2, 3;
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG15 and PG16.
DETAIL: PG15 and PG16 disallow replacing joins with scans when the query has pseudoconstant quals
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG16.
DETAIL: PG16 disallows replacing joins with scans when the query has pseudoconstant quals
HINT: Consider upgrading your PG version to PG17+
SET citus.enable_outer_joins_with_pseudoconstant_quals_pre_pg17 TO true;
-- wrong result pre-pg17
@ -417,8 +417,8 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
select * from (t2 full outer join t1 on(t2.vkey = t1.vkey ))
where not((85) in (select 1 from t2));
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG15 and PG16.
DETAIL: PG15 and PG16 disallow replacing joins with scans when the query has pseudoconstant quals
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG16.
DETAIL: PG16 disallows replacing joins with scans when the query has pseudoconstant quals
HINT: Consider upgrading your PG version to PG17+
SET citus.enable_outer_joins_with_pseudoconstant_quals_pre_pg17 TO true;
-- wrong result pre-pg17
@ -459,8 +459,8 @@ select t6.vkey
from (t5 right outer join t6
on (t5.c10 = t6.vkey))
where exists (select * from t6);
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG15 and PG16.
DETAIL: PG15 and PG16 disallow replacing joins with scans when the query has pseudoconstant quals
ERROR: Distributed queries with outer joins and pseudoconstant quals are not supported in PG16.
DETAIL: PG16 disallows replacing joins with scans when the query has pseudoconstant quals
HINT: Consider upgrading your PG version to PG17+
SET citus.enable_outer_joins_with_pseudoconstant_quals_pre_pg17 TO true;
-- wrong result pre-pg17
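A pseudoconstant qual is a filter that references no column of the joined relations, so the planner evaluates it once as a gating condition rather than per row; the EXISTS and IN subqueries above are the canonical shape. A minimal standalone sketch (tables a, b, c are hypothetical):
SELECT *
FROM a LEFT JOIN b ON a.id = b.a_id
WHERE EXISTS (SELECT 1 FROM c);  -- references neither a nor b: pseudoconstant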

View File

@ -266,16 +266,8 @@ create table events (event_id bigserial, event_time timestamptz default now(), p
create index on events (event_id);
insert into events (payload) select 'hello-'||s from generate_series(1,10) s;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
BEGIN;
\if :server_version_ge_16
SET LOCAL debug_parallel_query = regress;
\else
SET LOCAL force_parallel_mode = regress;
\endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;

View File

@ -20,16 +20,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
-- Negative test: try to force a parallel plan with at least two
-- workers, but columnar should reject it and use a non-parallel scan.
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
set debug_parallel_query = regress;
\else
set force_parallel_mode = regress;
\endif
set min_parallel_table_scan_size = 1;
set parallel_tuple_cost = 0;
set max_parallel_workers = 4;
@ -37,11 +28,7 @@ set max_parallel_workers_per_gather = 4;
explain (costs off) select count(*), min(i), max(i), avg(i) from fallback_scan;
select count(*), min(i), max(i), avg(i) from fallback_scan;
\if :server_version_ge_16
set debug_parallel_query = default;
\else
set force_parallel_mode = default;
\endif
set min_parallel_table_scan_size to default;
set parallel_tuple_cost to default;
set max_parallel_workers to default;
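These hunks can drop the \if because PG16 renamed force_parallel_mode to debug_parallel_query without changing its behavior, so on PG16+ one name always works. The unconditional form the tests keep, annotated as a sketch:
set debug_parallel_query = regress;    -- PG16+ name of force_parallel_mode
set min_parallel_table_scan_size = 1;  -- make any scan look parallel-worthy
set parallel_tuple_cost = 0;
-- columnar should still refuse a parallel scan despite these settings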

View File

@ -414,10 +414,6 @@ BEGIN;
-- this wouldn't flush any data
insert into events (payload) select 'hello-'||s from generate_series(1, 10) s;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- Since table is large enough, normally postgres would prefer using
-- parallel workers when building the index.
--
@ -430,11 +426,7 @@ BEGIN;
-- following command to fail since we prevent using parallel workers for

-- columnar tables.
\if :server_version_ge_16
SET LOCAL debug_parallel_query = regress;
\else
SET LOCAL force_parallel_mode = regress;
\endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;

View File

@ -24,16 +24,9 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand'
INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- run parallel plans
\if :server_version_ge_16
SET debug_parallel_query = regress;
\else
SET force_parallel_mode = regress;
\endif
SET min_parallel_table_scan_size = 1;
SET parallel_tuple_cost = 0;
SET max_parallel_workers = 4;
@ -57,11 +50,8 @@ EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent;
SELECT count(*), sum(i), min(i), max(i) FROM parent;
SET columnar.enable_custom_scan TO DEFAULT;
\if :server_version_ge_16
SET debug_parallel_query TO DEFAULT;
\else
SET force_parallel_mode TO DEFAULT;
\endif
SET min_parallel_table_scan_size TO DEFAULT;
SET parallel_tuple_cost TO DEFAULT;
SET max_parallel_workers TO DEFAULT;

View File

@ -1,14 +1,3 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
-- create/drop database for pg >= 16
set citus.enable_create_database_propagation=on;

View File

@ -798,21 +798,15 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
-- include varnullingrels for PG16+
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
-- include varreturningtype for PG18+
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18
\gset
\if :server_version_ge_18
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varreturningtype 0 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\elif :server_version_ge_16
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\else
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\endif
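The three partkey strings differ only in which Var fields the server serializes: varnullingrels appeared in PG16 and varreturningtype in PG18 (as the comments above note), so dropping PG15 removes the branch that had neither field. To check what a given server stores (a sketch; the output is the nodeToString() form shown above):
SELECT partkey
FROM pg_dist_partition
WHERE logicalrelid = 'test_2'::regclass;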

View File

@ -1,14 +1,6 @@
--
-- COMPLEX_COUNT_DISTINCT
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;

View File

@ -1,17 +1,11 @@
--
-- MULTI_EXPLAIN
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
-- This test file has an alternative output because of the following in PG18:
-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
-- The alternative output can be deleted when we drop support for PG17
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
SET citus.next_shard_id TO 570000;

View File

@ -59,8 +59,6 @@ CREATE OPERATOR citus_mx_test_schema.=== (
SET search_path TO public;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
\gset
@ -69,12 +67,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\elif :server_version_ge_16
\else
-- In PG16, read-only server settings lc_collate and lc_ctype are removed
-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
\endif
CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale);

View File

@ -245,10 +245,6 @@ RESET citus.enable_metadata_sync;
-- the shards and indexes do not show up
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- PG16 added one more backend type B_STANDALONE_BACKEND
-- and also alphabetized the backend types, hence the orders changed
-- Relevant PG16 commit:
-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
-- Relevant Pg17 commit:
-- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92
-- Relevant PG18 commit:
@ -256,7 +252,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
\if :server_version_ge_18
SELECT 1 AS client_backend \gset
SELECT 5 AS bgworker \gset
@ -265,14 +260,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g
SELECT 1 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 5 AS walsender \gset
\elif :server_version_ge_16
\else
SELECT 4 AS client_backend \gset
SELECT 5 AS bgworker \gset
SELECT 12 AS walsender \gset
\else
SELECT 3 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 9 AS walsender \gset
\endif
-- say, we set it to bgworker
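The \gset numbers index positions in the backend-type ordering, which PG16 alphabetized when it added B_STANDALONE_BACKEND (per the comment this hunk removes), so each supported major keeps its own triple. To eyeball the ordering a live server reports (a sketch, independent of the test's numbering source):
SELECT DISTINCT backend_type
FROM pg_stat_activity
ORDER BY 1;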

View File

@ -3,13 +3,6 @@
--- varnullingrels field of a VAR node may contain relids of join relations that can make the var
--- NULL; in a rewritten distributed query without a join such relids do not have a meaning.
-- This test has an alternative goldfile because of the following feature in Postgres 16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
CREATE SCHEMA outer_join_columns_testing;
SET search_path to 'outer_join_columns_testing';
SET citus.next_shard_id TO 30070000;

View File

@ -295,8 +295,6 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4;
SET search_path TO public;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
\gset
@ -305,12 +303,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\elif :server_version_ge_16
\else
-- In PG16, read-only server settings lc_collate and lc_ctype are removed
-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
\endif
CREATE COLLATION test_schema_support.english (LOCALE = :current_locale);

Some files were not shown because too many files have changed in this diff.