pull/7981/merge
Mehmet YILMAZ 2025-06-20 12:46:52 +03:00 committed by GitHub
commit dcfd4f7c59
39 changed files with 18611 additions and 1619 deletions


@@ -108,6 +108,18 @@ RUN mkdir .pgenv-staging/
RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg18
RUN MAKEFLAGS="-j $(nproc)" pgenv build 18beta1
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
# Stage the pgenv artifacts for PG18
RUN mkdir .pgenv-staging/
RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS uncrustify-builder
RUN sudo apt update && sudo apt install -y cmake tree
@@ -201,6 +213,7 @@ COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/
COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg17 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg18 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pipenv /home/citus/.local/share/virtualenvs/ /home/citus/.local/share/virtualenvs/
@@ -216,7 +229,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~
# sets default pg version
-RUN pgenv switch 17.5
+RUN pgenv switch 18beta1
# make connecting to the coordinator easy
ENV PGPORT=9700

.gitattributes (vendored)

@@ -28,6 +28,7 @@ src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
src/backend/distributed/deparser/ruleutils_16.c -citus-style
src/backend/distributed/deparser/ruleutils_17.c -citus-style
src/backend/distributed/deparser/ruleutils_18.c -citus-style
src/backend/distributed/commands/index_pg_source.c -citus-style
src/include/distributed/citus_nodes.h -citus-style


@@ -32,11 +32,12 @@ jobs:
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
style_checker_tools_version: "0.8.18"
sql_snapshot_pg_version: "17.5"
-image_suffix: "-dev-d28f316"
+image_suffix: "-dev-aa7482a"
pg15_version: '{ "major": "15", "full": "15.13" }'
pg16_version: '{ "major": "16", "full": "16.9" }'
pg17_version: '{ "major": "17", "full": "17.5" }'
-upgrade_pg_versions: "15.13-16.9-17.5"
+pg18_version: '{ "major": "18", "full": "18beta1" }'
+upgrade_pg_versions: "15.13-16.9-17.5-18beta1"
steps:
# Since GHA jobs need at least one step we use a noop step here.
- name: Set up parameters
@@ -113,6 +114,7 @@ jobs:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
- ${{ needs.params.outputs.pg18_version }}
runs-on: ubuntu-latest
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
@@ -144,6 +146,7 @@ jobs:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
- ${{ needs.params.outputs.pg18_version }}
make:
- check-split
- check-multi
@@ -173,6 +176,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg18_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -185,6 +192,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg18_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -197,6 +208,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg18_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
@@ -209,6 +224,10 @@ jobs:
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg17_version }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg18_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -221,6 +240,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg18_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
runs-on: ubuntu-latest
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
@@ -264,6 +287,7 @@ jobs:
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
- ${{ needs.params.outputs.pg18_version }}
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
steps:
- uses: actions/checkout@v4
@@ -314,6 +338,10 @@ jobs:
new_pg_major: 17
- old_pg_major: 15
new_pg_major: 17
- old_pg_major: 17
new_pg_major: 18
- old_pg_major: 16
new_pg_major: 18
env:
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
@@ -401,7 +429,7 @@ jobs:
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
runs-on: ubuntu-latest
container:
-image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
+image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg18_version).full }}${{ needs.params.outputs.image_suffix }}
needs:
- params
- test-citus
@@ -513,7 +541,7 @@ jobs:
name: Test flakyness
runs-on: ubuntu-latest
container:
-image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
+image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg18_version).full }}${{ needs.params.outputs.image_suffix }}
options: --user root
env:
runs: 8


@@ -29,7 +29,7 @@ jobs:
# Postgres versions are stored in .github/workflows/build_and_test.yml
# file in json strings with major and full keys.
# Below command extracts the versions and get the unique values.
-pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
+pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[^"]+"' | sed -E 's/.*"major": "([0-9]+)".*/\1/' | sort -n | uniq | tr '\n' ',')
pg_versions_array="[ ${pg_versions} ]"
echo "Supported PG Versions: ${pg_versions_array}"
# Below line is needed to set the output variable to be used in the next job

configure (vendored)

File diff suppressed because it is too large.

@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
-elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
+elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17' -a "$version_num" != '18'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])

configure~ (executable file)

File diff suppressed because it is too large.


@@ -21,6 +21,13 @@
#include "catalog/pg_am.h"
#include "catalog/pg_statistic.h"
#include "commands/defrem.h"
#include "columnar/columnar_version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_18
#include "commands/explain_format.h"
#endif
#include "executor/executor.h" /* for ExecInitExprWithParams(), ExecEvalExpr() */
#include "nodes/execnodes.h" /* for ExprState, ExprContext, etc. */
#include "nodes/extensible.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"


@@ -645,10 +645,10 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
{
values[Anum_columnar_chunk_minimum_value - 1] =
PointerGetDatum(DatumToBytea(chunk->minimumValue,
-&tupleDescriptor->attrs[columnIndex]));
+Attr(tupleDescriptor, columnIndex)));
values[Anum_columnar_chunk_maximum_value - 1] =
PointerGetDatum(DatumToBytea(chunk->maximumValue,
-&tupleDescriptor->attrs[columnIndex]));
+Attr(tupleDescriptor, columnIndex)));
}
else
{
@@ -803,9 +803,9 @@ ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
datumArray[Anum_columnar_chunk_maximum_value - 1]);
chunk->minimumValue =
-ByteaToDatum(minValue, &tupleDescriptor->attrs[columnIndex]);
+ByteaToDatum(minValue, Attr(tupleDescriptor, columnIndex));
chunk->maximumValue =
-ByteaToDatum(maxValue, &tupleDescriptor->attrs[columnIndex]);
+ByteaToDatum(maxValue, Attr(tupleDescriptor, columnIndex));
chunk->hasMinMax = true;
}
@@ -1414,16 +1414,28 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
storageId, stripeId)));
}
/*
* heap_modify_tuple + heap_inplace_update only exist on PG < 18;
* on PG18 the in-place helper was removed upstream, so we skip the whole block.
*/
#if PG_VERSION_NUM < PG_VERSION_18
/*
* heap_inplace_update already doesn't allow changing size of the original
* tuple, so we don't allow setting any Datum's to NULL values.
*/
bool newNulls[Natts_columnar_stripe] = { false };
TupleDesc tupleDescriptor = RelationGetDescr(columnarStripes);
-HeapTuple modifiedTuple = heap_modify_tuple(oldTuple, tupleDescriptor,
-newValues, newNulls, update);
+HeapTuple modifiedTuple = heap_modify_tuple(oldTuple,
+tupleDescriptor,
+newValues,
+newNulls,
+update);
heap_inplace_update(columnarStripes, modifiedTuple);
#endif
/*
* Existing tuple now contains modifications, because we used
@@ -1727,12 +1739,37 @@ create_estate_for_relation(Relation rel)
rte->relkind = rel->rd_rel->relkind;
rte->rellockmode = AccessShareLock;
/* Prepare permission info on PG 16+ */
#if PG_VERSION_NUM >= PG_VERSION_16
List *perminfos = NIL;
addRTEPermissionInfo(&perminfos, rte);
-ExecInitRangeTable(estate, list_make1(rte), perminfos);
+#endif
/* Initialize the range table, with the right signature for each PG version */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ needs four arguments (unpruned_relids) */
ExecInitRangeTable(
estate,
list_make1(rte),
perminfos,
NULL /* unpruned_relids: not used by columnar */
);
#elif PG_VERSION_NUM >= PG_VERSION_16
/* PG 16-17: three-arg signature (permInfos) */
ExecInitRangeTable(
estate,
list_make1(rte),
perminfos
);
#else
-ExecInitRangeTable(estate, list_make1(rte));
/* PG 15: two-arg signature */
ExecInitRangeTable(
estate,
list_make1(rte)
);
#endif
estate->es_output_cid = GetCurrentCommandId(true);


@@ -1012,7 +1012,7 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
for (int i = 0; i < tupdesc->natts; i++)
{
-if (tupdesc->attrs[i].attisdropped)
+if (Attr(tupdesc, i)->attisdropped)
{
continue;
}
@@ -1121,10 +1121,23 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
bool frozenxid_updated;
bool minmulti_updated;
/* for PG 18+, vac_update_relstats gained a new “all_frozen” param */
#if PG_VERSION_NUM >= PG_VERSION_18
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, /* allvisible */
0, /* all_frozen */
nindexes > 0,
newRelFrozenXid, newRelminMxid,
&frozenxid_updated, &minmulti_updated,
false);
#else
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, nindexes > 0,
newRelFrozenXid, newRelminMxid,
-&frozenxid_updated, &minmulti_updated, false);
+&frozenxid_updated, &minmulti_updated,
false);
#endif
#else
TransactionId oldestXmin;
TransactionId freezeLimit;
@@ -1187,10 +1200,19 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
#endif
#endif
#if PG_VERSION_NUM >= PG_VERSION_18
pgstat_report_vacuum(RelationGetRelid(rel),
rel->rd_rel->relisshared,
Max(new_live_tuples, 0), /* live tuples */
0, /* dead tuples */
GetCurrentTimestamp()); /* start time */
#else
pgstat_report_vacuum(RelationGetRelid(rel),
rel->rd_rel->relisshared,
Max(new_live_tuples, 0),
0);
#endif
pgstat_progress_end_command();
}
@@ -1225,7 +1247,7 @@ LogRelationStats(Relation rel, int elevel)
GetTransactionSnapshot());
for (uint32 column = 0; column < skiplist->columnCount; column++)
{
-bool attrDropped = tupdesc->attrs[column].attisdropped;
+bool attrDropped = Attr(tupdesc, column)->attisdropped;
for (uint32 chunk = 0; chunk < skiplist->chunkCount; chunk++)
{
ColumnChunkSkipNode *skipnode =
@@ -2564,8 +2586,13 @@ static const TableAmRoutine columnar_am_methods = {
.relation_estimate_size = columnar_estimate_rel_size,
#if PG_VERSION_NUM < PG_VERSION_18
/* these two fields were removed in PG18 */
.scan_bitmap_next_block = NULL,
.scan_bitmap_next_tuple = NULL,
#endif
.scan_sample_next_block = columnar_scan_sample_next_block,
.scan_sample_next_tuple = columnar_scan_sample_next_tuple
};
@@ -2603,7 +2630,7 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull)
for (int i = 0; i < tupleDesc->natts; i++)
{
-if (!isnull[i] && tupleDesc->attrs[i].attlen == -1 &&
+if (!isnull[i] && Attr(tupleDesc, i)->attlen == -1 &&
VARATT_IS_EXTENDED(values[i]))
{
/* make a copy */


@@ -3049,7 +3049,7 @@ CitusCopySelect(CopyStmt *copyStatement)
for (int i = 0; i < tupleDescriptor->natts; i++)
{
-Form_pg_attribute attr = &tupleDescriptor->attrs[i];
+Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, i);
if (attr->attisdropped ||
attr->attgenerated


@@ -14,6 +14,7 @@
#include "miscadmin.h"
#include "pgstat.h"
#include "catalog/pg_collation.h"
#include "lib/stringinfo.h"
#include "storage/latch.h"
#include "utils/builtins.h"
@@ -371,8 +372,9 @@ CommandMatchesLogGrepPattern(const char *command)
if (GrepRemoteCommands && strnlen(GrepRemoteCommands, NAMEDATALEN) > 0)
{
Datum boolDatum =
-DirectFunctionCall2(textlike, CStringGetTextDatum(command),
-CStringGetTextDatum(GrepRemoteCommands));
+DirectFunctionCall2Coll(textlike, DEFAULT_COLLATION_OID,
+CStringGetTextDatum(command),
+CStringGetTextDatum(GrepRemoteCommands));
return DatumGetBool(boolDatum);
}


@@ -11,6 +11,7 @@
#include "postgres.h"
#include "utils/elog.h"
#include "utils/memutils.h" /* for TopTransactionContext */
#include "distributed/connection_management.h"
#include "distributed/error_codes.h"


@@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
*
-* ruleutils_16.c
+* ruleutils_17.c
* Functions to convert stored expressions/querytrees back to
* source text
*
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
-* src/backend/distributed/deparser/ruleutils_16.c
+* src/backend/distributed/deparser/ruleutils_17.c
*
* This needs to be closely in sync with the core code.
*-------------------------------------------------------------------------

File diff suppressed because it is too large.


@@ -755,13 +755,48 @@ ExecuteTaskPlan(PlannedStmt *taskPlan, char *queryString,
CreateDestReceiver(DestNone);
/* Create a QueryDesc for the query */
-QueryDesc *queryDesc = CreateQueryDesc(taskPlan, queryString,
-GetActiveSnapshot(), InvalidSnapshot,
-destReceiver, paramListInfo,
-queryEnv, 0);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG18+: nine-arg CreateQueryDesc with a CachedPlan slot */
QueryDesc *queryDesc = CreateQueryDesc(
taskPlan, /* PlannedStmt *plannedstmt */
NULL, /* CachedPlan *cplan (none) */
queryString, /* const char *sourceText */
GetActiveSnapshot(), /* Snapshot snapshot */
InvalidSnapshot, /* Snapshot crosscheck_snapshot */
destReceiver, /* DestReceiver *dest */
paramListInfo, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
0 /* int instrument_options */
);
#else
/* PG 15-17: eight-arg CreateQueryDesc without CachedPlan */
QueryDesc *queryDesc = CreateQueryDesc(
taskPlan, /* PlannedStmt *plannedstmt */
queryString, /* const char *sourceText */
GetActiveSnapshot(), /* Snapshot snapshot */
InvalidSnapshot, /* Snapshot crosscheck_snapshot */
destReceiver, /* DestReceiver *dest */
paramListInfo, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
0 /* int instrument_options */
);
#endif
ExecutorStart(queryDesc, eflags);
/* run the plan: count = 0 (all rows) */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ dropped the “execute_once” boolean */
ExecutorRun(queryDesc, scanDirection, 0L);
#else
/* PG 17 and earlier still expect the 4th "once" argument */
ExecutorRun(queryDesc, scanDirection, 0L, true);
#endif
/*
* We'll set the executorState->es_processed later, for now only remember


@@ -235,7 +235,20 @@ CitusExecutorRun(QueryDesc *queryDesc,
/* postgres will switch here again and will restore back on its own */
MemoryContextSwitchTo(oldcontext);
-standard_ExecutorRun(queryDesc, direction, count, execute_once);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG18+ drops the “execute_once” argument */
standard_ExecutorRun(queryDesc,
direction,
count);
#else
/* PG17-: original four-arg signature */
standard_ExecutorRun(queryDesc,
direction,
count,
execute_once);
#endif
}
if (totalTime)
@@ -688,15 +701,55 @@ ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params,
/* don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-PortalDefineQuery(portal,
-NULL,
-"",
-CMDTAG_SELECT,
-list_make1(queryPlan),
-NULL);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PostgreSQL 18+ adds a seventh “plansource” argument */
PortalDefineQuery(
portal,
NULL, /* no prepared statement name */
"", /* query text */
CMDTAG_SELECT, /* command tag */
list_make1(queryPlan),/* list of PlannedStmt* */
NULL, /* no CachedPlan */
NULL /* no CachedPlanSource */
);
#else
/* PostgreSQL 17-: six-arg signature */
PortalDefineQuery(
portal,
NULL, /* no prepared statement name */
"", /* query text */
CMDTAG_SELECT, /* command tag */
list_make1(queryPlan),/* list of PlannedStmt* */
NULL /* no CachedPlan */
);
#endif
PortalStart(portal, params, eflags, GetActiveSnapshot());
-PortalRun(portal, count, false, true, dest, dest, NULL);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+: six-arg signature (drop the run_once bool) */
PortalRun(portal,
count, /* how many rows to fetch */
false, /* isTopLevel */
dest, /* DestReceiver *dest */
dest, /* DestReceiver *altdest */
NULL); /* QueryCompletion *qc */
#else
/* PG 17-: original seven-arg signature */
PortalRun(portal,
count, /* how many rows to fetch */
false, /* isTopLevel */
true, /* run_once */
dest, /* DestReceiver *dest */
dest, /* DestReceiver *altdest */
NULL); /* QueryCompletion *qc */
#endif
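The same PortalRun fork recurs in worker_partition_query_result and ExecuteSqlString below; if desired, it could be hoisted into the existing *_compat macro style. A minimal sketch, assuming only the six- and seven-argument signatures shown in this diff (PortalRun_compat is a hypothetical name, not something this PR adds):

/* illustration only -- not part of the patch */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+: no run_once argument */
#define PortalRun_compat(portal, count, isTopLevel, dest, altdest, qc) \
	PortalRun((portal), (count), (isTopLevel), (dest), (altdest), (qc))
#else
/* PG 15-17: pass run_once = true, as the call sites above do */
#define PortalRun_compat(portal, count, isTopLevel, dest, altdest, qc) \
	PortalRun((portal), (count), (isTopLevel), true, (dest), (altdest), (qc))
#endif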
PortalDrop(portal, false);
}


@@ -242,7 +242,27 @@ worker_partition_query_result(PG_FUNCTION_ARGS)
allowNullPartitionColumnValues);
/* execute the query */
-PortalRun(portal, FETCH_ALL, false, true, dest, dest, NULL);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG18+: drop the “run_once” bool */
PortalRun(portal,
FETCH_ALL, /* count */
false, /* isTopLevel */
dest, /* dest receiver */
dest, /* alternative dest */
NULL); /* QueryCompletion *qc */
#else
/* PG 15-17: original seven-arg signature */
PortalRun(portal,
FETCH_ALL, /* count */
false, /* isTopLevel */
true, /* run_once */
dest, /* dest receiver */
dest, /* alternative dest */
NULL); /* QueryCompletion *qc */
#endif
/* construct the output result */
TupleDesc returnTupleDesc = NULL;
@@ -295,8 +315,31 @@ StartPortalForQueryExecution(const char *queryString)
/* don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT,
-list_make1(queryPlan), NULL);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+: new CachedPlanSource slot */
PortalDefineQuery(
portal,
NULL, /* no prepared-stmt name */
queryString, /* the SQL text */
CMDTAG_SELECT, /* we're running a SELECT */
list_make1(queryPlan), /* plan trees */
NULL, /* no CachedPlan */
NULL /* no CachedPlanSource */
);
#else
/* PG 15-17: six-arg signature */
PortalDefineQuery(
portal,
NULL,
queryString,
CMDTAG_SELECT,
list_make1(queryPlan),
NULL /* no CachedPlan */
);
#endif
int eflags = 0;
PortalStart(portal, NULL, eflags, GetActiveSnapshot());


@@ -2965,8 +2965,18 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
* https://github.com/citusdata/citus/pull/2855#discussion_r313628554
* https://github.com/citusdata/citus/issues/1890
*/
-Relation replicaIndex = index_open(RelationGetPrimaryKeyIndex(pgDistNode),
-AccessShareLock);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ adds a bool “deferrable_ok” parameter */
Relation replicaIndex =
index_open(RelationGetPrimaryKeyIndex(pgDistNode, false),
AccessShareLock);
#else
Relation replicaIndex =
index_open(RelationGetPrimaryKeyIndex(pgDistNode),
AccessShareLock);
#endif
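The same RelationGetPrimaryKeyIndex fork reappears below in GetRelationIdentityOrPK and DeleteColocationGroupLocally; a hedged sketch of a *_compat wrapper, assuming only the two signatures shown in this diff (the name RelationGetPrimaryKeyIndex_compat is hypothetical, not added by this PR):

/* illustration only -- not part of the patch */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ takes a second deferrable_ok flag; false preserves the old behaviour */
#define RelationGetPrimaryKeyIndex_compat(rel) RelationGetPrimaryKeyIndex((rel), false)
#else
#define RelationGetPrimaryKeyIndex_compat(rel) RelationGetPrimaryKeyIndex(rel)
#endif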
ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename,
BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName));


@@ -746,7 +746,12 @@ GetRelationIdentityOrPK(Relation rel)
if (!OidIsValid(idxoid))
{
/* Determine the index OID of the primary key (PG18 adds a second parameter) */
#if PG_VERSION_NUM >= PG_VERSION_18
idxoid = RelationGetPrimaryKeyIndex(rel, false);
#else
idxoid = RelationGetPrimaryKeyIndex(rel);
#endif
}
return idxoid;


@@ -13,6 +13,7 @@
#include "postgres.h"
#include "executor/executor.h" /* for CreateExecutorState(), FreeExecutorState(), CreateExprContext(), etc. */
#include "utils/builtins.h"
#include "utils/lsyscache.h"


@@ -44,6 +44,11 @@
#include "utils/snapmgr.h"
#include "pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_18
#include "commands/explain_dr.h" /* CreateExplainSerializeDestReceiver() */
#include "commands/explain_format.h"
#endif
#include "distributed/citus_depended_object.h"
#include "distributed/citus_nodefuncs.h"
@@ -134,7 +139,7 @@ typedef struct ExplainAnalyzeDestination
TupleDesc lastSavedExplainAnalyzeTupDesc;
} ExplainAnalyzeDestination;
-#if PG_VERSION_NUM >= PG_VERSION_17
+#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
/*
* Various places within need to convert bytes to kilobytes. Round these up
@@ -529,19 +534,53 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
ExplainOpenGroup("PlannedStmt", "PlannedStmt", false, es);
/* Capture memory stats on PG17+ */
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->memory)
{
MemoryContextSwitchTo(saved_ctx);
MemoryContextMemConsumed(planner_ctx, &mem_counters);
}
#endif
-ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration,
-(es->buffers ? &bufusage : NULL),
-(es->memory ? &mem_counters : NULL));
#if PG_VERSION_NUM >= PG_VERSION_18
ExplainOnePlan(
plan, /* PlannedStmt *plannedstmt */
NULL, /* CachedPlan *cplan */
NULL, /* CachedPlanSource *plansource */
0, /* query_index */
into, /* IntoClause *into */
es, /* struct ExplainState *es */
queryString, /* const char *queryString */
params, /* ParamListInfo params */
NULL, /* QueryEnvironment *queryEnv */
&planduration, /* const instr_time *planduration */
(es->buffers ? &bufusage : NULL),/* const BufferUsage *bufusage */
(es->memory ? &mem_counters : NULL) /* const MemoryContextCounters *mem_counters */
);
#elif PG_VERSION_NUM >= PG_VERSION_17
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
NULL, /* QueryEnvironment *queryEnv */
&planduration,
(es->buffers ? &bufusage : NULL),
(es->memory ? &mem_counters : NULL)
);
#else
-ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration,
-(es->buffers ? &bufusage : NULL));
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
NULL, /* QueryEnvironment *queryEnv */
&planduration,
(es->buffers ? &bufusage : NULL)
);
#endif
ExplainCloseGroup("PlannedStmt", "PlannedStmt", false, es);
@@ -1558,22 +1597,55 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
/* capture memory stats on PG17+ */
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->memory)
{
MemoryContextSwitchTo(saved_ctx);
MemoryContextMemConsumed(planner_ctx, &mem_counters);
}
#endif
-/* run it (if needed) and produce output */
-ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
-&planduration, (es->buffers ? &bufusage : NULL),
-(es->memory ? &mem_counters : NULL));
#if PG_VERSION_NUM >= PG_VERSION_18
ExplainOnePlan(
plan, /* PlannedStmt *plannedstmt */
NULL, /* no CachedPlan */
NULL, /* no CachedPlanSource */
0, /* query_index */
into, /* IntoClause *into */
es, /* struct ExplainState *es */
queryString, /* const char *queryString */
params, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
&planduration, /* const instr_time *planduration */
(es->buffers ? &bufusage : NULL), /* const BufferUsage *bufusage */
(es->memory ? &mem_counters : NULL) /* const MemoryContextCounters *mem_counters */
);
#elif PG_VERSION_NUM >= PG_VERSION_17
/* PostgreSQL 17 signature (9 args: includes mem_counters) */
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
queryEnv,
&planduration,
(es->buffers ? &bufusage : NULL),
(es->memory ? &mem_counters : NULL)
);
#else
-/* run it (if needed) and produce output */
-ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
-&planduration, (es->buffers ? &bufusage : NULL));
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
queryEnv,
&planduration,
(es->buffers ? &bufusage : NULL)
);
#endif
}
@@ -1805,7 +1877,7 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc,
appendStringInfoString(columnDef, ", ");
}
-Form_pg_attribute attr = &tupleDesc->attrs[columnIndex];
+Form_pg_attribute attr = TupleDescAttr(tupleDesc, columnIndex);
char *attrType = format_type_extended(attr->atttypid, attr->atttypmod,
FORMAT_TYPE_TYPEMOD_GIVEN |
FORMAT_TYPE_FORCE_QUALIFY);
@@ -2026,21 +2098,55 @@ ExplainOneQuery(Query *query, int cursorOptions,
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
/* 1) Capture memory counters on PG17+ only once: */
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->memory)
{
MemoryContextSwitchTo(saved_ctx);
MemoryContextMemConsumed(planner_ctx, &mem_counters);
}
-/* run it (if needed) and produce output */
-ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
-&planduration, (es->buffers ? &bufusage : NULL),
-(es->memory ? &mem_counters : NULL));
-#else
-/* run it (if needed) and produce output */
-ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
-&planduration, (es->buffers ? &bufusage : NULL));
#endif
#if PG_VERSION_NUM >= PG_VERSION_18
ExplainOnePlan(
plan, /* PlannedStmt *plannedstmt */
NULL, /* CachedPlan *cplan */
NULL, /* CachedPlanSource *plansource */
0, /* query_index */
into, /* IntoClause *into */
es, /* struct ExplainState *es */
queryString, /* const char *queryString */
params, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
&planduration, /* const instr_time *planduration */
(es->buffers ? &bufusage : NULL),
(es->memory ? &mem_counters: NULL)
);
#elif PG_VERSION_NUM >= PG_VERSION_17
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
queryEnv,
&planduration,
(es->buffers ? &bufusage : NULL),
(es->memory ? &mem_counters: NULL)
);
#else
ExplainOnePlan(
plan,
into,
es,
queryString,
params,
queryEnv,
&planduration,
(es->buffers ? &bufusage : NULL)
);
#endif
}
}
@@ -2102,9 +2208,30 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
UpdateActiveSnapshotCommandId();
/* Create a QueryDesc for the query */
-queryDesc = CreateQueryDesc(plannedstmt, queryString,
-GetActiveSnapshot(), InvalidSnapshot,
-dest, params, queryEnv, instrument_option);
#if PG_VERSION_NUM >= PG_VERSION_18
queryDesc = CreateQueryDesc(
plannedstmt, /* PlannedStmt *plannedstmt */
NULL, /* CachedPlan *cplan (none) */
queryString, /* const char *sourceText */
GetActiveSnapshot(), /* Snapshot snapshot */
InvalidSnapshot, /* Snapshot crosscheck_snapshot */
dest, /* DestReceiver *dest */
params, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
instrument_option /* int instrument_options */
);
#else
queryDesc = CreateQueryDesc(
plannedstmt, /* PlannedStmt *plannedstmt */
queryString, /* const char *sourceText */
GetActiveSnapshot(), /* Snapshot snapshot */
InvalidSnapshot, /* Snapshot crosscheck_snapshot */
dest, /* DestReceiver *dest */
params, /* ParamListInfo params */
queryEnv, /* QueryEnvironment *queryEnv */
instrument_option /* int instrument_options */
);
#endif
/* Select execution options */
if (es->analyze)
@@ -2121,7 +2248,14 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
ScanDirection dir = ForwardScanDirection;
-/* run the plan */
-ExecutorRun(queryDesc, dir, 0L, true);
/* run the plan: count = 0 (all rows) */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ dropped the “execute_once” boolean */
ExecutorRun(queryDesc, dir, 0L);
#else
/* PG 17- still expects the 4th "once" argument */
ExecutorRun(queryDesc, dir, 0L, true);
#endif
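This ExecutorRun fork is repeated in ExecuteTaskPlan and CitusExecutorRun as well; a minimal sketch of how it could be centralized in the existing *_compat style, assuming only the two signatures shown in this diff (ExecutorRun_compat is a hypothetical name, not part of this PR):

/* illustration only -- not part of the patch */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ dropped the execute_once boolean */
#define ExecutorRun_compat(queryDesc, direction, count) \
	ExecutorRun((queryDesc), (direction), (count))
#else
/* PG 15-17: keep passing execute_once = true, matching the call sites above */
#define ExecutorRun_compat(queryDesc, direction, count) \
	ExecutorRun((queryDesc), (direction), (count), true)
#endif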
/* run cleanup too */
ExecutorFinish(queryDesc);
@@ -2135,7 +2269,7 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
/* Create textual dump of plan tree */
ExplainPrintPlan(es, queryDesc);
-#if PG_VERSION_NUM >= PG_VERSION_17
+#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
/* Show buffer and/or memory usage in planning */
if (peek_buffer_usage(es, bufusage) || mem_counters)
{
@@ -2181,7 +2315,7 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
if (es->costs)
ExplainPrintJITSummary(es, queryDesc);
-#if PG_VERSION_NUM >= PG_VERSION_17
+#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
if (es->serialize != EXPLAIN_SERIALIZE_NONE)
{
/* the SERIALIZE option requires its own tuple receiver */
@@ -2248,7 +2382,7 @@ elapsed_time(instr_time *starttime)
}
-#if PG_VERSION_NUM >= PG_VERSION_17
+#if PG_VERSION_NUM >= PG_VERSION_17 && PG_VERSION_NUM < PG_VERSION_18
/*
* Return whether show_buffer_usage would have anything to print, if given
* the same 'usage' data. Note that when the format is anything other than
@@ -2560,7 +2694,7 @@ ExplainPrintSerialize(ExplainState *es, SerializeMetrics *metrics)
ExplainPropertyFloat("Time", "ms",
1000.0 * INSTR_TIME_GET_DOUBLE(metrics->timeSpent),
3, es);
-ExplainPropertyUInteger("Output Volume", "kB",
+ExplainPropertyInteger("Output Volume", "kB",
BYTES_TO_KILOBYTES(metrics->bytesSent), es);
ExplainPropertyText("Format", format, es);
if (es->buffers)


@@ -35,6 +35,10 @@
#include "utils/syscache.h"
#include "pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_18
typedef OpIndexInterpretation OpBtreeInterpretation;
#endif
#include "distributed/citus_clauses.h"
#include "distributed/colocation_utils.h"
@@ -61,6 +65,11 @@ typedef struct QualifierWalkerContext
} QualifierWalkerContext;
#if PG_VERSION_NUM >= PG_VERSION_18
#define get_op_btree_interpretation(opno) get_op_index_interpretation(opno)
#endif
/* Function pointer type definition for apply join rule functions */
typedef MultiNode *(*RuleApplyFunction) (MultiNode *leftNode, MultiNode *rightNode,
List *partitionColumnList, JoinType joinType,
@@ -2293,7 +2302,12 @@ OperatorImplementsEquality(Oid opno)
{
OpBtreeInterpretation *btreeIntepretation = (OpBtreeInterpretation *)
lfirst(btreeInterpretationCell);
#if PG_VERSION_NUM >= PG_VERSION_18
if (btreeIntepretation->cmptype == BTEqualStrategyNumber)
#else
if (btreeIntepretation->strategy == BTEqualStrategyNumber)
#endif
{
equalityOperator = true;
break;


@@ -1418,8 +1418,24 @@ ExtractColumns(RangeTblEntry *callingRTE, int rangeTableId,
int subLevelsUp = 0;
int location = -1;
bool includeDroppedColumns = false;
-expandRTE(callingRTE, rangeTableId, subLevelsUp, location, includeDroppedColumns,
-columnNames, columnVars);
#if PG_VERSION_NUM >= PG_VERSION_18
expandRTE(callingRTE,
rangeTableId,
subLevelsUp,
VAR_RETURNING_DEFAULT, /* new argument on PG18+ */
location,
includeDroppedColumns,
columnNames,
columnVars);
#else
expandRTE(callingRTE,
rangeTableId,
subLevelsUp,
location,
includeDroppedColumns,
columnNames,
columnVars);
#endif
}


@@ -85,6 +85,10 @@
#include "utils/ruleutils.h"
#include "pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_18
typedef OpIndexInterpretation OpBtreeInterpretation;
#endif
#include "distributed/distributed_planner.h"
#include "distributed/listutils.h"
@@ -177,6 +181,11 @@ typedef struct PruningInstance
bool isPartial;
} PruningInstance;
#if PG_VERSION_NUM >= PG_VERSION_18
#define get_op_btree_interpretation(opno) get_op_index_interpretation(opno)
#define ROWCOMPARE_NE COMPARE_NE
#endif
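The strategy-to-cmptype field rename still needs an #if at each comparison below; one possible alternative (illustrative only, not something this PR does) would be an accessor macro next to the mapping above:

/* hypothetical accessor, sketch only */
#if PG_VERSION_NUM >= PG_VERSION_18
#define BTREE_INTERPRETATION_CMPTYPE(interp) ((interp)->cmptype)
#else
#define BTREE_INTERPRETATION_CMPTYPE(interp) ((interp)->strategy)
#endif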
/*
* Partial instances that need to be finished building. This is used to
@@ -1078,7 +1087,11 @@ IsValidPartitionKeyRestriction(OpExpr *opClause)
OpBtreeInterpretation *btreeInterpretation =
(OpBtreeInterpretation *) lfirst(btreeInterpretationCell);
#if PG_VERSION_NUM >= PG_VERSION_18
if (btreeInterpretation->cmptype == ROWCOMPARE_NE)
#else
if (btreeInterpretation->strategy == ROWCOMPARE_NE)
#endif
{
/* TODO: could add support for this, if we feel like it */
return false;
@@ -1130,7 +1143,11 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
OpBtreeInterpretation *btreeInterpretation =
(OpBtreeInterpretation *) lfirst(btreeInterpretationCell);
#if PG_VERSION_NUM >= PG_VERSION_18
switch (btreeInterpretation->cmptype)
#else
switch (btreeInterpretation->strategy)
#endif
{
case BTLessStrategyNumber:
{
@@ -1299,7 +1316,11 @@ IsValidHashRestriction(OpExpr *opClause)
OpBtreeInterpretation *btreeInterpretation =
(OpBtreeInterpretation *) lfirst(btreeInterpretationCell);
#if PG_VERSION_NUM >= PG_VERSION_18
if (btreeInterpretation->cmptype == BTGreaterEqualStrategyNumber)
#else
if (btreeInterpretation->strategy == BTGreaterEqualStrategyNumber)
#endif
{
return true;
}


@@ -383,6 +383,33 @@ static const struct config_enum_entry metadata_sync_mode_options[] = {
/* *INDENT-ON* */
/*----------------------------------------------------------------------*
* On PG 18+ the hook signatures changed; we wrap the old Citus handlers
* in fresh functions that match the new typedefs exactly.
*----------------------------------------------------------------------*/
#if PG_VERSION_NUM >= PG_VERSION_18
static bool
citus_executor_start_adapter(QueryDesc *queryDesc, int eflags)
{
/* call the original Citus hook (void) and always return “true” */
CitusExecutorStart(queryDesc, eflags);
return true;
}
static void
citus_executor_run_adapter(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count)
{
/* call the original Citus hook (which still expects the old 4-arg form) */
CitusExecutorRun(queryDesc, direction, count, true);
}
#endif
/* shared library initialization function */
void
_PG_init(void)
@@ -457,8 +484,13 @@ _PG_init(void)
set_rel_pathlist_hook = multi_relation_restriction_hook;
get_relation_info_hook = multi_get_relation_info_hook;
set_join_pathlist_hook = multi_join_restriction_hook;
#if PG_VERSION_NUM >= PG_VERSION_18
ExecutorStart_hook = citus_executor_start_adapter;
ExecutorRun_hook = citus_executor_run_adapter;
#else
ExecutorStart_hook = CitusExecutorStart;
ExecutorRun_hook = CitusExecutorRun;
#endif
ExplainOneQuery_hook = CitusExplainOneQuery;
prev_ExecutorEnd = ExecutorEnd_hook;
ExecutorEnd_hook = CitusAttributeToEnd;


@@ -485,6 +485,7 @@ fake_estimate_rel_size(Relation rel, int32 *attr_widths,
* Executor related callbacks for the fake AM
* ------------------------------------------------------------------------
*/
#if PG_VERSION_NUM < PG_VERSION_18
static bool
fake_scan_bitmap_next_block(TableScanDesc scan,
TBMIterateResult *tbmres)
@@ -502,6 +503,8 @@ fake_scan_bitmap_next_tuple(TableScanDesc scan,
}
#endif
static bool
fake_scan_sample_next_block(TableScanDesc scan,
SampleScanState *scanstate)
@@ -578,8 +581,13 @@ static const TableAmRoutine fake_methods = {
.relation_estimate_size = fake_estimate_rel_size,
#if PG_VERSION_NUM < PG_VERSION_18
/* these two fields were removed in PG18 */
.scan_bitmap_next_block = fake_scan_bitmap_next_block,
.scan_bitmap_next_tuple = fake_scan_bitmap_next_tuple,
#endif
.scan_sample_next_block = fake_scan_sample_next_block,
.scan_sample_next_tuple = fake_scan_sample_next_tuple
};


@@ -21,6 +21,7 @@
#include "common/hashfn.h"
#include "utils/hsearch.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h" /* for ALLOCSET_DEFAULT_MINSIZE, _INITSIZE, _MAXSIZE */
#include "pg_version_constants.h"


@@ -514,8 +514,9 @@ PendingWorkerTransactionList(MultiConnection *connection)
List *transactionNames = NIL;
int32 coordinatorId = GetLocalGroupId();
-appendStringInfo(command, "SELECT gid FROM pg_prepared_xacts "
-"WHERE gid LIKE 'citus\\_%d\\_%%' and database = current_database()",
+appendStringInfo(command,
+"SELECT gid FROM pg_prepared_xacts "
+"WHERE gid COLLATE pg_catalog.default LIKE 'citus\\_%d\\_%%' COLLATE pg_catalog.default AND database = current_database()",
coordinatorId);
int querySent = SendRemoteCommand(connection, command->data);


@@ -918,7 +918,7 @@ TypecheckWorkerPartialAggArgType(FunctionCallInfo fcinfo, StypeBox *box)
true, 'i', &argtypesNull);
Assert(!argtypesNull);
TupleDesc tupleDesc = box->aggregationArgumentContext->tupleDesc;
-if (argType != tupleDesc->attrs[aggregateArgIndex].atttypid)
+if (argType != TupleDescAttr(tupleDesc, aggregateArgIndex)->atttypid)
{
return false;
}


@@ -1906,7 +1906,32 @@ ExecuteSqlString(const char *sql)
/* Don't display the portal in pg_cursors */
portal->visible = false;
-PortalDefineQuery(portal, NULL, sql, commandTag, plantree_list, NULL);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG18+ added a seventh “plansource” argument */
PortalDefineQuery(
portal,
NULL, /* no prepared-stmt name */
sql, /* the query text */
commandTag, /* the CommandTag */
plantree_list, /* List of PlannedStmt* */
NULL, /* no CachedPlan */
NULL /* no CachedPlanSource */
);
#else
/* PG17-: six-arg signature */
PortalDefineQuery(
portal,
NULL, /* no prepared-stmt name */
sql, /* the query text */
commandTag, /* the CommandTag */
plantree_list, /* List of PlannedStmt* */
NULL /* no CachedPlan */
);
#endif
PortalStart(portal, NULL, 0, InvalidSnapshot);
int16 format[] = { 1 };
PortalSetResultFormat(portal, lengthof(format), format); /* binary format */
@@ -1923,7 +1948,28 @@ ExecuteSqlString(const char *sql)
/* Here's where we actually execute the command. */
QueryCompletion qc = { 0 };
-(void) PortalRun(portal, FETCH_ALL, isTopLevel, true, receiver, receiver, &qc);
/* Execute the portal, dropping the `run_once` arg on PG18+ */
#if PG_VERSION_NUM >= PG_VERSION_18
(void) PortalRun(
portal,
FETCH_ALL, /* count */
isTopLevel, /* isTopLevel */
receiver, /* DestReceiver *dest */
receiver, /* DestReceiver *altdest */
&qc /* QueryCompletion *qc */
);
#else
(void) PortalRun(
portal,
FETCH_ALL, /* count */
isTopLevel, /* isTopLevel */
true, /* run_once */
receiver, /* DestReceiver *dest */
receiver, /* DestReceiver *altdest */
&qc /* QueryCompletion *qc */
);
#endif
/* Clean up the receiver. */
(*receiver->rDestroy)(receiver);


@@ -42,6 +42,7 @@
#include "parser/parse_type.h"
#include "storage/large_object.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/syscache.h"
#include "distributed/citus_depended_object.h"


@@ -324,6 +324,16 @@ GetRangeTblKind(RangeTblEntry *rte)
break;
}
#if PG_VERSION_NUM >= PG_VERSION_18
/* new in PG18: GROUP RTE, just map it straight through */
case RTE_GROUP:
{
rteKind = (CitusRTEKind) rte->rtekind;
break;
}
#endif
case RTE_FUNCTION:
{
/*


@@ -1362,9 +1362,21 @@ DeleteColocationGroupLocally(uint32 colocationId)
* https://github.com/citusdata/citus/pull/2855#discussion_r313628554
* https://github.com/citusdata/citus/issues/1890
*/
-Relation replicaIndex =
-index_open(RelationGetPrimaryKeyIndex(pgDistColocation),
-AccessShareLock);
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ expects a second “deferrable_ok” flag */
Relation replicaIndex = index_open(
RelationGetPrimaryKeyIndex(pgDistColocation, false),
AccessShareLock
);
#else
/* PG 17- had a single-arg signature */
Relation replicaIndex = index_open(
RelationGetPrimaryKeyIndex(pgDistColocation),
AccessShareLock
);
#endif
simple_heap_delete(pgDistColocation, &(heapTuple->t_self));
CitusInvalidateRelcacheByRelid(DistColocationRelationId());


@@ -239,18 +239,28 @@ CreateCertificatesWhenNeeded()
SSL_CTX *sslContext = NULL;
/*
-* Since postgres might not have initialized ssl at this point we need to initialize
-* it our self to be able to create a context. This code is less extensive then
-* postgres' initialization but that will happen when postgres reloads its
-* configuration with ssl enabled.
+* Ensure the OpenSSL library is initialized so we can create our SSL context.
+* On OpenSSL 1.1.0 we call OPENSSL_init_ssl() (which also loads the default
+* config), and on older versions we fall back to SSL_library_init().
+* PostgreSQL itself will perform its full SSL setup when it reloads
+* its configuration with ssl enabled.
*/
-#ifdef HAVE_OPENSSL_INIT_SSL
+#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L
/* OpenSSL 1.1.0+ */
OPENSSL_init_ssl(OPENSSL_INIT_LOAD_CONFIG, NULL);
#else
/* OpenSSL < 1.1.0 */
SSL_library_init();
#endif
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L
sslContext = SSL_CTX_new(TLS_method());
#else
sslContext = SSL_CTX_new(SSLv23_method());
#endif
if (!sslContext)
{
ereport(WARNING, (errmsg("unable to create ssl context, please verify ssl "
@@ -379,8 +389,17 @@ CreateCertificate(EVP_PKEY *privateKey)
* would fail right after an upgrade. Instead of working until the certificate
* expiration date and then suddenly erroring out.
*/
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L
/* New mutable accessors (present in 1.1, 3.x). */
X509_gmtime_adj(X509_getm_notBefore(certificate), 0);
X509_gmtime_adj(X509_getm_notAfter(certificate), 0);
#else
/* Legacy functions kept for 1.0.x compatibility. */
X509_gmtime_adj(X509_get_notBefore(certificate), 0);
X509_gmtime_adj(X509_get_notAfter(certificate), 0);
#endif
/* Set the public key for our certificate */
X509_set_pubkey(certificate, privateKey);


@@ -9,14 +9,28 @@
*-------------------------------------------------------------------------
*/
-#ifndef COLUMNAR_COMPAT_H
-#define COLUMNAR_COMPAT_H
+#ifndef COLUMNAR_VERSION_COMPAT_H
+#define COLUMNAR_VERSION_COMPAT_H
#include "pg_version_constants.h"
/* for PG_VERSION_NUM and TupleDescAttr() */
#include "postgres.h"
#include "access/htup_details.h"
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
#define ExplainPropertyLong(qlabel, value, es) \
ExplainPropertyInteger(qlabel, NULL, value, es)
/* tuple-descriptor attributes moved in PostgreSQL 18: */
#if PG_VERSION_NUM >= PG_VERSION_18
#define Attr(tupdesc, colno) TupleDescAttr((tupdesc), (colno))
#else
#define Attr(tupdesc, colno) (&((tupdesc)->attrs[(colno)]))
#endif
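A short usage sketch of the Attr() shim, mirroring the call sites changed earlier in this diff and assuming some TupleDesc tupleDescriptor is in scope (illustrative only, not part of the header):

/* compiles unchanged on PG 15 through PG 18 */
for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
{
	Form_pg_attribute attr = Attr(tupleDescriptor, columnIndex);
	if (attr->attisdropped)
	{
		continue;
	}
}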
#endif /* COLUMNAR_COMPAT_H */


@@ -169,7 +169,7 @@ IsNodeWideObjectClass(ObjectClass objectClass)
* If new object classes are added and none of them are node-wide, then update
* this assertion check based on latest supported major Postgres version.
*/
-StaticAssertStmt(PG_MAJORVERSION_NUM <= 17,
+StaticAssertStmt(PG_MAJORVERSION_NUM <= 18,
"better to check if any of newly added ObjectClass'es are node-wide");
switch (objectClass)


@@ -384,9 +384,35 @@ getStxstattarget_compat(HeapTuple tup)
#define matched_compat(a) (a->matchKind == MERGE_WHEN_MATCHED)
-#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, \
-k) create_foreignscan_path(a, b, c, d, e, f, g, h, \
-i, j, k)
#include "nodes/bitmapset.h" /* for Relids */
#include "nodes/pg_list.h" /* for List */
#include "optimizer/pathnode.h" /* for create_foreignscan_path() */
#if PG_VERSION_NUM >= PG_VERSION_18
#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, k) \
create_foreignscan_path( \
(a), /* root */ \
(b), /* rel */ \
(c), /* target */ \
(d), /* rows */ \
0, /* disabled_nodes */ \
(e), /* startup_cost */ \
(f), /* total_cost */ \
(g), /* pathkeys */ \
(h), /* required_outer */ \
(i), /* fdw_outerpath */ \
(j), /* fdw_restrictinfo*/ \
(k) /* fdw_private */ \
)
#else
#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, k) \
create_foreignscan_path( \
(a), (b), (c), (d), \
(e), (f), \
(g), (h), (i), (j), (k) \
)
#endif
#define getProcNo_compat(a) (a->vxid.procNumber)
#define getLxid_compat(a) (a->vxid.lxid)


@@ -15,5 +15,6 @@
#define PG_VERSION_16 160000
#define PG_VERSION_17 170000
#define PG_VERSION_18 180000
#define PG_VERSION_19 190000
#endif /* PG_VERSION_CONSTANTS */