Merge branch 'main' into contributing-dev

pull/7347/head
Onur Tirtir 2025-03-14 18:26:43 +03:00 committed by GitHub
commit 77cd55939d
441 changed files with 16854 additions and 22649 deletions


@ -6,9 +6,12 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
# install build tools
RUN apt update && apt install -y \
bison \
bzip2 \
cpanminus \
curl \
docbook-xml \
docbook-xsl \
flex \
gcc \
git \
@ -20,6 +23,7 @@ RUN apt update && apt install -y \
libreadline-dev \
libselinux1-dev \
libssl-dev \
libxml2-utils \
libxslt-dev \
libzstd-dev \
locales \
@ -32,6 +36,7 @@ RUN apt update && apt install -y \
sudo \
uuid-dev \
valgrind \
xsltproc \
zlib1g-dev \
&& add-apt-repository ppa:deadsnakes/ppa -y \
&& apt install -y \
@ -67,20 +72,8 @@ ENV PATH="/home/citus/.pgenv/pgsql/bin:${PATH}"
USER citus
# build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
FROM base AS pg14
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
# create a staging directory with all files we want to copy from our pgenv build
# we will copy the contents of the staged folder into the final image at once
RUN mkdir .pgenv-staging/
RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg15
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.12
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@ -92,7 +85,19 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg16
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.8
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
# create a staging directory with all files we want to copy from our pgenv build
# we will copy the contents of the staged folder into the final image at once
RUN mkdir .pgenv-staging/
RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg17
RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.4
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@ -193,9 +198,9 @@ RUN git clone https://github.com/so-fancy/diff-so-fancy.git \
COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/
COPY --link --from=pg14 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pg17 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
COPY --link --from=pipenv /home/citus/.local/share/virtualenvs/ /home/citus/.local/share/virtualenvs/
@ -211,7 +216,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~
# sets default pg version
RUN pgenv switch 16.6
RUN pgenv switch 17.4
# make connecting to the coordinator easy
ENV PGPORT=9700

.gitattributes

@ -25,10 +25,9 @@ configure -whitespace
# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_13.c -citus-style
src/backend/distributed/deparser/ruleutils_14.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
src/backend/distributed/deparser/ruleutils_16.c -citus-style
src/backend/distributed/deparser/ruleutils_17.c -citus-style
src/backend/distributed/commands/index_pg_source.c -citus-style
src/include/distributed/citus_nodes.h -citus-style


@ -31,12 +31,12 @@ jobs:
pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
style_checker_tools_version: "0.8.18"
sql_snapshot_pg_version: "16.6"
image_suffix: "-v5779674"
pg14_version: '{ "major": "14", "full": "14.15" }'
pg15_version: '{ "major": "15", "full": "15.10" }'
pg16_version: '{ "major": "16", "full": "16.6" }'
upgrade_pg_versions: "14.15-15.10-16.6"
sql_snapshot_pg_version: "17.4"
image_suffix: "-veab367a"
pg15_version: '{ "major": "15", "full": "15.12" }'
pg16_version: '{ "major": "16", "full": "16.8" }'
pg17_version: '{ "major": "17", "full": "17.4" }'
upgrade_pg_versions: "15.12-16.8-17.4"
steps:
# Since GHA jobs need at least one step we use a noop step here.
- name: Set up parameters
@ -110,9 +110,9 @@ jobs:
image_suffix:
- ${{ needs.params.outputs.image_suffix}}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
@ -141,9 +141,9 @@ jobs:
image_name:
- ${{ needs.params.outputs.test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
make:
- check-split
- check-multi
@ -161,10 +161,6 @@ jobs:
- check-enterprise-isolation-logicalrep-2
- check-enterprise-isolation-logicalrep-3
include:
- make: check-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@ -173,8 +169,8 @@ jobs:
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
@ -185,8 +181,8 @@ jobs:
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg14_version }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
@ -197,6 +193,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
@ -205,10 +205,10 @@ jobs:
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg16_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg17_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@ -217,6 +217,10 @@ jobs:
pg_version: ${{ needs.params.outputs.pg16_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
@ -257,9 +261,9 @@ jobs:
image_name:
- ${{ needs.params.outputs.fail_test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
steps:
- uses: actions/checkout@v4
@ -304,12 +308,12 @@ jobs:
fail-fast: false
matrix:
include:
- old_pg_major: 14
new_pg_major: 15
- old_pg_major: 15
new_pg_major: 16
- old_pg_major: 14
new_pg_major: 16
- old_pg_major: 16
new_pg_major: 17
- old_pg_major: 15
new_pg_major: 17
env:
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
@ -345,10 +349,10 @@ jobs:
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-citus-upgrade:
name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
runs-on: ubuntu-20.04
container:
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
@ -397,7 +401,7 @@ jobs:
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
needs:
- params
- test-citus
@ -451,7 +455,7 @@ jobs:
chmod +x run_hammerdb.sh
run_hammerdb.sh citusbot_tpcc_benchmark_rg
prepare_parallelization_matrix_32:
name: Parallel 32
name: Prepare parallelization matrix
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
needs: test-flakyness-pre
runs-on: ubuntu-20.04
@ -509,7 +513,7 @@ jobs:
name: Test flakyness
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
options: --user root
env:
runs: 8
@ -537,3 +541,5 @@ jobs:
shell: bash
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: test_flakyness_parallel_${{ matrix.id }}


@ -16,6 +16,11 @@ on:
jobs:
docker:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
attestations: write
id-token: write
steps:
-
name: Docker meta


@ -76,4 +76,4 @@ jobs:
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ matrix.id }}
folder: check_flakyness_parallel_${{ matrix.id }}


@ -1,8 +1,33 @@
### citus v13.0.2 (March 12th, 2025) ###
* Fixes a crash in columnar custom scan that happens when a columnar table is
used in a join. (#7647)
* Fixes a bug that breaks `UPDATE SET (...) = (SELECT some_func(),... )`
type of queries on Citus tables (#7914)
* Fixes a planning error caused by a redundant WHERE clause (#7907)
* Fixes a crash in left outer joins that can happen when there is an aggregate
on a column from the inner side of the join. (#7901)
* Fixes deadlock with transaction recovery that is possible during Citus
upgrades. (#7910)
* Fixes a bug that prevents inserting into Citus tables that use
a GENERATED ALWAYS AS IDENTITY column. (#7920)
* Ensures that a MERGE command on a distributed table with a WHEN NOT MATCHED BY
SOURCE clause runs against all shards of the distributed table. (#7900)
* Fixes a bug that breaks router updates on distributed tables
when a reference table is used in the subquery (#7897)
### citus v13.0.1 (February 4th, 2025) ###
* Drops support for PostgreSQL 14 (#7753)
### citus v13.0.0 (January 17, 2025) ###
### citus v13.0.0 (January 22, 2025) ###
* Adds support for PostgreSQL 17 (#7699, #7661)

configure

@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for Citus 12.2devel.
# Generated by GNU Autoconf 2.69 for Citus 13.1devel.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
PACKAGE_VERSION='12.2devel'
PACKAGE_STRING='Citus 12.2devel'
PACKAGE_VERSION='13.1devel'
PACKAGE_STRING='Citus 13.1devel'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures Citus 12.2devel to adapt to many kinds of systems.
\`configure' configures Citus 13.1devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1324,7 +1324,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of Citus 12.2devel:";;
short | recursive ) echo "Configuration of Citus 13.1devel:";;
esac
cat <<\_ACEOF
@ -1429,7 +1429,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
Citus configure 12.2devel
Citus configure 13.1devel
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by Citus $as_me 12.2devel, which was
It was created by Citus $as_me 13.1devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by Citus $as_me 12.2devel, which was
This file was extended by Citus $as_me 13.1devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@ -5455,7 +5455,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
Citus config.status 12.2devel
Citus config.status 13.1devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"


@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.
AC_INIT([Citus], [12.2devel])
AC_INIT([Citus], [13.1devel])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
# we'll need sed and awk for some of the version commands
@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])


@ -363,7 +363,7 @@ ColumnarGetRelationInfoHook(PlannerInfo *root, Oid relationObjectId,
/* disable index-only scan */
IndexOptInfo *indexOptInfo = NULL;
foreach_ptr(indexOptInfo, rel->indexlist)
foreach_declared_ptr(indexOptInfo, rel->indexlist)
{
memset(indexOptInfo->canreturn, false, indexOptInfo->ncolumns * sizeof(bool));
}
@ -381,7 +381,7 @@ RemovePathsByPredicate(RelOptInfo *rel, PathPredicate removePathPredicate)
List *filteredPathList = NIL;
Path *path = NULL;
foreach_ptr(path, rel->pathlist)
foreach_declared_ptr(path, rel->pathlist)
{
if (!removePathPredicate(path))
{
@ -428,7 +428,7 @@ static void
CostColumnarPaths(PlannerInfo *root, RelOptInfo *rel, Oid relationId)
{
Path *path = NULL;
foreach_ptr(path, rel->pathlist)
foreach_declared_ptr(path, rel->pathlist)
{
if (IsA(path, IndexPath))
{
@ -783,7 +783,7 @@ ExtractPushdownClause(PlannerInfo *root, RelOptInfo *rel, Node *node)
List *pushdownableArgs = NIL;
Node *boolExprArg = NULL;
foreach_ptr(boolExprArg, boolExpr->args)
foreach_declared_ptr(boolExprArg, boolExpr->args)
{
Expr *pushdownableArg = ExtractPushdownClause(root, rel,
(Node *) boolExprArg);
@ -1051,6 +1051,15 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses)
candidateRelids = bms_del_members(candidateRelids, rel->relids);
candidateRelids = bms_del_members(candidateRelids, rel->lateral_relids);
/*
* For the relevant PG16 commit requiring this addition:
* postgres/postgres@2489d76
*/
#if PG_VERSION_NUM >= PG_VERSION_16
candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels);
#endif
return candidateRelids;
}
@ -1312,11 +1321,8 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
cpath->methods = &ColumnarScanPathMethods;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* necessary to avoid extra Result node in PG15 */
cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION;
#endif
/*
* populate generic path information
@ -1550,7 +1556,7 @@ ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRe
uint32 maxColumnCount = 0;
uint64 totalStripeSize = 0;
StripeMetadata *stripeMetadata = NULL;
foreach_ptr(stripeMetadata, stripeList)
foreach_declared_ptr(stripeMetadata, stripeList)
{
totalStripeSize += stripeMetadata->dataLength;
maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount);
@ -1924,11 +1930,6 @@ ColumnarScan_EndCustomScan(CustomScanState *node)
*/
TableScanDesc scanDesc = node->ss.ss_currentScanDesc;
/*
* Free the exprcontext
*/
ExecFreeExprContext(&node->ss.ps);
/*
* clean out the tuple table
*/


@ -1685,7 +1685,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
simple_heap_delete(state->rel, tid);
/* execute AFTER ROW DELETE Triggers to enforce constraints */
ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false);
ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL, false);
}
@ -2041,7 +2041,7 @@ GetHighestUsedRowNumber(uint64 storageId)
List *stripeMetadataList = ReadDataFileStripeList(storageId,
GetTransactionSnapshot());
StripeMetadata *stripeMetadata = NULL;
foreach_ptr(stripeMetadata, stripeMetadataList)
foreach_declared_ptr(stripeMetadata, stripeMetadataList)
{
highestRowNumber = Max(highestRowNumber,
StripeGetHighestRowNumber(stripeMetadata));


@ -880,7 +880,7 @@ ReadChunkGroupNextRow(ChunkGroupReadState *chunkGroupReadState, Datum *columnVal
memset(columnNulls, true, sizeof(bool) * chunkGroupReadState->columnCount);
int attno;
foreach_int(attno, chunkGroupReadState->projectedColumnList)
foreach_declared_int(attno, chunkGroupReadState->projectedColumnList)
{
const ChunkData *chunkGroupData = chunkGroupReadState->chunkGroupData;
const int rowIndex = chunkGroupReadState->currentRow;
@ -1489,7 +1489,7 @@ ProjectedColumnMask(uint32 columnCount, List *projectedColumnList)
bool *projectedColumnMask = palloc0(columnCount * sizeof(bool));
int attno;
foreach_int(attno, projectedColumnList)
foreach_declared_int(attno, projectedColumnList)
{
/* attno is 1-indexed; projectedColumnMask is 0-indexed */
int columnIndex = attno - 1;


@ -877,7 +877,7 @@ columnar_relation_set_new_filelocator(Relation rel,
*freezeXid = RecentXmin;
*minmulti = GetOldestMultiXactId();
SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true);
SMgrRelation srel = RelationCreateStorage(*newrlocator, persistence, true);
ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
InitColumnarOptions(rel->rd_id);
@ -1424,15 +1424,32 @@ ConditionalLockRelationWithTimeout(Relation rel, LOCKMODE lockMode, int timeout,
static bool
columnar_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
columnar_scan_analyze_next_block(TableScanDesc scan,
#if PG_VERSION_NUM >= PG_VERSION_17
ReadStream *stream)
#else
BlockNumber blockno,
BufferAccessStrategy bstrategy)
#endif
{
/*
* Our access method is not page-based, i.e. tuples are not confined
* to page boundaries. So not much to do here. We return true anyway
* so acquire_sample_rows() in analyze.c would call our
* columnar_scan_analyze_next_tuple() callback.
* In PG17, we return false in case there is no buffer left, since
* the outer loop changed in acquire_sample_rows(), and it is
* expected for the scan_analyze_next_block function to check whether
* there are any blocks left in the block sampler.
*/
#if PG_VERSION_NUM >= PG_VERSION_17
Buffer buf = read_stream_next_buffer(stream, NULL);
if (!BufferIsValid(buf))
{
return false;
}
ReleaseBuffer(buf);
#endif
return true;
}
@ -2228,7 +2245,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
columnarRangeVar = alterTableStmt->relation;
}
}
#if PG_VERSION_NUM >= PG_VERSION_15
else if (alterTableCmd->subtype == AT_SetAccessMethod)
{
if (columnarRangeVar || *columnarOptions)
@ -2239,14 +2255,15 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
"Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands.")));
}
destIsColumnar = (strcmp(alterTableCmd->name, COLUMNAR_AM_NAME) == 0);
destIsColumnar = (strcmp(alterTableCmd->name ? alterTableCmd->name :
default_table_access_method,
COLUMNAR_AM_NAME) == 0);
if (srcIsColumnar && !destIsColumnar)
{
DeleteColumnarTableOptions(RelationGetRelid(rel), true);
}
}
#endif /* PG_VERSION_15 */
}
relation_close(rel, NoLock);
@ -2630,21 +2647,12 @@ ColumnarCheckLogicalReplication(Relation rel)
return;
}
#if PG_VERSION_NUM >= PG_VERSION_15
{
PublicationDesc pubdesc;
RelationBuildPublicationDesc(rel, &pubdesc);
pubActionInsert = pubdesc.pubactions.pubinsert;
}
#else
if (rel->rd_pubactions == NULL)
{
GetRelationPublicationActions(rel);
Assert(rel->rd_pubactions != NULL);
}
pubActionInsert = rel->rd_pubactions->pubinsert;
#endif
if (pubActionInsert)
{
@ -3085,7 +3093,7 @@ DefElem *
GetExtensionOption(List *extensionOptions, const char *defname)
{
DefElem *defElement = NULL;
foreach_ptr(defElement, extensionOptions)
foreach_declared_ptr(defElement, extensionOptions)
{
if (IsA(defElement, DefElem) &&
strncmp(defElement->defname, defname, NAMEDATALEN) == 0)


@ -22,6 +22,8 @@
#include "utils/rel.h"
#include "utils/typcache.h"
#include "pg_version_constants.h"
PG_MODULE_MAGIC;
extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
@ -435,6 +437,74 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation
return;
}
#if PG_VERSION_NUM >= PG_VERSION_17
/* Check the ReorderBufferChange's action type and handle them accordingly.*/
switch (change->action)
{
case REORDER_BUFFER_CHANGE_INSERT:
{
/* For insert action, only the new tuple should always be translated */
HeapTuple sourceRelationNewTuple = change->data.tp.newtuple;
HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc(
sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
change->data.tp.newtuple = targetRelationNewTuple;
break;
}
/*
* For update changes both old and new tuples need to be translated for target relation
* if the REPLICA IDENTITY is set to FULL. Otherwise, only the new tuple needs to be
* translated for target relation.
*/
case REORDER_BUFFER_CHANGE_UPDATE:
{
/* For update action, the new tuple should always be translated */
/* Get the new tuple from the ReorderBufferChange, and translate it to target relation. */
HeapTuple sourceRelationNewTuple = change->data.tp.newtuple;
HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc(
sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
change->data.tp.newtuple = targetRelationNewTuple;
/*
* Format oldtuple according to the target relation. If the column values of replica
* identity change, then the old tuple is non-null and needs to be formatted according
* to the target relation schema.
*/
if (change->data.tp.oldtuple != NULL)
{
HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple;
HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc(
sourceRelationOldTuple,
sourceRelationDesc,
targetRelationDesc);
change->data.tp.oldtuple = targetRelationOldTuple;
}
break;
}
case REORDER_BUFFER_CHANGE_DELETE:
{
/* For delete action, only the old tuple should be translated */
HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple;
HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc(
sourceRelationOldTuple,
sourceRelationDesc,
targetRelationDesc);
change->data.tp.oldtuple = targetRelationOldTuple;
break;
}
default:
{
/* Do nothing for other action types. */
break;
}
}
#else
/* Check the ReorderBufferChange's action type and handle them accordingly.*/
switch (change->action)
{
@ -499,4 +569,5 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation
break;
}
}
#endif
}


@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '12.2-1'
default_version = '13.1-1'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog


@ -145,17 +145,6 @@ LogicalClockShmemSize(void)
void
InitializeClusterClockMem(void)
{
/* On PG 15 and above, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory for pre PG-15 versions */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(LogicalClockShmemSize());
}
#endif
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = LogicalClockShmemInit;
}
@ -328,7 +317,7 @@ GetHighestClockInTransaction(List *nodeConnectionList)
{
MultiConnection *connection = NULL;
foreach_ptr(connection, nodeConnectionList)
foreach_declared_ptr(connection, nodeConnectionList)
{
int querySent =
SendRemoteCommand(connection, "SELECT citus_get_node_clock();");
@ -349,7 +338,7 @@ GetHighestClockInTransaction(List *nodeConnectionList)
globalClockValue->counter)));
/* fetch the results and pick the highest clock value of all the nodes */
foreach_ptr(connection, nodeConnectionList)
foreach_declared_ptr(connection, nodeConnectionList)
{
bool raiseInterrupts = true;
@ -431,6 +420,11 @@ PrepareAndSetTransactionClock(void)
MultiConnection *connection = dlist_container(MultiConnection, transactionNode,
iter.cur);
WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port);
if (!workerNode)
{
ereport(WARNING, errmsg("Worker node is missing"));
continue;
}
/* Skip the node if we already in the list */
if (list_member_int(nodeList, workerNode->groupId))


@ -414,7 +414,7 @@ UndistributeTables(List *relationIdList)
*/
List *originalForeignKeyRecreationCommands = NIL;
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
List *fkeyCommandsForRelation =
GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId,
@ -802,7 +802,7 @@ ConvertTableInternal(TableConversionState *con)
List *partitionList = PartitionList(con->relationId);
Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
char *tableQualifiedName = generate_qualified_relation_name(
partitionRelationId);
@ -873,7 +873,7 @@ ConvertTableInternal(TableConversionState *con)
}
TableDDLCommand *tableCreationCommand = NULL;
foreach_ptr(tableCreationCommand, preLoadCommands)
foreach_declared_ptr(tableCreationCommand, preLoadCommands)
{
Assert(CitusIsA(tableCreationCommand, TableDDLCommand));
@ -947,7 +947,7 @@ ConvertTableInternal(TableConversionState *con)
con->suppressNoticeMessages);
TableDDLCommand *tableConstructionCommand = NULL;
foreach_ptr(tableConstructionCommand, postLoadCommands)
foreach_declared_ptr(tableConstructionCommand, postLoadCommands)
{
Assert(CitusIsA(tableConstructionCommand, TableDDLCommand));
char *tableConstructionSQL = GetTableDDLCommand(tableConstructionCommand);
@ -965,7 +965,7 @@ ConvertTableInternal(TableConversionState *con)
MemoryContext oldContext = MemoryContextSwitchTo(citusPerPartitionContext);
char *attachPartitionCommand = NULL;
foreach_ptr(attachPartitionCommand, attachPartitionCommands)
foreach_declared_ptr(attachPartitionCommand, attachPartitionCommands)
{
MemoryContextReset(citusPerPartitionContext);
@ -990,7 +990,7 @@ ConvertTableInternal(TableConversionState *con)
/* For now we only support cascade to colocation for alter_distributed_table UDF */
Assert(con->conversionType == ALTER_DISTRIBUTED_TABLE);
foreach_oid(colocatedTableId, con->colocatedTableList)
foreach_declared_oid(colocatedTableId, con->colocatedTableList)
{
if (colocatedTableId == con->relationId)
{
@ -1018,7 +1018,7 @@ ConvertTableInternal(TableConversionState *con)
if (con->cascadeToColocated != CASCADE_TO_COLOCATED_NO_ALREADY_CASCADED)
{
char *foreignKeyCommand = NULL;
foreach_ptr(foreignKeyCommand, foreignKeyCommands)
foreach_declared_ptr(foreignKeyCommand, foreignKeyCommands)
{
ExecuteQueryViaSPI(foreignKeyCommand, SPI_OK_UTILITY);
}
@ -1054,7 +1054,7 @@ CopyTableConversionReturnIntoCurrentContext(TableConversionReturn *tableConversi
tableConversionReturnCopy = palloc0(sizeof(TableConversionReturn));
List *copyForeignKeyCommands = NIL;
char *foreignKeyCommand = NULL;
foreach_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands)
foreach_declared_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands)
{
char *copyForeignKeyCommand = MemoryContextStrdup(CurrentMemoryContext,
foreignKeyCommand);
@ -1129,7 +1129,7 @@ DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages)
RelationClose(columnarRelation);
Oid indexId = InvalidOid;
foreach_oid(indexId, indexIdList)
foreach_declared_oid(indexId, indexIdList)
{
char *indexAmName = GetIndexAccessMethodName(indexId);
if (extern_ColumnarSupportsIndexAM(indexAmName))
@ -1389,7 +1389,7 @@ CreateTableConversion(TableConversionParameters *params)
* since they will be handled separately.
*/
Oid colocatedTableId = InvalidOid;
foreach_oid(colocatedTableId, colocatedTableList)
foreach_declared_oid(colocatedTableId, colocatedTableList)
{
if (PartitionTable(colocatedTableId))
{
@ -1605,7 +1605,7 @@ DoesCascadeDropUnsupportedObject(Oid classId, Oid objectId, HTAB *nodeMap)
targetObjectId);
HeapTuple depTup = NULL;
foreach_ptr(depTup, dependencyTupleList)
foreach_declared_ptr(depTup, dependencyTupleList)
{
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
@ -1645,7 +1645,7 @@ GetViewCreationCommandsOfTable(Oid relationId)
List *commands = NIL;
Oid viewOid = InvalidOid;
foreach_oid(viewOid, views)
foreach_declared_oid(viewOid, views)
{
StringInfo query = makeStringInfo();
@ -1683,7 +1683,7 @@ WrapTableDDLCommands(List *commandStrings)
List *tableDDLCommands = NIL;
char *command = NULL;
foreach_ptr(command, commandStrings)
foreach_declared_ptr(command, commandStrings)
{
tableDDLCommands = lappend(tableDDLCommands, makeTableDDLCommandString(command));
}
@ -1840,7 +1840,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
*/
List *ownedSequences = getOwnedSequences_internal(sourceId, 0, DEPENDENCY_AUTO);
Oid sequenceOid = InvalidOid;
foreach_oid(sequenceOid, ownedSequences)
foreach_declared_oid(sequenceOid, ownedSequences)
{
changeDependencyFor(RelationRelationId, sequenceOid,
RelationRelationId, sourceId, targetId);
@ -1873,7 +1873,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
}
char *justBeforeDropCommand = NULL;
foreach_ptr(justBeforeDropCommand, justBeforeDropCommands)
foreach_declared_ptr(justBeforeDropCommand, justBeforeDropCommands)
{
ExecuteQueryViaSPI(justBeforeDropCommand, SPI_OK_UTILITY);
}
@ -1987,7 +1987,7 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con)
Oid colocatedTableOid = InvalidOid;
text *colocateWithText = cstring_to_text(con->colocateWith);
Oid colocateWithTableOid = ResolveRelationId(colocateWithText, false);
foreach_oid(colocatedTableOid, con->colocatedTableList)
foreach_declared_oid(colocatedTableOid, con->colocatedTableList)
{
if (colocateWithTableOid == colocatedTableOid)
{
@ -2214,7 +2214,7 @@ WillRecreateForeignKeyToReferenceTable(Oid relationId,
{
List *colocatedTableList = ColocatedTableList(relationId);
Oid colocatedTableOid = InvalidOid;
foreach_oid(colocatedTableOid, colocatedTableList)
foreach_declared_oid(colocatedTableOid, colocatedTableList)
{
if (HasForeignKeyToReferenceTable(colocatedTableOid))
{
@ -2242,7 +2242,7 @@ WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId)
List *foreignKeys = list_concat(referencingForeingKeys, referencedForeignKeys);
Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeys)
foreach_declared_oid(foreignKeyOid, foreignKeys)
{
ereport(WARNING, (errmsg("foreign key %s will be dropped",
get_constraint_name(foreignKeyOid))));


@ -33,7 +33,7 @@ SaveBeginCommandProperties(TransactionStmt *transactionStmt)
*
* While BEGIN can be quite frequent it will rarely have options set.
*/
foreach_ptr(item, transactionStmt->options)
foreach_declared_ptr(item, transactionStmt->options)
{
A_Const *constant = (A_Const *) item->arg;


@ -168,7 +168,7 @@ GetPartitionRelationIds(List *relationIdList)
List *partitionRelationIdList = NIL;
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (PartitionTable(relationId))
{
@ -189,7 +189,7 @@ LockRelationsWithLockMode(List *relationIdList, LOCKMODE lockMode)
{
Oid relationId;
relationIdList = SortList(relationIdList, CompareOids);
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
LockRelationOid(relationId, lockMode);
}
@ -207,7 +207,7 @@ static void
ErrorIfConvertingMultiLevelPartitionedTable(List *relationIdList)
{
Oid relationId;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (PartitionedTable(relationId) && PartitionTable(relationId))
{
@ -236,7 +236,7 @@ void
ErrorIfAnyPartitionRelationInvolvedInNonInheritedFKey(List *relationIdList)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (!PartitionTable(relationId))
{
@ -300,7 +300,7 @@ bool
RelationIdListHasReferenceTable(List *relationIdList)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (IsCitusTableType(relationId, REFERENCE_TABLE))
{
@ -322,7 +322,7 @@ GetFKeyCreationCommandsForRelationIdList(List *relationIdList)
List *fKeyCreationCommands = NIL;
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
List *relationFKeyCreationCommands =
GetReferencingForeignConstaintCommands(relationId);
@ -342,7 +342,7 @@ static void
DropRelationIdListForeignKeys(List *relationIdList, int fKeyFlags)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
DropRelationForeignKeys(relationId, fKeyFlags);
}
@ -399,7 +399,7 @@ GetRelationDropFkeyCommands(Oid relationId, int fKeyFlags)
List *relationFKeyIdList = GetForeignKeyOids(relationId, fKeyFlags);
Oid foreignKeyId;
foreach_oid(foreignKeyId, relationFKeyIdList)
foreach_declared_oid(foreignKeyId, relationFKeyIdList)
{
char *dropFkeyCascadeCommand = GetDropFkeyCascadeCommand(foreignKeyId);
dropFkeyCascadeCommandList = lappend(dropFkeyCascadeCommandList,
@ -450,7 +450,7 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
cascadeOperationType)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
/*
* The reason behind skipping certain table types in below loop is
@ -531,7 +531,7 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
PG_TRY();
{
char *utilityCommand = NULL;
foreach_ptr(utilityCommand, utilityCommandList)
foreach_declared_ptr(utilityCommand, utilityCommandList)
{
/*
* CREATE MATERIALIZED VIEW commands need to be parsed/transformed,
@ -569,7 +569,7 @@ void
ExecuteAndLogUtilityCommandList(List *utilityCommandList)
{
char *utilityCommand = NULL;
foreach_ptr(utilityCommand, utilityCommandList)
foreach_declared_ptr(utilityCommand, utilityCommandList)
{
ExecuteAndLogUtilityCommand(utilityCommand);
}
@ -597,7 +597,7 @@ void
ExecuteForeignKeyCreateCommandList(List *ddlCommandList, bool skip_validation)
{
char *ddlCommand = NULL;
foreach_ptr(ddlCommand, ddlCommandList)
foreach_declared_ptr(ddlCommand, ddlCommandList)
{
ExecuteForeignKeyCreateCommand(ddlCommand, skip_validation);
}


@ -588,7 +588,7 @@ ErrorIfOptionListHasNoTableName(List *optionList)
{
char *table_nameString = "table_name";
DefElem *option = NULL;
foreach_ptr(option, optionList)
foreach_declared_ptr(option, optionList)
{
char *optionName = option->defname;
if (strcmp(optionName, table_nameString) == 0)
@ -613,7 +613,7 @@ ForeignTableDropsTableNameOption(List *optionList)
{
char *table_nameString = "table_name";
DefElem *option = NULL;
foreach_ptr(option, optionList)
foreach_declared_ptr(option, optionList)
{
char *optionName = option->defname;
DefElemAction optionAction = option->defaction;
@ -732,7 +732,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted)
List *relationIdList = NIL;
Oid relid = InvalidOid;
foreach_oid(relid, relationIds)
foreach_declared_oid(relid, relationIds)
{
List *connectedRelations = GetForeignKeyConnectedRelationIdList(relid);
relationIdList = list_concat_unique_oid(relationIdList, connectedRelations);
@ -740,7 +740,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted)
relationIdList = SortList(relationIdList, CompareOids);
foreach_oid(relid, relationIdList)
foreach_declared_oid(relid, relationIdList)
{
UpdatePgDistPartitionAutoConverted(relid, autoConverted);
}
@ -776,7 +776,7 @@ GetShellTableDDLEventsForCitusLocalTable(Oid relationId)
List *shellTableDDLEvents = NIL;
TableDDLCommand *tableDDLCommand = NULL;
foreach_ptr(tableDDLCommand, tableDDLCommands)
foreach_declared_ptr(tableDDLCommand, tableDDLCommands)
{
Assert(CitusIsA(tableDDLCommand, TableDDLCommand));
shellTableDDLEvents = lappend(shellTableDDLEvents,
@ -863,7 +863,7 @@ RenameShardRelationConstraints(Oid shardRelationId, uint64 shardId)
List *constraintNameList = GetConstraintNameList(shardRelationId);
char *constraintName = NULL;
foreach_ptr(constraintName, constraintNameList)
foreach_declared_ptr(constraintName, constraintNameList)
{
const char *commandString =
GetRenameShardConstraintCommand(shardRelationId, constraintName, shardId);
@ -958,7 +958,7 @@ RenameShardRelationIndexes(Oid shardRelationId, uint64 shardId)
List *indexOidList = GetExplicitIndexOidList(shardRelationId);
Oid indexOid = InvalidOid;
foreach_oid(indexOid, indexOidList)
foreach_declared_oid(indexOid, indexOidList)
{
const char *commandString = GetRenameShardIndexCommand(indexOid, shardId);
ExecuteAndLogUtilityCommand(commandString);
@ -1008,7 +1008,7 @@ RenameShardRelationStatistics(Oid shardRelationId, uint64 shardId)
List *statsCommandList = GetRenameStatsCommandList(statsOidList, shardId);
char *command = NULL;
foreach_ptr(command, statsCommandList)
foreach_declared_ptr(command, statsCommandList)
{
ExecuteAndLogUtilityCommand(command);
}
@ -1044,7 +1044,7 @@ RenameShardRelationNonTruncateTriggers(Oid shardRelationId, uint64 shardId)
List *triggerIdList = GetExplicitTriggerIdList(shardRelationId);
Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList)
foreach_declared_oid(triggerId, triggerIdList)
{
bool missingOk = false;
HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk);
@ -1097,7 +1097,7 @@ DropRelationTruncateTriggers(Oid relationId)
List *triggerIdList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList)
foreach_declared_oid(triggerId, triggerIdList)
{
bool missingOk = false;
HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk);
@ -1175,7 +1175,7 @@ DropIdentitiesOnTable(Oid relationId)
relation_close(relation, NoLock);
char *dropCommand = NULL;
foreach_ptr(dropCommand, dropCommandList)
foreach_declared_ptr(dropCommand, dropCommandList)
{
/*
* We need to disable/enable ddl propagation for this command, to prevent
@ -1218,7 +1218,7 @@ DropViewsOnTable(Oid relationId)
List *reverseOrderedViews = ReversedOidList(views);
Oid viewId = InvalidOid;
foreach_oid(viewId, reverseOrderedViews)
foreach_declared_oid(viewId, reverseOrderedViews)
{
char *qualifiedViewName = generate_qualified_relation_name(viewId);
@ -1241,7 +1241,7 @@ ReversedOidList(List *oidList)
{
List *reversed = NIL;
Oid oid = InvalidOid;
foreach_oid(oid, oidList)
foreach_declared_oid(oid, oidList)
{
reversed = lcons_oid(oid, reversed);
}
@ -1293,7 +1293,7 @@ GetRenameStatsCommandList(List *statsOidList, uint64 shardId)
{
List *statsCommandList = NIL;
Oid statsOid;
foreach_oid(statsOid, statsOidList)
foreach_declared_oid(statsOid, statsOidList)
{
HeapTuple tup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsOid));


@ -115,7 +115,7 @@ static bool
IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
{
DefElem *opt = NULL;
foreach_ptr(opt, clusterStmt->params)
foreach_declared_ptr(opt, clusterStmt->params)
{
if (strcmp(opt->defname, "verbose") == 0)
{


@ -68,8 +68,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
char *collcollate;
char *collctype;
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* In PG15, there is an added option to use ICU as global locale provider.
* pg_collation has three locale-related fields: collcollate and collctype,
@ -77,7 +75,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
* ICU-related field. Only the libc-related fields or the ICU-related field
* is set, never both.
*/
char *colliculocale;
char *colllocale;
bool isnull;
Datum datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collcollate,
@ -101,27 +99,17 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
collctype = NULL;
}
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_colliculocale, &isnull);
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_colllocale, &isnull);
if (!isnull)
{
colliculocale = TextDatumGetCString(datum);
colllocale = TextDatumGetCString(datum);
}
else
{
colliculocale = NULL;
colllocale = NULL;
}
Assert((collcollate && collctype) || colliculocale);
#else
/*
* In versions before 15, collcollate and collctype were type "name". Use
* pstrdup() to match the interface of 15 so that we consistently free the
* result later.
*/
collcollate = pstrdup(NameStr(collationForm->collcollate));
collctype = pstrdup(NameStr(collationForm->collctype));
#endif
Assert((collcollate && collctype) || colllocale);
if (collowner != NULL)
{
@ -132,6 +120,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
char *schemaName = get_namespace_name(collnamespace);
*quotedCollationName = quote_qualified_identifier(schemaName, collname);
const char *providerString =
collprovider == COLLPROVIDER_BUILTIN ? "builtin" :
collprovider == COLLPROVIDER_DEFAULT ? "default" :
collprovider == COLLPROVIDER_ICU ? "icu" :
collprovider == COLLPROVIDER_LIBC ? "libc" : NULL;
@ -146,13 +135,12 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
"CREATE COLLATION %s (provider = '%s'",
*quotedCollationName, providerString);
#if PG_VERSION_NUM >= PG_VERSION_15
if (colliculocale)
if (colllocale)
{
appendStringInfo(&collationNameDef,
", locale = %s",
quote_literal_cstr(colliculocale));
pfree(colliculocale);
quote_literal_cstr(colllocale));
pfree(colllocale);
}
else
{
@ -172,24 +160,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
pfree(collcollate);
pfree(collctype);
}
#else
if (strcmp(collcollate, collctype) == 0)
{
appendStringInfo(&collationNameDef,
", locale = %s",
quote_literal_cstr(collcollate));
}
else
{
appendStringInfo(&collationNameDef,
", lc_collate = %s, lc_ctype = %s",
quote_literal_cstr(collcollate),
quote_literal_cstr(collctype));
}
pfree(collcollate);
pfree(collctype);
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
char *collicurules = NULL;
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);


@ -235,7 +235,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
List *distributedObjects = NIL;
List *distributedObjectAddresses = NIL;
Node *object = NULL;
foreach_ptr(object, stmt->objects)
foreach_declared_ptr(object, stmt->objects)
{
/* TODO understand if the lock should be sth else */
Relation rel = NULL; /* not used, but required to pass to get_object_address */
@ -267,7 +267,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
* remove the entries for the distributed objects on dropping
*/
ObjectAddress *address = NULL;
foreach_ptr(address, distributedObjectAddresses)
foreach_declared_ptr(address, distributedObjectAddresses)
{
UnmarkObjectDistributed(address);
}
@ -303,7 +303,7 @@ DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL;
List *objNameList = NIL;
foreach_ptr(objNameList, stmt->objects)
foreach_declared_ptr(objNameList, stmt->objects)
{
Oid tsdictOid = get_ts_dict_oid(objNameList, missing_ok);
@ -328,7 +328,7 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces
List *objectAddresses = NIL;
List *objNameList = NIL;
foreach_ptr(objNameList, stmt->objects)
foreach_declared_ptr(objNameList, stmt->objects)
{
Oid tsconfigOid = get_ts_config_oid(objNameList, missing_ok);


@ -170,12 +170,10 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId,
static void CopyLocalDataIntoShards(Oid distributedTableId);
static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
Var *distributionColumn);
static int numeric_typmod_scale(int32 typmod);
static bool is_valid_numeric_typmod(int32 typmod);
#endif
static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
Var *distributionColumn);
@ -834,7 +832,7 @@ HashSplitPointsForShardList(List *shardList)
List *splitPointList = NIL;
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardList)
foreach_declared_ptr(shardInterval, shardList)
{
int32 shardMaxValue = DatumGetInt32(shardInterval->maxValue);
@ -890,7 +888,7 @@ WorkerNodesForShardList(List *shardList)
List *nodeIdList = NIL;
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardList)
foreach_declared_ptr(shardInterval, shardList)
{
WorkerNode *workerNode = ActiveShardPlacementWorkerNode(shardInterval->shardId);
nodeIdList = lappend_int(nodeIdList, workerNode->nodeId);
@ -1337,7 +1335,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
ALLOCSET_DEFAULT_SIZES);
MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext);
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
MemoryContextReset(citusPartitionContext);
@ -1551,7 +1549,7 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType,
MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext);
Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
MemoryContextReset(citusPartitionContext);
@ -1701,7 +1699,7 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId
Oid attrDefOid;
List *attrDefOids = GetAttrDefsFromSequence(seqOid);
foreach_oid(attrDefOid, attrDefOids)
foreach_declared_oid(attrDefOid, attrDefOids)
{
ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid);
@ -1783,7 +1781,7 @@ static void
EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList)
{
SequenceInfo *seqInfo = NULL;
foreach_ptr(seqInfo, seqInfoList)
foreach_declared_ptr(seqInfo, seqInfoList)
{
if (!seqInfo->isNextValDefault)
{
@ -2114,8 +2112,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
"AS (...) STORED.")));
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* verify target relation is not distributed by a column of type numeric with negative scale */
if (distributionMethod != DISTRIBUTE_BY_NONE &&
DistributionColumnUsesNumericColumnNegativeScale(relationDesc,
@ -2126,7 +2122,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
errdetail("Distribution column must not use numeric type "
"with negative scale")));
}
#endif
/* check for support function needed by specified partition method */
if (distributionMethod == DISTRIBUTE_BY_HASH)
@ -2844,8 +2839,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* is_valid_numeric_typmod checks if the typmod value is valid
*
@ -2895,8 +2888,6 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
}
#endif
/*
* DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
* GENERATED ALWAYS AS (...) STORED on distribution column


@ -79,11 +79,8 @@ typedef struct DatabaseCollationInfo
{
char *datcollate;
char *datctype;
#if PG_VERSION_NUM >= PG_VERSION_15
char *daticulocale;
char *datcollversion;
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
char *daticurules;
@ -94,9 +91,7 @@ static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database
databaseForm);
static DatabaseCollationInfo GetDatabaseCollation(Oid dbOid);
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
#if PG_VERSION_NUM >= PG_VERSION_15
static char * GetLocaleProviderString(char datlocprovider);
#endif
static char * GetTablespaceName(Oid tablespaceOid);
static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName,
bool missingOk);
@ -235,7 +230,7 @@ FilterDistributedDatabases(List *databases)
{
List *distributedDatabases = NIL;
String *databaseName = NULL;
foreach_ptr(databaseName, databases)
foreach_declared_ptr(databaseName, databases)
{
bool missingOk = true;
ObjectAddress *dbAddress =
@ -258,7 +253,7 @@ static bool
IsSetTablespaceStatement(AlterDatabaseStmt *stmt)
{
DefElem *def = NULL;
foreach_ptr(def, stmt->options)
foreach_declared_ptr(def, stmt->options)
{
if (strcmp(def->defname, "tablespace") == 0)
{
@ -320,8 +315,6 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to
* the local postgres instance.
@ -359,9 +352,6 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
}
#endif
/*
* PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to
* the local postgres instance.
@ -510,7 +500,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock);
WorkerNode *remoteNode = NULL;
foreach_ptr(remoteNode, remoteNodes)
foreach_declared_ptr(remoteNode, remoteNodes)
{
InsertCleanupRecordOutsideTransaction(
CLEANUP_OBJECT_DATABASE,
@ -733,7 +723,7 @@ void
EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt)
{
DefElem *option = NULL;
foreach_ptr(option, stmt->options)
foreach_declared_ptr(option, stmt->options)
{
if (strcmp(option->defname, "oid") == 0)
{
@ -849,9 +839,7 @@ GetDatabaseCollation(Oid dbOid)
Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull);
info.datctype = TextDatumGetCString(ctypeDatum);
#if PG_VERSION_NUM >= PG_VERSION_15
Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc,
Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_datlocale, tupdesc,
&isNull);
if (!isNull)
{
@ -864,7 +852,6 @@ GetDatabaseCollation(Oid dbOid)
{
info.datcollversion = TextDatumGetCString(collverDatum);
}
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc,
@ -882,8 +869,6 @@ GetDatabaseCollation(Oid dbOid)
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* GetLocaleProviderString gets the datlocprovider stored in pg_database
* and returns the string representation of the datlocprovider
@ -912,9 +897,6 @@ GetLocaleProviderString(char datlocprovider)
}
#endif
/*
* GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the
* CREATE DATABASE statement that can be used to create given database.
@ -956,7 +938,6 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
appendStringInfo(&str, " ENCODING = %s",
quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding)));
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.datcollversion != NULL)
{
appendStringInfo(&str, " COLLATION_VERSION = %s",
@ -972,7 +953,6 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
appendStringInfo(&str, " LOCALE_PROVIDER = %s",
quote_identifier(GetLocaleProviderString(
databaseForm->datlocprovider)));
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
if (collInfo.daticurules != NULL)


@ -162,7 +162,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
}
ObjectAddress *object = NULL;
foreach_ptr(object, objectsToBeCreated)
foreach_declared_ptr(object, objectsToBeCreated)
{
List *dependencyCommands = GetDependencyCreateDDLCommands(object);
ddlCommands = list_concat(ddlCommands, dependencyCommands);
@ -201,7 +201,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
*/
List *addressSortedDependencies = SortList(objectsWithCommands,
ObjectAddressComparator);
foreach_ptr(object, addressSortedDependencies)
foreach_declared_ptr(object, addressSortedDependencies)
{
LockDatabaseObject(object->classId, object->objectId,
object->objectSubId, ExclusiveLock);
@ -240,7 +240,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
else
{
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, remoteNodeList)
foreach_declared_ptr(workerNode, remoteNodeList)
{
const char *nodeName = workerNode->workerName;
uint32 nodePort = workerNode->workerPort;
@ -256,7 +256,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
* that objects have been created on remote nodes before marking them
* distributed, so MarkObjectDistributed wouldn't fail.
*/
foreach_ptr(object, objectsWithCommands)
foreach_declared_ptr(object, objectsWithCommands)
{
/*
* pg_dist_object entries must be propagated with the super user, since
@ -279,7 +279,7 @@ void
EnsureAllObjectDependenciesExistOnAllNodes(const List *targets)
{
ObjectAddress *target = NULL;
foreach_ptr(target, targets)
foreach_declared_ptr(target, targets)
{
EnsureDependenciesExistOnAllNodes(target);
}
@ -336,7 +336,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress)
List *dependencies = GetAllDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
if (dependency->classId == objectAddress->classId &&
dependency->objectId == objectAddress->objectId &&
@ -424,7 +424,7 @@ GetDistributableDependenciesForObject(const ObjectAddress *target)
/* filter the ones that can be distributed */
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
/*
* TODO: maybe we can optimize the logic applied in below line. Actually we
@ -508,7 +508,7 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
INCLUDE_IDENTITY,
creatingShellTableOnRemoteNode);
TableDDLCommand *tableDDLCommand = NULL;
foreach_ptr(tableDDLCommand, tableDDLCommands)
foreach_declared_ptr(tableDDLCommand, tableDDLCommands)
{
Assert(CitusIsA(tableDDLCommand, TableDDLCommand));
commandList = lappend(commandList, GetTableDDLCommand(
@ -683,7 +683,7 @@ GetAllDependencyCreateDDLCommands(const List *dependencies)
List *commands = NIL;
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
commands = list_concat(commands, GetDependencyCreateDDLCommands(dependency));
}
@ -831,7 +831,7 @@ bool
ShouldPropagateAnyObject(List *addresses)
{
ObjectAddress *address = NULL;
foreach_ptr(address, addresses)
foreach_declared_ptr(address, addresses)
{
if (ShouldPropagateObject(address))
{
@ -853,7 +853,7 @@ FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate pre
List *result = NIL;
ObjectAddress *address = NULL;
foreach_ptr(address, objectAddressList)
foreach_declared_ptr(address, objectAddressList)
{
if (predicate(address))
{


@ -521,7 +521,6 @@ static DistributeObjectOps Database_Drop = {
.markDistributed = false,
};
#if PG_VERSION_NUM >= PG_VERSION_15
static DistributeObjectOps Database_RefreshColl = {
.deparse = DeparseAlterDatabaseRefreshCollStmt,
.qualify = NULL,
@ -532,7 +531,6 @@ static DistributeObjectOps Database_RefreshColl = {
.address = NULL,
.markDistributed = false,
};
#endif
static DistributeObjectOps Database_Set = {
.deparse = DeparseAlterDatabaseSetStmt,
@ -926,7 +924,6 @@ static DistributeObjectOps Sequence_AlterOwner = {
.address = AlterSequenceOwnerStmtObjectAddress,
.markDistributed = false,
};
#if (PG_VERSION_NUM >= PG_VERSION_15)
static DistributeObjectOps Sequence_AlterPersistence = {
.deparse = DeparseAlterSequencePersistenceStmt,
.qualify = QualifyAlterSequencePersistenceStmt,
@ -936,7 +933,6 @@ static DistributeObjectOps Sequence_AlterPersistence = {
.address = AlterSequencePersistenceStmtObjectAddress,
.markDistributed = false,
};
#endif
static DistributeObjectOps Sequence_Drop = {
.deparse = DeparseDropSequenceStmt,
.qualify = QualifyDropSequenceStmt,
@ -1393,7 +1389,7 @@ static DistributeObjectOps View_Rename = {
static DistributeObjectOps Trigger_Rename = {
.deparse = NULL,
.qualify = NULL,
.preprocess = PreprocessAlterTriggerRenameStmt,
.preprocess = NULL,
.operationType = DIST_OPS_ALTER,
.postprocess = PostprocessAlterTriggerRenameStmt,
.address = NULL,
@ -1425,14 +1421,11 @@ GetDistributeObjectOps(Node *node)
return &Database_Drop;
}
#if PG_VERSION_NUM >= PG_VERSION_15
case T_AlterDatabaseRefreshCollStmt:
{
return &Database_RefreshColl;
}
#endif
case T_AlterDatabaseSetStmt:
{
return &Database_Set;
@ -1723,7 +1716,6 @@ GetDistributeObjectOps(Node *node)
case OBJECT_SEQUENCE:
{
#if (PG_VERSION_NUM >= PG_VERSION_15)
ListCell *cmdCell = NULL;
foreach(cmdCell, stmt->cmds)
{
@ -1751,7 +1743,6 @@ GetDistributeObjectOps(Node *node)
}
}
}
#endif
/*
* Prior to PG15, the only Alter Table statement


@ -210,7 +210,7 @@ MakeCollateClauseFromOid(Oid collationOid)
getObjectIdentityParts(&collateAddress, &objName, &objArgs, false);
char *name = NULL;
foreach_ptr(name, objName)
foreach_declared_ptr(name, objName)
{
collateClause->collname = lappend(collateClause->collname, makeString(name));
}


@ -274,7 +274,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
/* unmark each distributed extension */
ObjectAddress *address = NULL;
foreach_ptr(address, distributedExtensionAddresses)
foreach_declared_ptr(address, distributedExtensionAddresses)
{
UnmarkObjectDistributed(address);
}
@ -313,7 +313,7 @@ FilterDistributedExtensions(List *extensionObjectList)
List *extensionNameList = NIL;
String *objectName = NULL;
foreach_ptr(objectName, extensionObjectList)
foreach_declared_ptr(objectName, extensionObjectList)
{
const char *extensionName = strVal(objectName);
const bool missingOk = true;
@ -351,7 +351,7 @@ ExtensionNameListToObjectAddressList(List *extensionObjectList)
List *extensionObjectAddressList = NIL;
String *objectName;
foreach_ptr(objectName, extensionObjectList)
foreach_declared_ptr(objectName, extensionObjectList)
{
/*
* We set missingOk to false as we assume all the objects in
@ -527,7 +527,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
Oid citusTableId = InvalidOid;
foreach_oid(citusTableId, citusTableIdList)
foreach_declared_oid(citusTableId, citusTableIdList)
{
if (!ShouldMarkRelationDistributed(citusTableId))
{
@ -571,7 +571,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
*/
List *viewList = GetAllViews();
Oid viewOid = InvalidOid;
foreach_oid(viewOid, viewList)
foreach_declared_oid(viewOid, viewList)
{
if (!ShouldMarkRelationDistributed(viewOid))
{
@ -605,7 +605,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
List *distributedObjectAddressList = GetDistributedObjectAddressList();
ObjectAddress *distributedObjectAddress = NULL;
foreach_ptr(distributedObjectAddress, distributedObjectAddressList)
foreach_declared_ptr(distributedObjectAddress, distributedObjectAddressList)
{
List *distributableDependencyObjectAddresses =
GetDistributableDependenciesForObject(distributedObjectAddress);
@ -627,7 +627,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
SetLocalEnableMetadataSync(false);
ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, uniqueObjectAddresses)
foreach_declared_ptr(objectAddress, uniqueObjectAddresses)
{
MarkObjectDistributed(objectAddress);
}
@ -831,7 +831,7 @@ IsDropCitusExtensionStmt(Node *parseTree)
/* now that we have a DropStmt, check if citus extension is among the objects to be dropped */
String *objectName;
foreach_ptr(objectName, dropStmt->objects)
foreach_declared_ptr(objectName, dropStmt->objects)
{
const char *extensionName = strVal(objectName);
@ -1061,7 +1061,7 @@ GenerateGrantCommandsOnExtensionDependentFDWs(Oid extensionId)
List *FDWOids = GetDependentFDWsToExtension(extensionId);
Oid FDWOid = InvalidOid;
foreach_oid(FDWOid, FDWOids)
foreach_declared_oid(FDWOid, FDWOids)
{
Acl *aclEntry = GetPrivilegesForFDW(FDWOid);


@ -202,7 +202,7 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
List *foreignKeyOids = GetForeignKeyOids(referencingTableId, flags);
Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids)
foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{
HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyOid));
@ -414,7 +414,7 @@ ForeignKeySetsNextValColumnToDefault(HeapTuple pgConstraintTuple)
List *setDefaultAttrs = ForeignKeyGetDefaultingAttrs(pgConstraintTuple);
AttrNumber setDefaultAttr = InvalidAttrNumber;
foreach_int(setDefaultAttr, setDefaultAttrs)
foreach_declared_int(setDefaultAttr, setDefaultAttrs)
{
if (ColumnDefaultsToNextVal(pgConstraintForm->conrelid, setDefaultAttr))
{
@ -467,7 +467,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
}
List *onDeleteSetDefColumnList = NIL;
#if PG_VERSION_NUM >= PG_VERSION_15
Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
Anum_pg_constraint_confdelsetcols,
&isNull);
@ -482,7 +481,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
onDeleteSetDefColumnList =
IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
}
#endif
if (list_length(onDeleteSetDefColumnList) == 0)
{
@ -727,7 +725,7 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags);
Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, foreignKeyIdsColumnAppeared)
foreach_declared_oid(foreignKeyId, foreignKeyIdsColumnAppeared)
{
Oid referencedTableId = GetReferencedTableId(foreignKeyId);
if (IsCitusTableType(referencedTableId, REFERENCE_TABLE))
@ -901,7 +899,7 @@ GetForeignConstraintCommandsInternal(Oid relationId, int flags)
int saveNestLevel = PushEmptySearchPath();
Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids)
foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{
char *statementDef = pg_get_constraintdef_command(foreignKeyOid);
@ -1157,7 +1155,7 @@ static Oid
FindForeignKeyOidWithName(List *foreignKeyOids, const char *inputConstraintName)
{
Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids)
foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{
char *constraintName = get_constraint_name(foreignKeyOid);
@ -1472,7 +1470,7 @@ RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId)
List *foreignKeysRelationInvolved = list_concat(referencingForeignKeys,
referencedForeignKeys);
Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, foreignKeysRelationInvolved)
foreach_declared_oid(foreignKeyId, foreignKeysRelationInvolved)
{
HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyId));
if (!HeapTupleIsValid(heapTuple))


@ -86,7 +86,7 @@ static bool
NameListHasFDWOwnedByDistributedExtension(List *FDWNames)
{
String *FDWValue = NULL;
foreach_ptr(FDWValue, FDWNames)
foreach_declared_ptr(FDWValue, FDWNames)
{
/* captures the extension address during lookup */
ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress));


@ -229,7 +229,7 @@ RecreateForeignServerStmt(Oid serverId)
int location = -1;
DefElem *option = NULL;
foreach_ptr(option, server->options)
foreach_declared_ptr(option, server->options)
{
DefElem *copyOption = makeDefElem(option->defname, option->arg, location);
createStmt->options = lappend(createStmt->options, copyOption);
@ -247,7 +247,7 @@ static bool
NameListHasDistributedServer(List *serverNames)
{
String *serverValue = NULL;
foreach_ptr(serverValue, serverNames)
foreach_declared_ptr(serverValue, serverNames)
{
List *addresses = GetObjectAddressByServerName(strVal(serverValue), false);


@ -256,7 +256,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
createFunctionSQL, alterFunctionOwnerSQL);
List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid);
char *grantOnFunctionSQL = NULL;
foreach_ptr(grantOnFunctionSQL, grantDDLCommands)
foreach_declared_ptr(grantOnFunctionSQL, grantDDLCommands)
{
appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL);
}
@ -370,7 +370,7 @@ ErrorIfAnyNodeDoesNotHaveMetadata(void)
ActivePrimaryNonCoordinatorNodeList(ShareLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
if (!workerNode->hasMetadata)
{
@ -1476,7 +1476,7 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
objectWithArgs->objname = stmt->funcname;
FunctionParameter *funcParam = NULL;
foreach_ptr(funcParam, stmt->parameters)
foreach_declared_ptr(funcParam, stmt->parameters)
{
if (ShouldAddFunctionSignature(funcParam->mode))
{
@ -1519,7 +1519,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess
if (stmt->args != NIL)
{
FunctionParameter *funcParam = NULL;
foreach_ptr(funcParam, linitial(stmt->args))
foreach_declared_ptr(funcParam, linitial(stmt->args))
{
objectWithArgs->objargs = lappend(objectWithArgs->objargs,
funcParam->argType);
@ -1528,7 +1528,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess
else
{
DefElem *defItem = NULL;
foreach_ptr(defItem, stmt->definition)
foreach_declared_ptr(defItem, stmt->definition)
{
/*
* If no explicit args are given, pg includes basetype in the signature.
@ -1933,7 +1933,7 @@ static void
ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt)
{
DefElem *action = NULL;
foreach_ptr(action, stmt->actions)
foreach_declared_ptr(action, stmt->actions)
{
if (strcmp(action->defname, "set") == 0)
{
@ -2040,7 +2040,7 @@ PreprocessGrantOnFunctionStmt(Node *node, const char *queryString,
List *grantFunctionList = NIL;
ObjectAddress *functionAddress = NULL;
foreach_ptr(functionAddress, distributedFunctions)
foreach_declared_ptr(functionAddress, distributedFunctions)
{
ObjectWithArgs *distFunction = ObjectWithArgsFromOid(
functionAddress->objectId);
@ -2083,7 +2083,7 @@ PostprocessGrantOnFunctionStmt(Node *node, const char *queryString)
}
ObjectAddress *functionAddress = NULL;
foreach_ptr(functionAddress, distributedFunctions)
foreach_declared_ptr(functionAddress, distributedFunctions)
{
EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress));
}
@ -2120,7 +2120,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
/* iterate over all namespace names provided to get their oid's */
String *namespaceValue = NULL;
foreach_ptr(namespaceValue, grantStmt->objects)
foreach_declared_ptr(namespaceValue, grantStmt->objects)
{
char *nspname = strVal(namespaceValue);
bool missing_ok = false;
@ -2132,7 +2132,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
* iterate over all distributed functions to filter the ones
* that belong to one of the namespaces from above
*/
foreach_ptr(distributedFunction, distributedFunctionList)
foreach_declared_ptr(distributedFunction, distributedFunctionList)
{
Oid namespaceOid = get_func_namespace(distributedFunction->objectId);
@ -2151,7 +2151,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
{
bool missingOk = false;
ObjectWithArgs *objectWithArgs = NULL;
foreach_ptr(objectWithArgs, grantStmt->objects)
foreach_declared_ptr(objectWithArgs, grantStmt->objects)
{
ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress));
functionAddress->classId = ProcedureRelationId;


@ -337,7 +337,7 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso
List *indexIdList = RelationGetIndexList(relation);
Oid indexId = InvalidOid;
foreach_oid(indexId, indexIdList)
foreach_declared_oid(indexId, indexIdList)
{
HeapTuple indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
if (!HeapTupleIsValid(indexTuple))
@ -708,7 +708,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand,
/* check if any of the indexes being dropped belong to a distributed table */
List *objectNameList = NULL;
foreach_ptr(objectNameList, dropIndexStatement->objects)
foreach_declared_ptr(objectNameList, dropIndexStatement->objects)
{
struct DropRelationCallbackState state;
uint32 rvrFlags = RVR_MISSING_OK;
@ -880,7 +880,7 @@ ErrorIfUnsupportedAlterIndexStmt(AlterTableStmt *alterTableStatement)
/* error out if any of the subcommands are unsupported */
List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
@ -932,7 +932,7 @@ CreateIndexTaskList(IndexStmt *indexStmt)
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
@ -977,7 +977,7 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt)
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
@ -1115,6 +1115,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
char relkind;
struct ReindexIndexCallbackState *state = arg;
LOCKMODE table_lockmode;
Oid table_oid;
/*
* Lock level here should match table lock in reindex_index() for
@ -1152,13 +1153,24 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
errmsg("\"%s\" is not an index", relation->relname)));
/* Check permissions */
#if PG_VERSION_NUM >= PG_VERSION_17
table_oid = IndexGetRelation(relId, true);
if (OidIsValid(table_oid))
{
AclResult aclresult = pg_class_aclcheck(table_oid, GetUserId(), ACL_MAINTAIN);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_INDEX, relation->relname);
}
#else
if (!object_ownercheck(RelationRelationId, relId, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, relation->relname);
#endif
/* Lock heap before index to avoid deadlock. */
if (relId != oldRelId)
{
Oid table_oid = IndexGetRelation(relId, true);
table_oid = IndexGetRelation(relId, true);
/*
* If the OID isn't valid, it means the index was concurrently
@ -1226,7 +1238,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
Var *partitionKey = DistPartitionKeyOrError(relationId);
List *indexParameterList = createIndexStatement->indexParams;
IndexElem *indexElement = NULL;
foreach_ptr(indexElement, indexParameterList)
foreach_declared_ptr(indexElement, indexParameterList)
{
const char *columnName = indexElement->name;
@ -1295,7 +1307,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt)
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
char *shardIndexName = pstrdup(indexName);


@ -301,6 +301,7 @@ static SelectStmt * CitusCopySelect(CopyStmt *copyStatement);
static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag);
static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState,
MultiConnection *connection);
static void ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement);
/* Private functions copied and adapted from copy.c in PostgreSQL */
static void SendCopyBegin(CopyOutState cstate);
@ -346,6 +347,7 @@ static LocalCopyStatus GetLocalCopyStatus(void);
static bool ShardIntervalListHasLocalPlacements(List *shardIntervalList);
static void LogLocalCopyToRelationExecution(uint64 shardId);
static void LogLocalCopyToFileExecution(uint64 shardId);
static void ErrorIfMergeInCopy(CopyStmt *copyStatement);
/* exports for SQL callable functions */
@ -1957,7 +1959,7 @@ ShardIntervalListHasLocalPlacements(List *shardIntervalList)
{
int32 localGroupId = GetLocalGroupId();
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
if (ActiveShardPlacementOnGroup(localGroupId, shardInterval->shardId) != NULL)
{
@ -2452,7 +2454,7 @@ ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement)
bool appendToShardSet = false;
DefElem *defel = NULL;
foreach_ptr(defel, copyStatement->options)
foreach_declared_ptr(defel, copyStatement->options)
{
if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0)
{
@ -2823,6 +2825,70 @@ CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName)
}
/*
* ErrorIfCopyHasOnErrorLogVerbosity errors out if the COPY statement
* has the on_error option or the log_verbosity option specified
*/
static void
ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
{
#if PG_VERSION_NUM >= PG_VERSION_17
bool log_verbosity = false;
foreach_ptr(DefElem, option, copyStatement->options)
{
if (strcmp(option->defname, "on_error") == 0)
{
ereport(ERROR, (errmsg(
"Citus does not support COPY FROM with ON_ERROR option.")));
}
else if (strcmp(option->defname, "log_verbosity") == 0)
{
log_verbosity = true;
}
}
/*
* Given that log_verbosity is currently used in COPY FROM
* when the ON_ERROR option is set to ignore, it makes more
* sense to error out for the ON_ERROR option first. For this reason,
* we don't error out directly in the previous loop.
* Relevant PG17 commit: https://github.com/postgres/postgres/commit/f5a227895
*/
if (log_verbosity)
{
ereport(ERROR, (errmsg(
"Citus does not support COPY FROM with LOG_VERBOSITY option.")));
}
#endif
}
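ErrorIfCopyHasOnErrorLogVerbosity is static, so the usage sketch below only makes sense inside this translation unit; the option list is built by hand purely to show the DefElem names the check scans for, not how a real COPY statement would arrive from the parser:

	/* hypothetical, hand-built equivalent of COPY ... FROM ... WITH (on_error 'ignore') */
	CopyStmt *copyStatement = makeNode(CopyStmt);
	copyStatement->is_from = true;
	copyStatement->options =
		list_make1(makeDefElem("on_error", (Node *) makeString("ignore"), -1));

	/* errors out on PG17 and later; the body compiles to a no-op on older servers */
	ErrorIfCopyHasOnErrorLogVerbosity(copyStatement);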
/*
* ErrorIfMergeInCopy errors out if MERGE is used within a COPY command
* that involves Citus tables, as we don't support this yet.
* Relevant PG17 commit: c649fa24a
*/
static void
ErrorIfMergeInCopy(CopyStmt *copyStatement)
{
#if PG_VERSION_NUM < 170000
return;
#else
if (!copyStatement->relation && (IsA(copyStatement->query, MergeStmt)))
{
/*
* This path is currently not reachable because MERGE in COPY can
* only work with a RETURNING clause, and the RETURNING check
* will error out sooner for Citus.
*/
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("MERGE with Citus tables "
"is not yet supported in COPY")));
}
#endif
}
/*
* ProcessCopyStmt handles Citus specific concerns for COPY like supporting
* COPYing from distributed tables and preventing unsupported actions. The
@ -2860,6 +2926,8 @@ ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletion *completionTag, const
*/
if (copyStatement->relation != NULL)
{
ErrorIfMergeInCopy(copyStatement);
bool isFrom = copyStatement->is_from;
/* consider using RangeVarGetRelidExtended to check perms before locking */
@ -2897,6 +2965,8 @@ ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletion *completionTag, const
"Citus does not support COPY FROM with WHERE")));
}
ErrorIfCopyHasOnErrorLogVerbosity(copyStatement);
/* check permissions, we're bypassing postgres' normal checks */
CheckCopyPermissions(copyStatement);
CitusCopyFrom(copyStatement, completionTag);


@ -255,7 +255,7 @@ static void
DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt)
{
RoleSpec *roleSpec = NULL;
foreach_ptr(roleSpec, dropRoleStmt->roles)
foreach_declared_ptr(roleSpec, dropRoleStmt->roles)
{
Oid roleOid = get_role_oid(roleSpec->rolename,
dropRoleStmt->missing_ok);


@ -48,7 +48,7 @@ CreatePolicyCommands(Oid relationId)
List *policyList = GetPolicyListForRelation(relationId);
RowSecurityPolicy *policy;
foreach_ptr(policy, policyList)
foreach_declared_ptr(policy, policyList)
{
char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy);
commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand));
@ -88,7 +88,7 @@ GetPolicyListForRelation(Oid relationId)
List *policyList = NIL;
RowSecurityPolicy *policy;
foreach_ptr(policy, relation->rd_rsdesc->policies)
foreach_declared_ptr(policy, relation->rd_rsdesc->policies)
{
policyList = lappend(policyList, policy);
}
@ -310,7 +310,7 @@ GetPolicyByName(Oid relationId, const char *policyName)
List *policyList = GetPolicyListForRelation(relationId);
RowSecurityPolicy *policy = NULL;
foreach_ptr(policy, policyList)
foreach_declared_ptr(policy, policyList)
{
if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0)
{


@ -33,11 +33,9 @@
static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId,
Oid publicationId,
bool tableOnly);
#endif
static void AppendPublishOptionList(StringInfo str, List *strings);
static char * AlterPublicationOwnerCommand(Oid publicationId);
static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt);
@ -154,11 +152,10 @@ BuildCreatePublicationStmt(Oid publicationId)
ReleaseSysCache(publicationTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
List *schemaIds = GetPublicationSchemas(publicationId);
Oid schemaId = InvalidOid;
foreach_oid(schemaId, schemaIds)
foreach_declared_oid(schemaId, schemaIds)
{
char *schemaName = get_namespace_name(schemaId);
@ -170,7 +167,6 @@ BuildCreatePublicationStmt(Oid publicationId)
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
}
#endif
List *relationIds = GetPublicationRelations(publicationId,
publicationForm->pubviaroot ?
@ -181,9 +177,8 @@ BuildCreatePublicationStmt(Oid publicationId)
/* mainly for consistent ordering in test output */
relationIds = SortList(relationIds, CompareOids);
foreach_oid(relationId, relationIds)
foreach_declared_oid(relationId, relationIds)
{
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = false;
/* since postgres 15, tables can have a column list and filter */
@ -191,15 +186,6 @@ BuildCreatePublicationStmt(Oid publicationId)
BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly);
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
#endif
}
/* WITH (publish_via_partition_root = true) option */
@ -270,8 +256,6 @@ AppendPublishOptionList(StringInfo str, List *options)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* BuildPublicationRelationObjSpec returns a PublicationObjSpec that
* can be included in a CREATE or ALTER PUBLICATION statement.
@ -351,9 +335,6 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId,
}
#endif
/*
* PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements
* in a way that is mostly similar to PreprocessAlterDistributedObjectStmt,
@ -414,7 +395,7 @@ GetAlterPublicationDDLCommandsForTable(Oid relationId, bool isAdd)
List *publicationIds = GetRelationPublications(relationId);
Oid publicationId = InvalidOid;
foreach_oid(publicationId, publicationIds)
foreach_declared_oid(publicationId, publicationIds)
{
char *command = GetAlterPublicationTableDDLCommand(publicationId,
relationId, isAdd);
@ -452,7 +433,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
ReleaseSysCache(pubTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = !isAdd;
/* since postgres 15, tables can have a column list and filter */
@ -461,16 +441,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject);
alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects;
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
alterPubStmt->tables = lappend(alterPubStmt->tables, rangeVar);
alterPubStmt->tableAction = isAdd ? DEFELEM_ADD : DEFELEM_DROP;
#endif
/* we take the WHERE clause from the catalog where it is already transformed */
bool whereClauseNeedsTransform = false;


@ -74,7 +74,9 @@ static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
TupleDesc DbRoleSettingDescription);
static char * GetDatabaseNameFromDbRoleSetting(HeapTuple tuple,
TupleDesc DbRoleSettingDescription);
#if PG_VERSION_NUM < PG_VERSION_17
static Node * makeStringConst(char *str, int location);
#endif
static Node * makeIntConst(int val, int location);
static Node * makeFloatConst(char *str, int location);
static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec *role);
@ -163,7 +165,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
AlterRoleStmt *stmt = castNode(AlterRoleStmt, node);
DefElem *option = NULL;
foreach_ptr(option, stmt->options)
foreach_declared_ptr(option, stmt->options)
{
if (strcasecmp(option->defname, "password") == 0)
{
@ -564,7 +566,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid);
Node *stmt = NULL;
foreach_ptr(stmt, grantRoleStmts)
foreach_declared_ptr(stmt, grantRoleStmts)
{
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
}
@ -578,7 +580,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
*/
List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
stmt = NULL;
foreach_ptr(stmt, secLabelOnRoleStmts)
foreach_declared_ptr(stmt, secLabelOnRoleStmts)
{
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
}
@ -787,7 +789,7 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue)
}
char *configuration = NULL;
foreach_ptr(configuration, configurationList)
foreach_declared_ptr(configuration, configurationList)
{
Node *arg = makeStringConst(configuration, -1);
args = lappend(args, arg);
@ -823,7 +825,7 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
List *stmts = NIL;
DefElem *option = NULL;
foreach_ptr(option, options)
foreach_declared_ptr(option, options)
{
if (strcmp(option->defname, "adminmembers") != 0 &&
strcmp(option->defname, "rolemembers") != 0 &&
@ -1047,7 +1049,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
/* deparse all grant statements and add them to the commands list */
Node *stmt = NULL;
foreach_ptr(stmt, grantRoleStmts)
foreach_declared_ptr(stmt, grantRoleStmts)
{
commands = lappend(commands, DeparseTreeNode(stmt));
}
@ -1058,6 +1060,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
}
#if PG_VERSION_NUM < PG_VERSION_17
/*
* makeStringConst creates a Const Node that stores a given string
*
@ -1068,19 +1072,17 @@ makeStringConst(char *str, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.sval.type = T_String;
n->val.sval.sval = str;
#else
n->val.type = T_String;
n->val.val.str = str;
#endif
n->location = location;
return (Node *) n;
}
#endif
/*
* makeIntConst creates a Const Node that stores a given integer
*
@ -1091,13 +1093,8 @@ makeIntConst(int val, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.ival.type = T_Integer;
n->val.ival.ival = val;
#else
n->val.type = T_Integer;
n->val.val.ival = val;
#endif
n->location = location;
return (Node *) n;
@ -1114,13 +1111,8 @@ makeFloatConst(char *str, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.fval.type = T_Float;
n->val.fval.fval = str;
#else
n->val.type = T_Float;
n->val.val.str = str;
#endif
n->location = location;
return (Node *) n;
@ -1174,7 +1166,7 @@ void
UnmarkRolesDistributed(List *roles)
{
Node *roleNode = NULL;
foreach_ptr(roleNode, roles)
foreach_declared_ptr(roleNode, roles)
{
RoleSpec *role = castNode(RoleSpec, roleNode);
ObjectAddress roleAddress = { 0 };
@ -1204,7 +1196,7 @@ FilterDistributedRoles(List *roles)
{
List *distributedRoles = NIL;
Node *roleNode = NULL;
foreach_ptr(roleNode, roles)
foreach_declared_ptr(roleNode, roles)
{
RoleSpec *role = castNode(RoleSpec, roleNode);
Oid roleOid = get_rolespec_oid(role, true);
@ -1282,7 +1274,7 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString)
GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);
RoleSpec *role = NULL;
foreach_ptr(role, stmt->grantee_roles)
foreach_declared_ptr(role, stmt->grantee_roles)
{
Oid roleOid = get_rolespec_oid(role, false);
ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress));


@ -162,7 +162,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString,
EnsureSequentialMode(OBJECT_SCHEMA);
String *schemaVal = NULL;
foreach_ptr(schemaVal, distributedSchemas)
foreach_declared_ptr(schemaVal, distributedSchemas)
{
if (SchemaHasDistributedTableWithFKey(strVal(schemaVal)))
{
@ -322,7 +322,7 @@ FilterDistributedSchemas(List *schemas)
List *distributedSchemas = NIL;
String *schemaValue = NULL;
foreach_ptr(schemaValue, schemas)
foreach_declared_ptr(schemaValue, schemas)
{
const char *schemaName = strVal(schemaValue);
Oid schemaOid = get_namespace_oid(schemaName, true);
@ -443,7 +443,7 @@ GetGrantCommandsFromCreateSchemaStmt(Node *node)
CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
Node *element = NULL;
foreach_ptr(element, stmt->schemaElts)
foreach_declared_ptr(element, stmt->schemaElts)
{
if (!IsA(element, GrantStmt))
{
@ -480,7 +480,7 @@ static bool
CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt)
{
Node *element = NULL;
foreach_ptr(element, stmt->schemaElts)
foreach_declared_ptr(element, stmt->schemaElts)
{
/*
* CREATE TABLE AS and CREATE FOREIGN TABLE commands cannot be


@ -174,7 +174,7 @@ EnsureTableKindSupportedForTenantSchema(Oid relationId)
List *partitionList = PartitionList(relationId);
Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
ErrorIfIllegalPartitioningInTenantSchema(relationId, partitionRelationId);
}
@ -199,7 +199,7 @@ EnsureFKeysForTenantTable(Oid relationId)
int fKeyReferencingFlags = INCLUDE_REFERENCING_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES;
List *referencingForeignKeys = GetForeignKeyOids(relationId, fKeyReferencingFlags);
Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, referencingForeignKeys)
foreach_declared_oid(foreignKeyId, referencingForeignKeys)
{
Oid referencingTableId = GetReferencingTableId(foreignKeyId);
Oid referencedTableId = GetReferencedTableId(foreignKeyId);
@ -232,7 +232,7 @@ EnsureFKeysForTenantTable(Oid relationId)
int fKeyReferencedFlags = INCLUDE_REFERENCED_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES;
List *referencedForeignKeys = GetForeignKeyOids(relationId, fKeyReferencedFlags);
foreach_oid(foreignKeyId, referencedForeignKeys)
foreach_declared_oid(foreignKeyId, referencedForeignKeys)
{
Oid referencingTableId = GetReferencingTableId(foreignKeyId);
Oid referencedTableId = GetReferencedTableId(foreignKeyId);
@ -429,7 +429,7 @@ EnsureSchemaCanBeDistributed(Oid schemaId, List *schemaTableIdList)
}
Oid relationId = InvalidOid;
foreach_oid(relationId, schemaTableIdList)
foreach_declared_oid(relationId, schemaTableIdList)
{
EnsureTenantTable(relationId, "citus_schema_distribute");
}
@ -637,7 +637,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS)
List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId);
List *tableIdListToConvert = NIL;
Oid relationId = InvalidOid;
foreach_oid(relationId, tableIdListInSchema)
foreach_declared_oid(relationId, tableIdListInSchema)
{
/* prevent concurrent drop of the relation */
LockRelationOid(relationId, AccessShareLock);
@ -675,7 +675,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS)
* tables.
*/
List *originalForeignKeyRecreationCommands = NIL;
foreach_oid(relationId, tableIdListToConvert)
foreach_declared_oid(relationId, tableIdListToConvert)
{
List *fkeyCommandsForRelation =
GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId,
@ -741,7 +741,7 @@ citus_schema_undistribute(PG_FUNCTION_ARGS)
List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId);
List *tableIdListToConvert = NIL;
Oid relationId = InvalidOid;
foreach_oid(relationId, tableIdListInSchema)
foreach_declared_oid(relationId, tableIdListInSchema)
{
/* prevent concurrent drop of the relation */
LockRelationOid(relationId, AccessShareLock);
@ -883,7 +883,7 @@ TenantSchemaPickAnchorShardId(Oid schemaId)
}
Oid relationId = InvalidOid;
foreach_oid(relationId, tablesInSchema)
foreach_declared_oid(relationId, tablesInSchema)
{
/*
* Make sure the relation isn't dropped for the remainder of


@ -123,7 +123,7 @@ static bool
OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId)
{
DefElem *defElem = NULL;
foreach_ptr(defElem, optionList)
foreach_declared_ptr(defElem, optionList)
{
if (strcmp(defElem->defname, "owned_by") == 0)
{
@ -202,7 +202,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
}
Oid ownedSequenceId = InvalidOid;
foreach_oid(ownedSequenceId, columnOwnedSequences)
foreach_declared_oid(ownedSequenceId, columnOwnedSequences)
{
/*
* A column might have multiple sequences one via OWNED BY one another
@ -288,7 +288,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
*/
List *deletingSequencesList = stmt->objects;
List *objectNameList = NULL;
foreach_ptr(objectNameList, deletingSequencesList)
foreach_declared_ptr(objectNameList, deletingSequencesList)
{
RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -322,7 +322,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
/* remove the entries for the distributed objects on dropping */
ObjectAddress *address = NULL;
foreach_ptr(address, distributedSequenceAddresses)
foreach_declared_ptr(address, distributedSequenceAddresses)
{
UnmarkObjectDistributed(address);
}
@ -356,7 +356,7 @@ SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
List *droppingSequencesList = dropSeqStmt->objects;
List *objectNameList = NULL;
foreach_ptr(objectNameList, droppingSequencesList)
foreach_declared_ptr(objectNameList, droppingSequencesList)
{
RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -476,7 +476,7 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
{
List *options = stmt->options;
DefElem *defel = NULL;
foreach_ptr(defel, options)
foreach_declared_ptr(defel, options)
{
if (strcmp(defel->defname, "as") == 0)
{
@ -511,7 +511,7 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depTyp
Oid relationId;
List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId,
depType);
foreach_oid(relationId, relations)
foreach_declared_oid(relationId, relations)
{
if (IsCitusTable(relationId))
{
@ -735,8 +735,6 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* PreprocessAlterSequencePersistenceStmt is called for change of persistence
* of sequences before the persistence is changed on the local instance.
@ -847,9 +845,6 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
}
#endif
/*
* PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
* postgres instance.
@ -930,7 +925,7 @@ PostprocessGrantOnSequenceStmt(Node *node, const char *queryString)
EnsureCoordinator();
RangeVar *sequence = NULL;
foreach_ptr(sequence, distributedSequences)
foreach_declared_ptr(sequence, distributedSequences)
{
ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress));
Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false);
@ -1014,7 +1009,7 @@ FilterDistributedSequences(GrantStmt *stmt)
/* iterate over all namespace names provided to get their oid's */
List *namespaceOidList = NIL;
String *namespaceValue = NULL;
foreach_ptr(namespaceValue, stmt->objects)
foreach_declared_ptr(namespaceValue, stmt->objects)
{
char *nspname = strVal(namespaceValue);
bool missing_ok = false;
@ -1028,7 +1023,7 @@ FilterDistributedSequences(GrantStmt *stmt)
*/
List *distributedSequenceList = DistributedSequenceList();
ObjectAddress *sequenceAddress = NULL;
foreach_ptr(sequenceAddress, distributedSequenceList)
foreach_declared_ptr(sequenceAddress, distributedSequenceList)
{
Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId);
@ -1052,7 +1047,7 @@ FilterDistributedSequences(GrantStmt *stmt)
{
bool missing_ok = false;
RangeVar *sequenceRangeVar = NULL;
foreach_ptr(sequenceRangeVar, stmt->objects)
foreach_declared_ptr(sequenceRangeVar, stmt->objects)
{
Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok);
ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress));


@ -184,7 +184,7 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString,
List *ddlJobs = NIL;
List *processedStatsOids = NIL;
List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects)
foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{
Oid statsOid = get_statistics_object_oid(objectNameList,
dropStatisticsStmt->missing_ok);
@ -234,7 +234,7 @@ DropStatisticsObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL;
List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects)
foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{
Oid statsOid = get_statistics_object_oid(objectNameList,
dropStatisticsStmt->missing_ok);
@ -535,7 +535,7 @@ GetExplicitStatisticsCommandList(Oid relationId)
int saveNestLevel = PushEmptySearchPath();
Oid statisticsId = InvalidOid;
foreach_oid(statisticsId, statisticsIdList)
foreach_declared_oid(statisticsId, statisticsIdList)
{
/* we need create commands for already created stats before distribution */
Datum commandText = DirectFunctionCall1(pg_get_statisticsobjdef,
@ -606,7 +606,7 @@ GetExplicitStatisticsSchemaIdList(Oid relationId)
RelationClose(relation);
Oid statsId = InvalidOid;
foreach_oid(statsId, statsIdList)
foreach_declared_oid(statsId, statsIdList)
{
HeapTuple heapTuple = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsId));
if (!HeapTupleIsValid(heapTuple))
@ -651,14 +651,15 @@ GetAlterIndexStatisticsCommands(Oid indexOid)
}
Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(attTuple);
if (targetAttr->attstattarget != DEFAULT_STATISTICS_TARGET)
int32 targetAttstattarget = getAttstattarget_compat(attTuple);
if (targetAttstattarget != DEFAULT_STATISTICS_TARGET)
{
char *indexNameWithSchema = generate_qualified_relation_name(indexOid);
char *command =
GenerateAlterIndexColumnSetStatsCommand(indexNameWithSchema,
targetAttr->attnum,
targetAttr->attstattarget);
targetAttstattarget);
alterIndexStatisticsCommandList =
lappend(alterIndexStatisticsCommandList,
@ -773,9 +774,10 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid)
}
Form_pg_statistic_ext statisticsForm = (Form_pg_statistic_ext) GETSTRUCT(tup);
int16 currentStxstattarget = getStxstattarget_compat(tup);
ReleaseSysCache(tup);
if (statisticsForm->stxstattarget == -1)
if (currentStxstattarget == -1)
{
return NULL;
}
@ -785,7 +787,8 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid)
char *schemaName = get_namespace_name(statisticsForm->stxnamespace);
char *statName = NameStr(statisticsForm->stxname);
alterStatsStmt->stxstattarget = statisticsForm->stxstattarget;
alterStatsStmt->stxstattarget = getAlterStatsStxstattarget_compat(
currentStxstattarget);
alterStatsStmt->defnames = list_make2(makeString(schemaName), makeString(statName));
return DeparseAlterStatisticsStmt((Node *) alterStatsStmt);
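The *_compat accessors used above are not defined in this hunk. What follows is a hypothetical sketch of what they plausibly do, assuming PG17 made pg_attribute.attstattarget and pg_statistic_ext.stxstattarget nullable (so they must be read with SysCacheGetAttr rather than straight off the catalog struct) and turned AlterStatsStmt's stxstattarget field into a Node pointer:

	/* sketches only; not the actual Citus definitions */
	#if PG_VERSION_NUM >= PG_VERSION_17
	static inline int32
	getAttstattarget_sketch(HeapTuple attTuple)
	{
		bool isNull = false;
		Datum datum = SysCacheGetAttr(ATTNUM, attTuple,
									  Anum_pg_attribute_attstattarget, &isNull);
		return isNull ? -1 : DatumGetInt16(datum);
	}

	static inline Node *
	getAlterStatsStxstattarget_sketch(int32 target)
	{
		/* PG17 expects a Node, so wrap the plain integer */
		return (Node *) makeInteger(target);
	}
	#else
	static inline int32
	getAttstattarget_sketch(HeapTuple attTuple)
	{
		return ((Form_pg_attribute) GETSTRUCT(attTuple))->attstattarget;
	}

	static inline int32
	getAlterStatsStxstattarget_sketch(int32 target)
	{
		/* before PG17 the statement field is a plain int */
		return target;
	}
	#endif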


@ -154,7 +154,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString,
Assert(dropTableStatement->removeType == OBJECT_TABLE);
List *tableNameList = NULL;
foreach_ptr(tableNameList, dropTableStatement->objects)
foreach_declared_ptr(tableNameList, dropTableStatement->objects)
{
RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList);
bool missingOK = true;
@ -202,7 +202,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString,
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
char *detachPartitionCommand =
GenerateDetachPartitionCommand(partitionRelationId);
@ -263,7 +263,7 @@ PostprocessCreateTableStmt(CreateStmt *createStatement, const char *queryString)
}
RangeVar *parentRelation = NULL;
foreach_ptr(parentRelation, createStatement->inhRelations)
foreach_declared_ptr(parentRelation, createStatement->inhRelations)
{
Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock,
missingOk);
@ -480,7 +480,7 @@ PreprocessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement,
{
List *commandList = alterTableStatement->cmds;
AlterTableCmd *alterTableCommand = NULL;
foreach_ptr(alterTableCommand, commandList)
foreach_declared_ptr(alterTableCommand, commandList)
{
if (alterTableCommand->subtype == AT_AttachPartition)
{
@ -792,7 +792,7 @@ ChooseForeignKeyConstraintNameAddition(List *columnNames)
String *columnNameString = NULL;
foreach_ptr(columnNameString, columnNames)
foreach_declared_ptr(columnNameString, columnNames)
{
const char *name = strVal(columnNameString);
@ -1153,7 +1153,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
{
AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
stmtCopy->objtype = OBJECT_SEQUENCE;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* it must be ALTER TABLE .. OWNER TO ..
@ -1163,16 +1162,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
*/
return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext);
#else
/*
* it must be ALTER TABLE .. OWNER TO .. command
* since this is the only ALTER command of a sequence that
* passes through an AlterTableStmt
*/
return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext);
#endif
}
else if (relKind == RELKIND_VIEW)
{
@ -1314,7 +1303,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
AlterTableCmd *newCmd = makeNode(AlterTableCmd);
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
@ -1418,7 +1407,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_FOREIGN)
{
@ -1442,7 +1431,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
deparseAT = true;
constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (ConstrTypeCitusCanDefaultName(constraint->contype))
{
@ -1467,7 +1456,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
*/
constraint = NULL;
int constraintIdx = 0;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_DEFAULT)
{
@ -1696,7 +1685,7 @@ DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
{
ColumnDef *columnDefinition = (ColumnDef *) addColumnSubCommand->def;
Constraint *constraint = NULL;
foreach_ptr(constraint, columnDefinition->constraints)
foreach_declared_ptr(constraint, columnDefinition->constraints)
{
if (constraint->contype == CONSTR_CHECK)
{
@ -1792,7 +1781,7 @@ static bool
RelationIdListContainsCitusTableType(List *relationIdList, CitusTableType citusTableType)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (IsCitusTableType(relationId, citusTableType))
{
@ -1812,7 +1801,7 @@ static bool
RelationIdListContainsPostgresTable(List *relationIdList)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (OidIsValid(relationId) && !IsCitusTable(relationId))
{
@ -1851,7 +1840,7 @@ ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *alterTableStatement
* change in below loop due to CreateCitusLocalTable.
*/
RangeVar *relationRangeVar;
foreach_ptr(relationRangeVar, relationRangeVarList)
foreach_declared_ptr(relationRangeVar, relationRangeVarList)
{
List *commandList = alterTableStatement->cmds;
LOCKMODE lockMode = AlterTableGetLockLevel(commandList);
@ -1979,7 +1968,7 @@ RangeVarListHasLocalRelationConvertedByUser(List *relationRangeVarList,
AlterTableStmt *alterTableStatement)
{
RangeVar *relationRangeVar;
foreach_ptr(relationRangeVar, relationRangeVarList)
foreach_declared_ptr(relationRangeVar, relationRangeVarList)
{
/*
* Here we iterate the relation list, and if at least one of the relations
@ -2076,7 +2065,7 @@ GetAlterTableAddFKeyConstraintList(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
List *commandForeignKeyConstraintList =
GetAlterTableCommandFKeyConstraintList(command);
@ -2116,7 +2105,7 @@ GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command)
List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_FOREIGN)
{
@ -2139,7 +2128,7 @@ GetRangeVarListFromFKeyConstraintList(List *fKeyConstraintList)
List *rightRelationRangeVarList = NIL;
Constraint *fKeyConstraint = NULL;
foreach_ptr(fKeyConstraint, fKeyConstraintList)
foreach_declared_ptr(fKeyConstraint, fKeyConstraintList)
{
RangeVar *rightRelationRangeVar = fKeyConstraint->pktable;
rightRelationRangeVarList = lappend(rightRelationRangeVarList,
@ -2160,7 +2149,7 @@ GetRelationIdListFromRangeVarList(List *rangeVarList, LOCKMODE lockMode, bool mi
List *relationIdList = NIL;
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, rangeVarList)
foreach_declared_ptr(rangeVar, rangeVarList)
{
Oid rightRelationId = RangeVarGetRelid(rangeVar, lockMode, missingOk);
relationIdList = lappend_oid(relationIdList, rightRelationId);
@ -2234,7 +2223,7 @@ AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement)
Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode);
AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStatement->cmds)
foreach_declared_ptr(command, alterTableStatement->cmds)
{
AlterTableType alterTableType = command->subtype;
@ -2296,7 +2285,7 @@ AnyForeignKeyDependsOnIndex(Oid indexId)
GetPgDependTuplesForDependingObjects(dependentObjectClassId, dependentObjectId);
HeapTuple dependencyTuple = NULL;
foreach_ptr(dependencyTuple, dependencyTupleList)
foreach_declared_ptr(dependencyTuple, dependencyTupleList)
{
Form_pg_depend dependencyForm = (Form_pg_depend) GETSTRUCT(dependencyTuple);
Oid dependingClassId = dependencyForm->classid;
@ -2484,7 +2473,7 @@ SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement,
* shards anyway.
*/
AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStatement->cmds)
foreach_declared_ptr(command, alterTableStatement->cmds)
{
AlterTableType alterTableType = command->subtype;
@ -2565,7 +2554,7 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement)
/* then check if any of subcommands drop partition column.*/
List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
if (alterTableType == AT_DropColumn)
@ -2634,7 +2623,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
@ -2670,7 +2659,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
}
Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->conname == NULL &&
(constraint->contype == CONSTR_PRIMARY ||
@ -2690,7 +2679,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
* that sequence is supported
*/
constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_DEFAULT)
{
@ -2802,7 +2791,7 @@ FixAlterTableStmtIndexNames(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
@ -3165,7 +3154,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
List *indexOidList = RelationGetIndexList(relation);
Oid indexOid = InvalidOid;
foreach_oid(indexOid, indexOidList)
foreach_declared_oid(indexOid, indexOidList)
{
Relation indexDesc = index_open(indexOid, RowExclusiveLock);
bool hasDistributionColumn = false;
@ -3310,7 +3299,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
/* error out if any of the subcommands are unsupported */
AlterTableCmd *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
AlterTableType alterTableType = command->subtype;
@ -3385,7 +3374,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
Constraint *columnConstraint = NULL;
foreach_ptr(columnConstraint, column->constraints)
foreach_declared_ptr(columnConstraint, column->constraints)
{
if (columnConstraint->contype == CONSTR_IDENTITY)
{
@ -3417,7 +3406,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
List *columnConstraints = column->constraints;
Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_DEFAULT)
{
@ -3664,9 +3653,36 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
break;
}
#if PG_VERSION_NUM >= PG_VERSION_15
case AT_SetAccessMethod:
#if PG_VERSION_NUM >= PG_VERSION_17
case AT_SetExpression:
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"ALTER TABLE ... ALTER COLUMN ... SET EXPRESSION commands "
"are currently unsupported.")));
break;
}
#endif
case AT_SetAccessMethod:
{
/*
* If command->name == NULL, that means the user is trying to use
* ALTER TABLE ... SET ACCESS METHOD DEFAULT
* which we don't support currently.
*/
if (command->name == NULL)
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"DEFAULT option in ALTER TABLE ... SET ACCESS METHOD "
"is currently unsupported."),
errhint(
"You can rerun the command by explicitly writing the access method name.")));
}
break;
}
case AT_SetNotNull:
case AT_ReplicaIdentity:
case AT_ChangeOwner:
@ -3770,7 +3786,7 @@ SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command)
List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints)
foreach_declared_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_FOREIGN)
{
@ -3970,10 +3986,10 @@ SetInterShardDDLTaskPlacementList(Task *task, ShardInterval *leftShardInterval,
List *intersectedPlacementList = NIL;
ShardPlacement *leftShardPlacement = NULL;
foreach_ptr(leftShardPlacement, leftShardPlacementList)
foreach_declared_ptr(leftShardPlacement, leftShardPlacementList)
{
ShardPlacement *rightShardPlacement = NULL;
foreach_ptr(rightShardPlacement, rightShardPlacementList)
foreach_declared_ptr(rightShardPlacement, rightShardPlacementList)
{
if (leftShardPlacement->nodeId == rightShardPlacement->nodeId)
{


@ -57,9 +57,6 @@ static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt,
static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt);
static char * GetTriggerNameById(Oid triggerId);
static int16 GetTriggerTypeById(Oid triggerId);
#if (PG_VERSION_NUM < PG_VERSION_15)
static void ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname);
#endif
/* GUC that overrides trigger checks for distributed tables and reference tables */
@ -81,7 +78,7 @@ GetExplicitTriggerCommandList(Oid relationId)
List *triggerIdList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList)
foreach_declared_oid(triggerId, triggerIdList)
{
bool prettyOutput = false;
Datum commandText = DirectFunctionCall2(pg_get_triggerdef_ext,
@ -404,40 +401,6 @@ CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaNam
}
/*
* PreprocessAlterTriggerRenameStmt is called before a ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors
* out if we are trying to rename a child trigger on a partition of a distributed
* table. In PG15, this is not allowed anyway.
*/
List *
PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
#if (PG_VERSION_NUM < PG_VERSION_15)
RenameStmt *renameTriggerStmt = castNode(RenameStmt, node);
Assert(renameTriggerStmt->renameType == OBJECT_TRIGGER);
RangeVar *relation = renameTriggerStmt->relation;
bool missingOk = false;
Oid relationId = RangeVarGetRelid(relation, ALTER_TRIGGER_LOCK_MODE, missingOk);
if (!IsCitusTable(relationId))
{
return NIL;
}
EnsureCoordinator();
ErrorOutForTriggerIfNotSupported(relationId);
ErrorOutIfCloneTrigger(relationId, renameTriggerStmt->subname);
#endif
return NIL;
}
/*
* PostprocessAlterTriggerRenameStmt is called after an ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors
@ -742,7 +705,7 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
List *relationTriggerList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid;
foreach_oid(triggerId, relationTriggerList)
foreach_declared_oid(triggerId, relationTriggerList)
{
ObjectAddress triggerObjectAddress = InvalidObjectAddress;
ObjectAddressSet(triggerObjectAddress, TriggerRelationId, triggerId);
@ -759,64 +722,6 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
}
#if (PG_VERSION_NUM < PG_VERSION_15)
/*
* ErrorOutIfCloneTrigger is a helper function to error
* out if we are trying to rename a child trigger on a
* partition of a distributed table.
* A lot of this code is borrowed from PG15 because
* renaming clone triggers isn't allowed in PG15 anymore.
*/
static void
ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname)
{
HeapTuple tuple;
ScanKeyData key[2];
Relation tgrel = table_open(TriggerRelationId, RowExclusiveLock);
/*
* Search for the trigger to modify.
*/
ScanKeyInit(&key[0],
Anum_pg_trigger_tgrelid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tgrelid));
ScanKeyInit(&key[1],
Anum_pg_trigger_tgname,
BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum(tgname));
SysScanDesc tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
NULL, 2, key);
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
Form_pg_trigger trigform = (Form_pg_trigger) GETSTRUCT(tuple);
/*
* If the trigger descends from a trigger on a parent partitioned
* table, reject the rename.
* Appended shard ids to find the trigger on the partition's shards
* are not correct. Hence we would fail to find the trigger on the
* partition's shard.
*/
if (OidIsValid(trigform->tgparentid))
{
ereport(ERROR, (
errmsg(
"cannot rename child triggers on distributed partitions")));
}
}
systable_endscan(tgscan);
table_close(tgrel, RowExclusiveLock);
}
#endif
/*
* GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns
* RangeVar for the relation that owns the trigger.


@ -135,7 +135,7 @@ TruncateTaskList(Oid relationId)
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
char *shardRelationName = pstrdup(relationName);
@ -264,7 +264,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
{
List *relationList = truncateStatement->relations;
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, relationList)
foreach_declared_ptr(rangeVar, relationList)
{
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false);
@ -294,7 +294,7 @@ static void
EnsurePartitionTableNotReplicatedForTruncate(TruncateStmt *truncateStatement)
{
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, truncateStatement->relations)
foreach_declared_ptr(rangeVar, truncateStatement->relations)
{
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false);
@ -322,7 +322,7 @@ ExecuteTruncateStmtSequentialIfNecessary(TruncateStmt *command)
bool failOK = false;
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, relationList)
foreach_declared_ptr(rangeVar, relationList)
{
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, failOK);


@ -454,7 +454,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
bool analyze = false;
DefElem *option = NULL;
foreach_ptr(option, explainStmt->options)
foreach_declared_ptr(option, explainStmt->options)
{
if (strcmp(option->defname, "analyze") == 0)
{
@ -695,7 +695,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
{
AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree;
AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStmt->cmds)
foreach_declared_ptr(command, alterTableStmt->cmds)
{
AlterTableType alterTableType = command->subtype;
@ -879,7 +879,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
}
DDLJob *ddlJob = NULL;
foreach_ptr(ddlJob, ddlJobs)
foreach_declared_ptr(ddlJob, ddlJobs)
{
ExecuteDistributedDDLJob(ddlJob);
}
@ -939,7 +939,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
{
List *addresses = GetObjectAddressListFromParseTree(parsetree, false, true);
ObjectAddress *address = NULL;
foreach_ptr(address, addresses)
foreach_declared_ptr(address, addresses)
{
MarkObjectDistributed(address);
TrackPropagatedObject(address);
@ -962,7 +962,7 @@ UndistributeDisconnectedCitusLocalTables(void)
citusLocalTableIdList = SortList(citusLocalTableIdList, CompareOids);
Oid citusLocalTableId = InvalidOid;
foreach_oid(citusLocalTableId, citusLocalTableIdList)
foreach_declared_oid(citusLocalTableId, citusLocalTableIdList)
{
/* acquire ShareRowExclusiveLock to prevent concurrent foreign key creation */
LOCKMODE lockMode = ShareRowExclusiveLock;
@ -1349,7 +1349,7 @@ CurrentSearchPath(void)
bool schemaAdded = false;
Oid searchPathOid = InvalidOid;
foreach_oid(searchPathOid, searchPathList)
foreach_declared_oid(searchPathOid, searchPathList)
{
char *schemaName = get_namespace_name(searchPathOid);
@ -1483,7 +1483,7 @@ DDLTaskList(Oid relationId, const char *commandString)
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
StringInfo applyCommand = makeStringInfo();
@ -1525,10 +1525,10 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands,
{
List *ddlJobs = NodeDDLTaskList(targets, commands);
DDLJob *ddlJob = NULL;
foreach_ptr(ddlJob, ddlJobs)
foreach_declared_ptr(ddlJob, ddlJobs)
{
Task *task = NULL;
foreach_ptr(task, ddlJob->taskList)
foreach_declared_ptr(task, ddlJob->taskList)
{
task->cannotBeExecutedInTransaction = true;
}
@ -1564,7 +1564,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
SetTaskQueryStringList(task, commands);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodes)
foreach_declared_ptr(workerNode, workerNodes)
{
ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
targetPlacement->nodeName = workerNode->workerName;


@ -135,7 +135,7 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
List *relationIdList = NIL;
RangeVar *vacuumRelation = NULL;
foreach_ptr(vacuumRelation, vacuumRelationList)
foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{
/*
* If skip_locked option is enabled, we are skipping that relation
@ -164,7 +164,7 @@ static bool
IsDistributedVacuumStmt(List *vacuumRelationIdList)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, vacuumRelationIdList)
foreach_declared_oid(relationId, vacuumRelationIdList)
{
if (OidIsValid(relationId) && IsCitusTable(relationId))
{
@ -187,7 +187,7 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
int relationIndex = 0;
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (IsCitusTable(relationId))
{
@ -252,7 +252,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
char *shardRelationName = pstrdup(relationName);
@ -473,7 +473,7 @@ DeparseVacuumColumnNames(List *columnNameList)
appendStringInfoString(columnNames, " (");
String *columnName = NULL;
foreach_ptr(columnName, columnNameList)
foreach_declared_ptr(columnName, columnNameList)
{
appendStringInfo(columnNames, "%s,", strVal(columnName));
}
@ -508,7 +508,7 @@ ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
List *vacuumList = NIL;
VacuumRelation *vacuumRelation = NULL;
foreach_ptr(vacuumRelation, vacuumStmt->rels)
foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{
vacuumList = lappend(vacuumList, vacuumRelation->relation);
}
@ -552,7 +552,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
/* Parse options list */
DefElem *opt = NULL;
foreach_ptr(opt, vacstmt->options)
foreach_declared_ptr(opt, vacstmt->options)
{
/* Parse common options for VACUUM and ANALYZE */
if (strcmp(opt->defname, "verbose") == 0)
@ -725,7 +725,7 @@ ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumPa
int32 localNodeGroupId = GetLocalGroupId();
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodes)
foreach_declared_ptr(workerNode, workerNodes)
{
if (workerNode->groupId != localNodeGroupId)
{


@ -69,7 +69,7 @@ ViewHasDistributedRelationDependency(ObjectAddress *viewObjectAddress)
List *dependencies = GetAllDependenciesForObject(viewObjectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
if (dependency->classId == RelationRelationId && IsAnyObjectDistributed(
list_make1(dependency)))
@ -304,7 +304,7 @@ DropViewStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL;
List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, dropStmt->objects)
foreach_declared_ptr(possiblyQualifiedViewName, dropStmt->objects)
{
RangeVar *viewRangeVar = makeRangeVarFromNameList(possiblyQualifiedViewName);
Oid viewOid = RangeVarGetRelid(viewRangeVar, AccessShareLock,
@ -332,7 +332,7 @@ FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok)
List *distributedViewNames = NIL;
List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, viewNamesList)
foreach_declared_ptr(possiblyQualifiedViewName, viewNamesList)
{
char *viewName = NULL;
char *schemaName = NULL;


@ -866,7 +866,8 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
*waitCount = 0;
}
WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize);
WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat,
eventSetSize);
EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet),
waitEventSet);
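In these hunks the first argument of CreateWaitEventSet() goes through WaitEventSetTracker_compat instead of being CurrentMemoryContext directly. The compat definition is not part of this diff; assuming it exists because newer PostgreSQL tracks wait event sets by resource owner rather than by memory context, its shape is probably along these lines:

/* sketch only, not the actual Citus definition */
#if PG_VERSION_NUM >= PG_VERSION_17
#define WaitEventSetTracker_compat CurrentResourceOwner
#else
#define WaitEventSetTracker_compat CurrentMemoryContext
#endif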
@ -879,7 +880,7 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
numEventsAdded += 2;
MultiConnectionPollState *connectionState = NULL;
foreach_ptr(connectionState, connections)
foreach_declared_ptr(connectionState, connections)
{
if (numEventsAdded >= eventSetSize)
{
@ -961,7 +962,7 @@ FinishConnectionListEstablishment(List *multiConnectionList)
int waitCount = 0;
MultiConnection *connection = NULL;
foreach_ptr(connection, multiConnectionList)
foreach_declared_ptr(connection, multiConnectionList)
{
MultiConnectionPollState *connectionState =
palloc0(sizeof(MultiConnectionPollState));
@ -1160,7 +1161,7 @@ static void
CloseNotReadyMultiConnectionStates(List *connectionStates)
{
MultiConnectionPollState *connectionState = NULL;
foreach_ptr(connectionState, connectionStates)
foreach_declared_ptr(connectionState, connectionStates)
{
MultiConnection *connection = connectionState->connection;


@ -360,7 +360,7 @@ EnsureConnectionPossibilityForNodeList(List *nodeList)
nodeList = SortList(nodeList, CompareWorkerNodes);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, nodeList)
foreach_declared_ptr(workerNode, nodeList)
{
bool waitForConnection = true;
EnsureConnectionPossibilityForNode(workerNode, waitForConnection);


@ -370,7 +370,7 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn
const char *userName = connection->user;
ShardPlacementAccess *placementAccess = NULL;
foreach_ptr(placementAccess, placementAccessList)
foreach_declared_ptr(placementAccess, placementAccessList)
{
ShardPlacement *placement = placementAccess->placement;
ShardPlacementAccessType accessType = placementAccess->accessType;
@ -533,7 +533,7 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us
* suitable connection found for a placement in the placementAccessList.
*/
ShardPlacementAccess *placementAccess = NULL;
foreach_ptr(placementAccess, placementAccessList)
foreach_declared_ptr(placementAccess, placementAccessList)
{
ShardPlacement *placement = placementAccess->placement;
ShardPlacementAccessType accessType = placementAccess->accessType;


@ -392,7 +392,7 @@ void
ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList)
{
const char *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
ExecuteCriticalRemoteCommand(connection, command);
}
@ -435,7 +435,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma
{
MultiConnection *connection = NULL;
foreach_ptr(connection, nodeConnectionList)
foreach_declared_ptr(connection, nodeConnectionList)
{
int querySent = SendRemoteCommand(connection, command);
@ -446,7 +446,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma
}
/* Process the result */
foreach_ptr(connection, nodeConnectionList)
foreach_declared_ptr(connection, nodeConnectionList)
{
bool raiseInterrupts = true;
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
@ -887,7 +887,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
/* convert connection list to an array such that we can move items around */
MultiConnection *connectionItem = NULL;
foreach_ptr(connectionItem, connectionList)
foreach_declared_ptr(connectionItem, connectionList)
{
allConnections[connectionIndex] = connectionItem;
connectionReady[connectionIndex] = false;
@ -1130,7 +1130,7 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount,
/* allocate pending connections + 2 for the signal latch and postmaster death */
/* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */
WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext,
WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat,
pendingConnectionCount + 2);
for (int connectionIndex = 0; connectionIndex < pendingConnectionCount;


@ -614,16 +614,6 @@ WaitForSharedConnection(void)
void
InitializeSharedConnectionStats(void)
{
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
}
#endif
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = SharedConnectionStatsShmemInit;
}
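The deleted block is the pre-PG15 path where the extension reserved shared memory directly while the library was being loaded. On PG15 and later that reservation is expected to come from a shmem_request_hook instead, which is why only the startup hook registration remains here. A hedged sketch of the PG15+ pattern, with illustrative function names:

/* sketch only: PG15+ shared-memory sizing via shmem_request_hook */
static shmem_request_hook_type prev_shmem_request_hook = NULL;

static void
citus_shared_connection_stats_shmem_request(void)	/* illustrative name */
{
	if (prev_shmem_request_hook != NULL)
	{
		prev_shmem_request_hook();
	}

	RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
}

static void
register_shmem_hooks(void)	/* illustrative; in practice done during _PG_init() */
{
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = citus_shared_connection_stats_shmem_request;
}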


@ -258,10 +258,8 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
char *typeName = format_type_be(pgSequenceForm->seqtypid);
char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND,
#if (PG_VERSION_NUM >= PG_VERSION_15)
get_rel_persistence(sequenceRelationId) ==
RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "",
#endif
qualifiedSequenceName,
typeName,
pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
@ -315,6 +313,7 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
AttrNumber defaultValueIndex = 0;
AttrNumber constraintIndex = 0;
AttrNumber constraintCount = 0;
bool relIsPartition = false;
StringInfoData buffer = { NULL, 0, 0, 0 };
/*
@ -342,6 +341,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
}
appendStringInfo(&buffer, "TABLE %s (", relationName);
relIsPartition = relation->rd_rel->relispartition;
}
else
{
@ -392,10 +393,18 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
GetCompressionMethodName(attributeForm->attcompression));
}
if (attributeForm->attidentity && includeIdentityDefaults)
/*
* If this is an identity column include its identity definition in the
* DDL only if its relation is not a partition. If it is a partition, any
* identity is inherited from the parent table by ATTACH PARTITION. This
* is Postgres 17+ behavior (commit 699586315); prior PG versions did not
* support identity columns in partitioned tables.
*/
if (attributeForm->attidentity && includeIdentityDefaults && !relIsPartition)
{
bool missing_ok = false;
Oid seqOid = getIdentitySequence(RelationGetRelid(relation),
Oid seqOid = getIdentitySequence(identitySequenceRelation_compat(
relation),
attributeForm->attnum, missing_ok);
if (includeIdentityDefaults == INCLUDE_IDENTITY)
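getIdentitySequence() is now called through identitySequenceRelation_compat(relation), whose definition is not shown in this hunk. Assuming the wrapper exists because PG17 changed getIdentitySequence() to take a Relation where older versions took the relation OID, it is probably little more than:

/* sketch only: pick whichever first argument getIdentitySequence() expects */
#if PG_VERSION_NUM >= PG_VERSION_17
#define identitySequenceRelation_compat(relation) (relation)
#else
#define identitySequenceRelation_compat(relation) RelationGetRelid(relation)
#endif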
@ -738,7 +747,18 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
* If the user changed the column's statistics target, create
* alter statement and add statement to a list for later processing.
*/
if (attributeForm->attstattarget >= 0)
HeapTuple atttuple = SearchSysCache2(ATTNUM,
ObjectIdGetDatum(tableRelationId),
Int16GetDatum(attributeForm->attnum));
if (!HeapTupleIsValid(atttuple))
{
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attributeForm->attnum, tableRelationId);
}
int32 targetAttstattarget = getAttstattarget_compat(atttuple);
ReleaseSysCache(atttuple);
if (targetAttstattarget >= 0)
{
StringInfoData statement = { NULL, 0, 0, 0 };
initStringInfo(&statement);
@ -746,7 +766,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
appendStringInfo(&statement, "ALTER COLUMN %s ",
quote_identifier(attributeName));
appendStringInfo(&statement, "SET STATISTICS %d",
attributeForm->attstattarget);
targetAttstattarget);
columnOptionList = lappend(columnOptionList, statement.data);
}
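The per-column statistics target is now read through getAttstattarget_compat() from a pg_attribute syscache tuple instead of straight from the tuple's fixed-size C struct. Assuming this follows from attstattarget becoming a nullable column in newer PostgreSQL, a compat helper could look roughly like the sketch below; the int16 datum width and the -1 default for NULL are assumptions, not taken from this diff:

/* sketch only: read attstattarget from the syscache tuple, treating NULL as -1 */
#if PG_VERSION_NUM >= PG_VERSION_17
static inline int
getAttstattarget_compat(HeapTuple attTuple)
{
	bool isNull = false;
	Datum targetDatum = SysCacheGetAttr(ATTNUM, attTuple,
										Anum_pg_attribute_attstattarget, &isNull);
	return isNull ? -1 : DatumGetInt16(targetDatum);
}
#else
static inline int
getAttstattarget_compat(HeapTuple attTuple)
{
	return ((Form_pg_attribute) GETSTRUCT(attTuple))->attstattarget;
}
#endif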
@ -835,12 +855,10 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
appendStringInfoString(buffer, ") ");
}
#if PG_VERSION_NUM >= PG_VERSION_15
if (indexStmt->nulls_not_distinct)
{
appendStringInfoString(buffer, "NULLS NOT DISTINCT ");
}
#endif /* PG_VERSION_15 */
if (indexStmt->options != NIL)
{
@ -938,7 +956,7 @@ bool
IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
{
DefElem *opt = NULL;
foreach_ptr(opt, reindexStmt->params)
foreach_declared_ptr(opt, reindexStmt->params)
{
if (strcmp(opt->defname, param) == 0)
{
@ -963,7 +981,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
char *tableSpaceName = NULL;
DefElem *opt = NULL;
foreach_ptr(opt, reindexStmt->params)
foreach_declared_ptr(opt, reindexStmt->params)
{
if (strcmp(opt->defname, "tablespace") == 0)
{
@ -1347,6 +1365,10 @@ convert_aclright_to_string(int aclright)
return "TEMPORARY";
case ACL_CONNECT:
return "CONNECT";
#if PG_VERSION_NUM >= PG_VERSION_17
case ACL_MAINTAIN:
return "MAINTAIN";
#endif
default:
elog(ERROR, "unrecognized aclright: %d", aclright);
return NULL;


@ -47,7 +47,7 @@ DeparseTreeNodes(List *stmts)
{
List *sqls = NIL;
Node *stmt = NULL;
foreach_ptr(stmt, stmts)
foreach_declared_ptr(stmt, stmts)
{
sqls = lappend(sqls, DeparseTreeNode(stmt));
}


@ -174,7 +174,7 @@ static void
AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt)
{
DefElem *def = NULL;
foreach_ptr(def, stmt->options)
foreach_declared_ptr(def, stmt->options)
{
DefElemOptionToStatement(buf, def, alterDatabaseOptionFormats, lengthof(
alterDatabaseOptionFormats));
@ -211,7 +211,6 @@ DeparseAlterDatabaseStmt(Node *node)
}
#if PG_VERSION_NUM >= PG_VERSION_15
char *
DeparseAlterDatabaseRefreshCollStmt(Node *node)
{
@ -228,8 +227,6 @@ DeparseAlterDatabaseRefreshCollStmt(Node *node)
}
#endif
static void
AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt)
{
@ -290,7 +287,7 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
quote_identifier(stmt->dbname));
DefElem *option = NULL;
foreach_ptr(option, stmt->options)
foreach_declared_ptr(option, stmt->options)
{
DefElemOptionToStatement(buf, option, createDatabaseOptionFormats,
lengthof(createDatabaseOptionFormats));


@ -70,7 +70,7 @@ DeparseCreateDomainStmt(Node *node)
}
Constraint *constraint = NULL;
foreach_ptr(constraint, stmt->constraints)
foreach_declared_ptr(constraint, stmt->constraints)
{
AppendConstraint(&buf, constraint, stmt->domainname, stmt->typeName);
}
@ -117,7 +117,7 @@ DeparseDropDomainStmt(Node *node)
TypeName *domainName = NULL;
bool first = true;
foreach_ptr(domainName, stmt->objects)
foreach_declared_ptr(domainName, stmt->objects)
{
if (!first)
{


@ -40,7 +40,7 @@ DefElem *
GetExtensionOption(List *extensionOptions, const char *defname)
{
DefElem *defElement = NULL;
foreach_ptr(defElement, extensionOptions)
foreach_declared_ptr(defElement, extensionOptions)
{
if (IsA(defElement, DefElem) &&
strncmp(defElement->defname, defname, NAMEDATALEN) == 0)
@ -112,7 +112,7 @@ AppendCreateExtensionStmtOptions(StringInfo buf, List *options)
/* Add the options to the statement */
DefElem *defElem = NULL;
foreach_ptr(defElem, options)
foreach_declared_ptr(defElem, options)
{
if (strcmp(defElem->defname, "schema") == 0)
{
@ -181,7 +181,7 @@ AppendAlterExtensionStmt(StringInfo buf, AlterExtensionStmt *alterExtensionStmt)
* the options.
*/
DefElem *option = NULL;
foreach_ptr(option, optionsList)
foreach_declared_ptr(option, optionsList)
{
if (strcmp(option->defname, "new_version") == 0)
{


@ -176,7 +176,7 @@ AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt)
DefElemAction action = DEFELEM_UNSPEC;
DefElem *def = NULL;
foreach_ptr(def, stmt->options)
foreach_declared_ptr(def, stmt->options)
{
if (def->defaction != DEFELEM_UNSPEC)
{
@ -242,7 +242,7 @@ static void
AppendServerNames(StringInfo buf, DropStmt *stmt)
{
String *serverValue = NULL;
foreach_ptr(serverValue, stmt->objects)
foreach_declared_ptr(serverValue, stmt->objects)
{
const char *serverString = quote_identifier(strVal(serverValue));
appendStringInfo(buf, "%s", serverString);


@ -32,7 +32,6 @@
static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
bool whereClauseNeedsTransform,
bool includeLocalTables);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects,
bool whereClauseNeedsTransform,
bool includeLocalTables);
@ -40,10 +39,6 @@ static void AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
Node *whereClause,
bool whereClauseNeedsTransform);
static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action);
#else
static bool AppendTables(StringInfo buf, List *tables, bool includeLocalTables);
static void AppendDefElemAction(StringInfo buf, DefElemAction action);
#endif
static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
bool whereClauseNeedsTransform,
bool includeLocalTables);
@ -108,7 +103,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
{
appendStringInfoString(buf, " FOR ALL TABLES");
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
else if (stmt->pubobjects != NIL)
{
bool hasObjects = false;
@ -118,7 +112,7 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
* Check whether there are objects to propagate, mainly to know whether
* we should include "FOR".
*/
foreach_ptr(publicationObject, stmt->pubobjects)
foreach_declared_ptr(publicationObject, stmt->pubobjects)
{
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{
@ -146,32 +140,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
includeLocalTables);
}
}
#else
else if (stmt->tables != NIL)
{
bool hasTables = false;
RangeVar *rangeVar = NULL;
/*
* Check whether there are tables to propagate, mainly to know whether
* we should include "FOR".
*/
foreach_ptr(rangeVar, stmt->tables)
{
if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false))
{
hasTables = true;
break;
}
}
if (hasTables)
{
appendStringInfoString(buf, " FOR");
AppendTables(buf, stmt->tables, includeLocalTables);
}
}
#endif
if (stmt->options != NIL)
{
@ -182,8 +150,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer.
@ -198,7 +164,7 @@ AppendPublicationObjects(StringInfo buf, List *publicationObjects,
PublicationObjSpec *publicationObject = NULL;
bool appendedObject = false;
foreach_ptr(publicationObject, publicationObjects)
foreach_declared_ptr(publicationObject, publicationObjects)
{
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{
@ -320,57 +286,6 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
}
#else
/*
* AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer.
*
* For instance: TABLE users, departments
*/
static bool
AppendTables(StringInfo buf, List *tables, bool includeLocalTables)
{
RangeVar *rangeVar = NULL;
bool appendedObject = false;
foreach_ptr(rangeVar, tables)
{
if (!includeLocalTables &&
!IsCitusTableRangeVar(rangeVar, NoLock, false))
{
/* do not propagate local tables */
continue;
}
char *schemaName = rangeVar->schemaname;
char *tableName = rangeVar->relname;
if (schemaName != NULL)
{
/* qualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_qualified_identifier(schemaName, tableName));
}
else
{
/* unqualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_identifier(tableName));
}
appendedObject = true;
}
return appendedObject;
}
#endif
/*
* DeparseAlterPublicationSchemaStmt builds and returns a string representing
* an AlterPublicationStmt.
@ -439,19 +354,12 @@ AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
return true;
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
AppendAlterPublicationAction(buf, stmt->action);
return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform,
includeLocalTables);
#else
AppendDefElemAction(buf, stmt->tableAction);
return AppendTables(buf, stmt->tables, includeLocalTables);
#endif
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* AppendAlterPublicationAction appends a string representing an AlterPublicationAction
* to a buffer.
@ -487,46 +395,6 @@ AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action)
}
#else
/*
* AppendDefElemAction appends a string representing a DefElemAction
* to a buffer.
*/
static void
AppendDefElemAction(StringInfo buf, DefElemAction action)
{
switch (action)
{
case DEFELEM_ADD:
{
appendStringInfoString(buf, " ADD");
break;
}
case DEFELEM_DROP:
{
appendStringInfoString(buf, " DROP");
break;
}
case DEFELEM_SET:
{
appendStringInfoString(buf, " SET");
break;
}
default:
{
ereport(ERROR, (errmsg("unrecognized publication action: %d", action)));
}
}
}
#endif
/*
* DeparseDropPublicationStmt builds and returns a string representing the DropStmt
*/
@ -651,11 +519,7 @@ AppendPublicationOptions(StringInfo stringBuffer, List *optionList)
appendStringInfo(stringBuffer, "%s = ",
quote_identifier(optionName));
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean)
#else
if (valueType == T_Integer || valueType == T_Float)
#endif
{
/* string escaping is unnecessary for numeric types and can cause issues */
appendStringInfo(stringBuffer, "%s", optionValue);


@ -404,7 +404,7 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt)
if (!stmt->is_grant)
{
DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt)
foreach_declared_ptr(opt, stmt->opt)
{
if (strcmp(opt->defname, "admin") == 0)
{
@ -440,7 +440,7 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt)
#if PG_VERSION_NUM >= PG_VERSION_16
int opt_count = 0;
DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt)
foreach_declared_ptr(opt, stmt->opt)
{
char *optval = defGetString(opt);
bool option_value = false;


@ -152,7 +152,7 @@ AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt)
}
String *schemaValue = NULL;
foreach_ptr(schemaValue, stmt->objects)
foreach_declared_ptr(schemaValue, stmt->objects)
{
const char *schemaString = quote_identifier(strVal(schemaValue));
appendStringInfo(buf, "%s", schemaString);


@ -28,9 +28,7 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt);
#endif
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);
@ -262,8 +260,6 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* DeparseAlterSequencePersistenceStmt builds and returns a string representing
* the AlterTableStmt consisting of changing the persistence of a sequence
@ -349,9 +345,6 @@ AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
}
#endif
/*
* DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
*/


@ -177,8 +177,9 @@ AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
static void
AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt)
{
appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d", NameListToQuotedString(
stmt->defnames), stmt->stxstattarget);
appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d",
NameListToQuotedString(stmt->defnames),
getIntStxstattarget_compat(stmt->stxstattarget));
}
@ -216,7 +217,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
appendStringInfoString(buf, " (");
String *statType = NULL;
foreach_ptr(statType, stmt->stat_types)
foreach_declared_ptr(statType, stmt->stat_types)
{
appendStringInfoString(buf, strVal(statType));
@ -235,7 +236,7 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
{
StatsElem *column = NULL;
foreach_ptr(column, stmt->exprs)
foreach_declared_ptr(column, stmt->exprs)
{
if (!column->name)
{


@ -193,12 +193,10 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
{
appendStringInfoString(buf, " UNIQUE");
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (constraint->nulls_not_distinct == true)
{
appendStringInfoString(buf, " NULLS NOT DISTINCT");
}
#endif
}
if (subtype == AT_AddConstraint)


@ -86,7 +86,7 @@ AppendDefElemList(StringInfo buf, List *defelems, char *objectName)
{
DefElem *defelem = NULL;
bool first = true;
foreach_ptr(defelem, defelems)
foreach_declared_ptr(defelem, defelems)
{
if (!first)
{
@ -133,7 +133,7 @@ DeparseDropTextSearchConfigurationStmt(Node *node)
appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION ");
List *nameList = NIL;
bool first = true;
foreach_ptr(nameList, stmt->objects)
foreach_declared_ptr(nameList, stmt->objects)
{
if (!first)
{
@ -171,7 +171,7 @@ DeparseDropTextSearchDictionaryStmt(Node *node)
appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY ");
List *nameList = NIL;
bool first = true;
foreach_ptr(nameList, stmt->objects)
foreach_declared_ptr(nameList, stmt->objects)
{
if (!first)
{
@ -404,7 +404,7 @@ AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes)
{
String *tokentype = NULL;
bool first = true;
foreach_ptr(tokentype, tokentypes)
foreach_declared_ptr(tokentype, tokentypes)
{
if (nodeTag(tokentype) != T_String)
{
@ -432,7 +432,7 @@ AppendStringInfoDictnames(StringInfo buf, List *dicts)
{
List *dictNames = NIL;
bool first = true;
foreach_ptr(dictNames, dicts)
foreach_declared_ptr(dictNames, dicts)
{
if (!first)
{


@ -88,7 +88,7 @@ AppendViewNameList(StringInfo buf, List *viewNamesList)
{
bool isFirstView = true;
List *qualifiedViewName = NULL;
foreach_ptr(qualifiedViewName, viewNamesList)
foreach_declared_ptr(qualifiedViewName, viewNamesList)
{
char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName);
if (!isFirstView)


@ -83,7 +83,7 @@ QualifyDropCollationStmt(Node *node)
List *names = NIL;
List *name = NIL;
foreach_ptr(name, stmt->objects)
foreach_declared_ptr(name, stmt->objects)
{
names = lappend(names, QualifyCollationName(name));
}


@ -67,7 +67,7 @@ QualifyDropDomainStmt(Node *node)
DropStmt *stmt = castNode(DropStmt, node);
TypeName *domainName = NULL;
foreach_ptr(domainName, stmt->objects)
foreach_declared_ptr(domainName, stmt->objects)
{
QualifyTypeName(domainName, stmt->missing_ok);
}
@ -249,7 +249,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok)
collClause->collname = NIL;
char *name = NULL;
foreach_ptr(name, objName)
foreach_declared_ptr(name, objName)
{
collClause->collname = lappend(collClause->collname, makeString(name));
}


@ -19,11 +19,7 @@
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void QualifyPublicationObjects(List *publicationObjects);
#else
static void QualifyTables(List *tables);
#endif
static void QualifyPublicationRangeVar(RangeVar *publication);
@ -36,16 +32,10 @@ QualifyCreatePublicationStmt(Node *node)
{
CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified.
@ -55,7 +45,7 @@ QualifyPublicationObjects(List *publicationObjects)
{
PublicationObjSpec *publicationObject = NULL;
foreach_ptr(publicationObject, publicationObjects)
foreach_declared_ptr(publicationObject, publicationObjects)
{
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{
@ -68,26 +58,6 @@ QualifyPublicationObjects(List *publicationObjects)
}
#else
/*
* QualifyTables ensures all table names in a list are fully qualified.
*/
static void
QualifyTables(List *tables)
{
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, tables)
{
QualifyPublicationRangeVar(rangeVar);
}
}
#endif
/*
* QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified.
@ -97,11 +67,7 @@ QualifyAlterPublicationStmt(Node *node)
{
AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
}


@ -52,8 +52,6 @@ QualifyAlterSequenceOwnerStmt(Node *node)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* QualifyAlterSequencePersistenceStmt transforms a
* ALTER SEQUENCE .. SET LOGGED/UNLOGGED
@ -80,9 +78,6 @@ QualifyAlterSequencePersistenceStmt(Node *node)
}
#endif
/*
* QualifyAlterSequenceSchemaStmt transforms a
* ALTER SEQUENCE .. SET SCHEMA ..
@ -148,7 +143,7 @@ QualifyDropSequenceStmt(Node *node)
List *objectNameListWithSchema = NIL;
List *objectNameList = NULL;
foreach_ptr(objectNameList, stmt->objects)
foreach_declared_ptr(objectNameList, stmt->objects)
{
RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -192,7 +187,7 @@ QualifyGrantOnSequenceStmt(Node *node)
}
List *qualifiedSequenceRangeVars = NIL;
RangeVar *sequenceRangeVar = NULL;
foreach_ptr(sequenceRangeVar, stmt->objects)
foreach_declared_ptr(sequenceRangeVar, stmt->objects)
{
if (sequenceRangeVar->schemaname == NULL)
{


@ -73,7 +73,7 @@ QualifyDropStatisticsStmt(Node *node)
List *objectNameListWithSchema = NIL;
List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects)
foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{
RangeVar *stat = makeRangeVarFromNameList(objectNameList);


@ -46,7 +46,7 @@ QualifyDropTextSearchConfigurationStmt(Node *node)
List *qualifiedObjects = NIL;
List *objName = NIL;
foreach_ptr(objName, stmt->objects)
foreach_declared_ptr(objName, stmt->objects)
{
char *schemaName = NULL;
char *tsconfigName = NULL;
@ -87,7 +87,7 @@ QualifyDropTextSearchDictionaryStmt(Node *node)
List *qualifiedObjects = NIL;
List *objName = NIL;
foreach_ptr(objName, stmt->objects)
foreach_declared_ptr(objName, stmt->objects)
{
char *schemaName = NULL;
char *tsdictName = NULL;
@ -141,7 +141,7 @@ QualifyAlterTextSearchConfigurationStmt(Node *node)
bool useNewDicts = false;
List *dicts = NULL;
List *dictName = NIL;
foreach_ptr(dictName, stmt->dicts)
foreach_declared_ptr(dictName, stmt->dicts)
{
DeconstructQualifiedName(dictName, &schemaName, &objName);


@ -31,7 +31,7 @@ QualifyDropViewStmt(Node *node)
List *qualifiedViewNames = NIL;
List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, stmt->objects)
foreach_declared_ptr(possiblyQualifiedViewName, stmt->objects)
{
char *viewName = NULL;
char *schemaName = NULL;


@ -718,10 +718,8 @@ static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
static void AddLatchWaitEventToExecution(DistributedExecution *execution);
static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int
eventCount, bool *cancellationReceived);
#if PG_VERSION_NUM >= PG_VERSION_15
static void RemoteSocketClosedForAnySession(DistributedExecution *execution);
static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount);
#endif
static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey);
@ -1430,7 +1428,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
List *taskList = execution->remoteTaskList;
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
bool placementExecutionReady = true;
int placementExecutionIndex = 0;
@ -1453,7 +1451,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
SetAttributeInputMetadata(execution, shardCommandExecution);
ShardPlacement *taskPlacement = NULL;
foreach_ptr(taskPlacement, task->taskPlacementList)
foreach_declared_ptr(taskPlacement, task->taskPlacementList)
{
int connectionFlags = 0;
char *nodeName = NULL;
@ -1598,7 +1596,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
* connection may be be returned multiple times by GetPlacementListConnectionIfCached.
*/
WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList)
foreach_declared_ptr(session, execution->sessionList)
{
MultiConnection *connection = session->connection;
@ -1721,7 +1719,7 @@ static WorkerPool *
FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int nodePort)
{
WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList)
foreach_declared_ptr(workerPool, execution->workerList)
{
if (strncmp(nodeName, workerPool->nodeName, WORKER_LENGTH) == 0 &&
nodePort == workerPool->nodePort)
@ -1768,7 +1766,7 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
static uint64 sessionId = 1;
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
if (session->connection == connection)
{
@ -1784,11 +1782,8 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
session->commandsSent = 0;
session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */
UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED);
#endif
dlist_init(&session->pendingTaskQueue);
dlist_init(&session->readyTaskQueue);
@ -1832,7 +1827,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
* the events, even ignores cancellation events. Future callers of this
* function should consider its limitations.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
static void
RemoteSocketClosedForAnySession(DistributedExecution *execution)
{
@ -1850,9 +1844,6 @@ RemoteSocketClosedForAnySession(DistributedExecution *execution)
}
#endif
/*
* SequentialRunDistributedExecution gets a distributed execution and
* executes each individual task in the execution sequentially, one
@ -1871,7 +1862,7 @@ SequentialRunDistributedExecution(DistributedExecution *execution)
*/
MultiShardConnectionType = SEQUENTIAL_CONNECTION;
Task *taskToExecute = NULL;
foreach_ptr(taskToExecute, taskList)
foreach_declared_ptr(taskToExecute, taskList)
{
execution->remoteAndLocalTaskList = list_make1(taskToExecute);
execution->remoteTaskList = list_make1(taskToExecute);
@ -1911,7 +1902,7 @@ RunDistributedExecution(DistributedExecution *execution)
{
/* Preemptively step state machines in case of immediate errors */
WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList)
foreach_declared_ptr(session, execution->sessionList)
{
ConnectionStateMachine(session);
}
@ -1943,7 +1934,7 @@ RunDistributedExecution(DistributedExecution *execution)
HasIncompleteConnectionEstablishment(execution)))
{
WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList)
foreach_declared_ptr(workerPool, execution->workerList)
{
ManageWorkerPool(workerPool);
}
@ -2028,7 +2019,7 @@ ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *execution)
{
bool foundFailedSession = false;
WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList)
foreach_declared_ptr(session, execution->sessionList)
{
if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
{
@ -2072,7 +2063,7 @@ HasIncompleteConnectionEstablishment(DistributedExecution *execution)
}
WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList)
foreach_declared_ptr(session, execution->sessionList)
{
MultiConnection *connection = session->connection;
if (connection->connectionState == MULTI_CONNECTION_INITIAL ||
@ -2188,8 +2179,6 @@ ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int eventC
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event.
* If WL_SOCKET_CLOSED is found, the function sets the underlying connection's
@ -2222,9 +2211,6 @@ ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount)
}
#endif
/*
* ManageWorkerPool ensures the worker pool has the appropriate number of connections
* based on the number of pending tasks.
@ -2550,7 +2536,7 @@ AvgTaskExecutionTimeApproximation(WorkerPool *workerPool)
INSTR_TIME_SET_CURRENT(now);
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
/*
* Involve the tasks that are currently running. We do this to
@ -2588,7 +2574,7 @@ AvgConnectionEstablishmentTime(WorkerPool *workerPool)
int sessionCount = 0;
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
MultiConnection *connection = session->connection;
@ -2719,7 +2705,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* Instead, we prefer this slight difference, which in effect has almost no
* difference, but doing things in different points in time.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
/* we added new connections, rebuild the waitEventSet */
RebuildWaitEventSetForSessions(execution);
@ -2739,12 +2724,9 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* of the execution.
*/
AddLatchWaitEventToExecution(execution);
#else
execution->rebuildWaitEventSet = true;
#endif
WorkerSession *session = NULL;
foreach_ptr(session, newSessionsList)
foreach_declared_ptr(session, newSessionsList)
{
/* immediately run the state machine to handle potential failure */
ConnectionStateMachine(session);
@ -2862,7 +2844,7 @@ static void
MarkEstablishingSessionsTimedOut(WorkerPool *workerPool)
{
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
MultiConnection *connection = session->connection;
@ -2914,7 +2896,7 @@ NextEventTimeout(DistributedExecution *execution)
long eventTimeout = 1000; /* milliseconds */
WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList)
foreach_declared_ptr(workerPool, execution->workerList)
{
if (workerPool->failureState == WORKER_POOL_FAILED)
{
@ -3678,13 +3660,8 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
return;
}
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */
connection->waitFlags = waitFlags | WL_SOCKET_CLOSED;
#else
connection->waitFlags = waitFlags;
#endif
/* without signalling the execution, the flag changes won't be reflected */
execution->waitFlagsChanged = true;
@ -3709,13 +3686,11 @@ CheckConnectionReady(WorkerSession *session)
return false;
}
#if PG_VERSION_NUM >= PG_VERSION_15
if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0)
{
connection->connectionState = MULTI_CONNECTION_LOST;
return false;
}
#endif
/* try to send all pending data */
int sendStatus = PQflush(connection->pgConn);
@ -4255,7 +4230,7 @@ WorkerPoolFailed(WorkerPool *workerPool)
}
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
WorkerSessionFailed(session);
}
@ -4280,7 +4255,7 @@ WorkerPoolFailed(WorkerPool *workerPool)
List *workerList = workerPool->distributedExecution->workerList;
WorkerPool *pool = NULL;
foreach_ptr(pool, workerList)
foreach_declared_ptr(pool, workerList)
{
/* failed pools or pools without any connection attempts ignored */
if (pool->failureState == WORKER_POOL_FAILED ||
@ -4633,7 +4608,7 @@ PlacementExecutionReady(TaskPlacementExecution *placementExecution)
/* wake up an idle connection by checking whether the connection is writeable */
WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList)
foreach_declared_ptr(session, workerPool->sessionList)
{
MultiConnection *connection = session->connection;
RemoteTransaction *transaction = &(connection->remoteTransaction);
@ -4755,10 +4730,10 @@ BuildWaitEventSet(List *sessionList)
int eventSetSize = GetEventSetSize(sessionList);
WaitEventSet *waitEventSet =
CreateWaitEventSet(CurrentMemoryContext, eventSetSize);
CreateWaitEventSet(WaitEventSetTracker_compat, eventSetSize);
WorkerSession *session = NULL;
foreach_ptr(session, sessionList)
foreach_declared_ptr(session, sessionList)
{
AddSessionToWaitEventSet(session, waitEventSet);
}
@ -4856,7 +4831,7 @@ static void
RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
{
WorkerSession *session = NULL;
foreach_ptr(session, sessionList)
foreach_declared_ptr(session, sessionList)
{
MultiConnection *connection = session->connection;
int waitEventSetIndex = session->waitEventSetIndex;
@ -4912,7 +4887,7 @@ CleanUpSessions(DistributedExecution *execution)
/* always trigger wait event set in the first round */
WorkerSession *session = NULL;
foreach_ptr(session, sessionList)
foreach_declared_ptr(session, sessionList)
{
MultiConnection *connection = session->connection;
@ -4993,7 +4968,7 @@ static void
UnclaimAllSessionConnections(List *sessionList)
{
WorkerSession *session = NULL;
foreach_ptr(session, sessionList)
foreach_declared_ptr(session, sessionList)
{
MultiConnection *connection = session->connection;


@ -524,7 +524,7 @@ static bool
AnchorShardsInTaskListExist(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
if (!ShardExists(task->anchorShardId))
{


@ -94,7 +94,7 @@ FindExecutableTasks(List *allTasks, HTAB *completedTasks)
List *curTasks = NIL;
Task *task = NULL;
foreach_ptr(task, allTasks)
foreach_declared_ptr(task, allTasks)
{
if (IsAllDependencyCompleted(task, completedTasks) &&
!IsTaskAlreadyCompleted(task, completedTasks))
@ -118,7 +118,7 @@ RemoveMergeTasks(List *taskList)
List *prunedTaskList = NIL;
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
if (task->taskType != MERGE_TASK)
{
@ -139,7 +139,7 @@ AddCompletedTasks(List *curCompletedTasks, HTAB *completedTasks)
bool found;
Task *task = NULL;
foreach_ptr(task, curCompletedTasks)
foreach_declared_ptr(task, curCompletedTasks)
{
TaskHashKey taskKey = { task->jobId, task->taskId };
hash_search(completedTasks, &taskKey, HASH_ENTER, &found);
@ -172,7 +172,7 @@ IsAllDependencyCompleted(Task *targetTask, HTAB *completedTasks)
bool found = false;
Task *task = NULL;
foreach_ptr(task, targetTask->dependentTaskList)
foreach_declared_ptr(task, targetTask->dependentTaskList)
{
TaskHashKey taskKey = { task->jobId, task->taskId };


@ -198,7 +198,7 @@ AcquireExecutorShardLocksForExecution(RowModifyLevel modLevel, List *taskList)
List *requiresConsistentSnapshotRelationShardList = NIL;
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
ShardInterval *anchorShardInterval = LoadShardInterval(task->anchorShardId);
anchorShardIntervalList = lappend(anchorShardIntervalList, anchorShardInterval);
@ -344,7 +344,7 @@ AcquireMetadataLocks(List *taskList)
*/
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
LockShardDistributionMetadata(task->anchorShardId, ShareLock);
}
@ -379,7 +379,7 @@ AcquireExecutorShardLocksForRelationRowLockList(List *relationRowLockList)
* them.
*/
RelationRowLock *relationRowLock = NULL;
foreach_ptr(relationRowLock, relationRowLockList)
foreach_declared_ptr(relationRowLock, relationRowLockList)
{
LockClauseStrength rowLockStrength = relationRowLock->rowLockStrength;
Oid relationId = relationRowLock->relationId;
@ -412,7 +412,7 @@ void
LockPartitionsInRelationList(List *relationIdList, LOCKMODE lockmode)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
foreach_declared_oid(relationId, relationIdList)
{
if (PartitionedTable(relationId))
{
@ -437,7 +437,7 @@ LockPartitionRelations(Oid relationId, LOCKMODE lockMode)
*/
List *partitionList = PartitionList(relationId);
Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList)
foreach_declared_oid(partitionRelationId, partitionList)
{
LockRelationOid(partitionRelationId, lockMode);
}


@ -206,7 +206,7 @@ WrapTasksForPartitioning(const char *resultIdPrefix, List *selectTaskList,
intervalTypeMod);
Task *selectTask = NULL;
foreach_ptr(selectTask, selectTaskList)
foreach_declared_ptr(selectTask, selectTaskList)
{
char *taskPrefix = SourceShardPrefix(resultIdPrefix, selectTask->anchorShardId);
char *partitionMethodString = targetRelation->partitionMethod == 'h' ?
@ -490,7 +490,7 @@ ColocateFragmentsWithRelation(List *fragmentList, CitusTableCacheEntry *targetRe
List **shardResultIdList = palloc0(shardCount * sizeof(List *));
DistributedResultFragment *sourceFragment = NULL;
foreach_ptr(sourceFragment, fragmentList)
foreach_declared_ptr(sourceFragment, fragmentList)
{
int shardIndex = sourceFragment->targetShardIndex;
@ -520,11 +520,11 @@ ColocationTransfers(List *fragmentList, CitusTableCacheEntry *targetRelation)
HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
DistributedResultFragment *fragment = NULL;
foreach_ptr(fragment, fragmentList)
foreach_declared_ptr(fragment, fragmentList)
{
List *placementList = ActiveShardPlacementList(fragment->targetShardId);
ShardPlacement *placement = NULL;
foreach_ptr(placement, placementList)
foreach_declared_ptr(placement, placementList)
{
NodePair transferKey = {
.sourceNodeId = fragment->nodeId,
@ -576,7 +576,7 @@ FragmentTransferTaskList(List *fragmentListTransfers)
List *fetchTaskList = NIL;
NodeToNodeFragmentsTransfer *fragmentsTransfer = NULL;
foreach_ptr(fragmentsTransfer, fragmentListTransfers)
foreach_declared_ptr(fragmentsTransfer, fragmentListTransfers)
{
uint32 targetNodeId = fragmentsTransfer->nodes.targetNodeId;
@ -629,7 +629,7 @@ QueryStringForFragmentsTransfer(NodeToNodeFragmentsTransfer *fragmentsTransfer)
appendStringInfoString(fragmentNamesArrayString, "ARRAY[");
DistributedResultFragment *fragment = NULL;
foreach_ptr(fragment, fragmentsTransfer->fragmentList)
foreach_declared_ptr(fragment, fragmentsTransfer->fragmentList)
{
const char *fragmentName = fragment->resultId;


@ -163,7 +163,7 @@ bool
TaskListCannotBeExecutedInTransaction(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
if (task->cannotBeExecutedInTransaction)
{
@ -190,7 +190,7 @@ SelectForUpdateOnReferenceTable(List *taskList)
Task *task = (Task *) linitial(taskList);
RelationRowLock *relationRowLock = NULL;
foreach_ptr(relationRowLock, task->relationRowLockList)
foreach_declared_ptr(relationRowLock, task->relationRowLockList)
{
Oid relationId = relationRowLock->relationId;
@ -239,7 +239,7 @@ bool
ModifiedTableReplicated(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
int64 shardId = task->anchorShardId;


@ -239,7 +239,7 @@ NonPushableInsertSelectExecScan(CustomScanState *node)
* on shards with connections.
*/
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
uint64 shardId = task->anchorShardId;
bool shardModified = false;
@ -376,7 +376,7 @@ BuildColumnNameListFromTargetList(Oid targetRelationId, List *insertTargetList)
/* build the list of column names for the COPY statement */
TargetEntry *insertTargetEntry = NULL;
foreach_ptr(insertTargetEntry, insertTargetList)
foreach_declared_ptr(insertTargetEntry, insertTargetList)
{
columnNameList = lappend(columnNameList, insertTargetEntry->resname);
}
@ -397,7 +397,7 @@ PartitionColumnIndexFromColumnList(Oid relationId, List *columnNameList)
int partitionColumnIndex = 0;
const char *columnName = NULL;
foreach_ptr(columnName, columnNameList)
foreach_declared_ptr(columnName, columnNameList)
{
AttrNumber attrNumber = get_attnum(relationId, columnName);
@ -423,7 +423,7 @@ DistributionColumnIndex(List *insertTargetList, Var *distributionColumn)
{
TargetEntry *insertTargetEntry = NULL;
int targetEntryIndex = 0;
foreach_ptr(insertTargetEntry, insertTargetList)
foreach_declared_ptr(insertTargetEntry, insertTargetList)
{
if (insertTargetEntry->resno == distributionColumn->varattno)
{
@ -447,7 +447,7 @@ WrapTaskListForProjection(List *taskList, List *projectedTargetEntries)
StringInfo projectedColumnsString = makeStringInfo();
int entryIndex = 0;
TargetEntry *targetEntry = NULL;
foreach_ptr(targetEntry, projectedTargetEntries)
foreach_declared_ptr(targetEntry, projectedTargetEntries)
{
if (entryIndex != 0)
{
@ -462,7 +462,7 @@ WrapTaskListForProjection(List *taskList, List *projectedTargetEntries)
}
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
StringInfo wrappedQuery = makeStringInfo();
appendStringInfo(wrappedQuery, "SELECT %s FROM (%s) subquery",


@ -306,7 +306,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
}
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, initialNodeList)
foreach_declared_ptr(workerNode, initialNodeList)
{
int flags = 0;
@ -326,7 +326,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
RemoteTransactionsBeginIfNecessary(connectionList);
MultiConnection *connection = NULL;
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
StringInfo copyCommand = ConstructCopyResultStatement(resultId);
@ -337,7 +337,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
}
}
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
bool raiseInterrupts = true;
@ -516,7 +516,7 @@ static void
BroadcastCopyData(StringInfo dataBuffer, List *connectionList)
{
MultiConnection *connection = NULL;
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
SendCopyDataOverConnection(dataBuffer, connection);
}
@ -712,7 +712,7 @@ void
RemoveIntermediateResultsDirectories(void)
{
char *directoryElement = NULL;
foreach_ptr(directoryElement, CreatedResultsDirectories)
foreach_declared_ptr(directoryElement, CreatedResultsDirectories)
{
/*
* The shared directory is renamed before deleting it. Otherwise it


@ -253,7 +253,7 @@ ExecuteLocalTaskListExtended(List *taskList,
ALLOCSET_DEFAULT_SIZES);
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
MemoryContext oldContext = MemoryContextSwitchTo(loopContext);
@ -304,7 +304,7 @@ ExecuteLocalTaskListExtended(List *taskList,
LOCKMODE lockMode = GetQueryLockMode(jobQuery);
Oid relationId = InvalidOid;
foreach_oid(relationId, localPlan->relationOids)
foreach_declared_oid(relationId, localPlan->relationOids)
{
LockRelationOid(relationId, lockMode);
}
@ -393,7 +393,7 @@ SetColocationIdAndPartitionKeyValueForTasks(List *taskList, Job *workerJob)
if (workerJob->colocationId != INVALID_COLOCATION_ID)
{
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
task->colocationId = workerJob->colocationId;
task->partitionKeyValue = workerJob->partitionKeyValue;
@ -412,7 +412,7 @@ LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tuple
{
char *queryString = NULL;
uint64 totalProcessedRows = 0;
foreach_ptr(queryString, queryStrings)
foreach_declared_ptr(queryString, queryStrings)
{
Query *shardQuery = ParseQueryString(queryString,
NULL,
@ -490,7 +490,7 @@ ExecuteUtilityCommand(const char *taskQueryCommand)
List *parseTreeList = pg_parse_query(taskQueryCommand);
RawStmt *taskRawStmt = NULL;
foreach_ptr(taskRawStmt, parseTreeList)
foreach_declared_ptr(taskRawStmt, parseTreeList)
{
Node *taskRawParseTree = taskRawStmt->stmt;
@ -580,7 +580,7 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList,
*localTaskList = NIL;
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
List *localTaskPlacementList = NULL;
List *remoteTaskPlacementList = NULL;
@ -645,7 +645,7 @@ SplitLocalAndRemotePlacements(List *taskPlacementList, List **localTaskPlacement
*remoteTaskPlacementList = NIL;
ShardPlacement *taskPlacement = NULL;
foreach_ptr(taskPlacement, taskPlacementList)
foreach_declared_ptr(taskPlacement, taskPlacementList)
{
if (taskPlacement->groupId == localGroupId)
{
@ -817,7 +817,7 @@ RecordNonDistTableAccessesForTask(Task *task)
List *placementAccessList = PlacementAccessListForTask(task, taskPlacement);
ShardPlacementAccess *placementAccess = NULL;
foreach_ptr(placementAccess, placementAccessList)
foreach_declared_ptr(placementAccess, placementAccessList)
{
uint64 placementAccessShardId = placementAccess->placement->shardId;
if (placementAccessShardId == INVALID_SHARD_ID)
@ -968,7 +968,7 @@ AnyTaskAccessesLocalNode(List *taskList)
{
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
if (TaskAccessesLocalNode(task))
{
@ -990,7 +990,7 @@ TaskAccessesLocalNode(Task *task)
int32 localGroupId = GetLocalGroupId();
ShardPlacement *taskPlacement = NULL;
foreach_ptr(taskPlacement, task->taskPlacementList)
foreach_declared_ptr(taskPlacement, task->taskPlacementList)
{
if (taskPlacement->groupId == localGroupId)
{

View File

@ -219,6 +219,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
copyObject(distributedPlan->selectPlanForModifyViaCoordinatorOrRepartition);
char *intermediateResultIdPrefix = distributedPlan->intermediateResultIdPrefix;
bool hasReturning = distributedPlan->expectResults;
bool hasNotMatchedBySource = HasMergeNotMatchedBySource(mergeQuery);
int partitionColumnIndex = distributedPlan->sourceResultRepartitionColumnIndex;
/*
@ -233,7 +234,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
ereport(DEBUG1, (errmsg("Collect source query results on coordinator")));
List *prunedTaskList = NIL;
List *prunedTaskList = NIL, *emptySourceTaskList = NIL;
HTAB *shardStateHash =
ExecuteMergeSourcePlanIntoColocatedIntermediateResults(
targetRelationId,
@ -255,10 +256,11 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
* We cannot actually execute MERGE INTO ... tasks that read from
* intermediate results that weren't created because no rows were
* written to them. Prune those tasks out by only including tasks
* on shards with connections.
* on shards with connections; however, if the MERGE INTO includes
* a NOT MATCHED BY SOURCE clause, we need to include the task.
*/
Task *task = NULL;
foreach_ptr(task, taskList)
foreach_declared_ptr(task, taskList)
{
uint64 shardId = task->anchorShardId;
bool shardModified = false;
@ -268,6 +270,19 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
{
prunedTaskList = lappend(prunedTaskList, task);
}
else if (hasNotMatchedBySource)
{
emptySourceTaskList = lappend(emptySourceTaskList, task);
}
}
if (emptySourceTaskList != NIL)
{
ereport(DEBUG1, (errmsg("MERGE has NOT MATCHED BY SOURCE clause, "
"execute MERGE on all shards")));
AdjustTaskQueryForEmptySource(targetRelationId, mergeQuery, emptySourceTaskList,
intermediateResultIdPrefix);
prunedTaskList = list_concat(prunedTaskList, emptySourceTaskList);
}
if (prunedTaskList == NIL)
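The pruning change above only matters for MERGE statements that carry a WHEN NOT MATCHED BY SOURCE action, new in PostgreSQL 17: that action fires for target rows with no counterpart in the source, so a target shard whose colocated source fragment received no rows still has work to do and cannot be skipped. An illustrative statement, with hypothetical table and column names:

/* the last action deletes target rows that no longer appear in the source,
 * which must also run on shards whose source fragment is empty */
const char *exampleMergeCommand =
	"MERGE INTO orders AS t USING staged_orders AS s ON t.order_id = s.order_id "
	"WHEN MATCHED THEN UPDATE SET status = s.status "
	"WHEN NOT MATCHED THEN INSERT (order_id, status) VALUES (s.order_id, s.status) "
	"WHEN NOT MATCHED BY SOURCE THEN DELETE;";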

View File

@ -224,7 +224,7 @@ CitusExecutorRun(QueryDesc *queryDesc,
*/
List *citusCustomScanStates = FindCitusCustomScanStates(queryDesc->planstate);
CitusScanState *citusScanState = NULL;
foreach_ptr(citusScanState, citusCustomScanStates)
foreach_declared_ptr(citusScanState, citusCustomScanStates)
{
if (citusScanState->PreExecScan)
{
@ -512,7 +512,7 @@ SortTupleStore(CitusScanState *scanState)
* for sorting the tuples.
*/
TargetEntry *returningEntry = NULL;
foreach_ptr(returningEntry, targetList)
foreach_declared_ptr(returningEntry, targetList)
{
Oid sortop = InvalidOid;

View File

@ -126,7 +126,7 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList,
List *placementAccessList = NIL;
RelationShard *relationShard = NULL;
foreach_ptr(relationShard, relationShardList)
foreach_declared_ptr(relationShard, relationShardList)
{
ShardPlacement *placement = ActiveShardPlacementOnGroup(groupId,
relationShard->shardId);

View File

@ -140,19 +140,6 @@ static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash);
void
InitializeCitusQueryStats(void)
{
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
}
#endif
/* Install hook */
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = CitusQueryStatsShmemStartup;
@ -759,9 +746,6 @@ citus_query_stats(PG_FUNCTION_ARGS)
LWLockRelease(queryStats->lock);
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
return (Datum) 0;
}
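The branch deleted above covered PostgreSQL versions before 15, which had no shmem_request_hook; on 15 and later, add-ins must reserve shared memory and named LWLock tranches from that hook rather than at startup-hook installation time, so only the hook path remains. A minimal sketch of the hook-based request, assuming CitusQueryStatsSharedMemSize() and STATS_SHARED_MEM_NAME from the surrounding file; the function names and registration site below are illustrative:

#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

static shmem_request_hook_type prev_shmem_request_hook = NULL;

static void
citus_query_stats_shmem_request(void)
{
	if (prev_shmem_request_hook != NULL)
	{
		prev_shmem_request_hook();
	}

	/* both symbols below come from the surrounding file */
	RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
	RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
}

/* typically registered once from _PG_init() */
static void
RegisterQueryStatsShmemRequest(void)
{
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = citus_query_stats_shmem_request;
}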

View File

@ -17,6 +17,7 @@
#include "nodes/parsenodes.h"
#include "distributed/citus_custom_scan.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/intermediate_results.h"
#include "distributed/listutils.h"
#include "distributed/multi_physical_planner.h"
@ -101,6 +102,40 @@ IsRedistributablePlan(Plan *selectPlan)
}
/*
* HasMergeNotMatchedBySource returns true if the MERGE query has a
* WHEN NOT MATCHED BY SOURCE clause. If it does, we need to execute
* the MERGE query on all shards of the target table, regardless of
* whether or not the source shard has any rows.
*/
bool
HasMergeNotMatchedBySource(Query *query)
{
if (!IsMergeQuery(query))
{
return false;
}
bool haveNotMatchedBySource = false;
#if PG_VERSION_NUM >= PG_VERSION_17
ListCell *lc;
foreach(lc, query->mergeActionList)
{
MergeAction *action = lfirst_node(MergeAction, lc);
if (action->matchKind == MERGE_WHEN_NOT_MATCHED_BY_SOURCE)
{
haveNotMatchedBySource = true;
break;
}
}
#endif
return haveNotMatchedBySource;
}
/*
* GenerateTaskListWithColocatedIntermediateResults generates a list of tasks
* for a query that inserts into a target relation and selects from a set of
@ -200,6 +235,61 @@ GenerateTaskListWithColocatedIntermediateResults(Oid targetRelationId,
}
/*
* AdjustTaskQueryForEmptySource adjusts the query for tasks that read from an
* intermediate result so that they read from an empty relation instead. This
* ensures that a MERGE query with a NOT MATCHED BY SOURCE clause is executed
* on all shards of the target table: on target shards whose source fragment
* has no rows, that clause applies to every target row.
*/
void
AdjustTaskQueryForEmptySource(Oid targetRelationId,
Query *mergeQuery,
List *tasks,
char *resultIdPrefix)
{
Query *mergeQueryCopy = copyObject(mergeQuery);
RangeTblEntry *selectRte = ExtractSourceResultRangeTableEntry(mergeQueryCopy);
RangeTblEntry *mergeRte = ExtractResultRelationRTE(mergeQueryCopy);
List *targetList = selectRte->subquery->targetList;
ListCell *taskCell = NULL;
foreach(taskCell, tasks)
{
Task *task = lfirst(taskCell);
uint64 shardId = task->anchorShardId;
StringInfo queryString = makeStringInfo();
StringInfo resultId = makeStringInfo();
appendStringInfo(resultId, "%s_" UINT64_FORMAT, resultIdPrefix, shardId);
/* Generate a query for an empty relation */
selectRte->subquery = BuildEmptyResultQuery(targetList, resultId->data);
/* setting an alias simplifies deparsing of RETURNING */
if (mergeRte->alias == NULL)
{
Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL);
mergeRte->alias = alias;
}
/*
* Generate a query string for the query that merges into a shard and reads
* from an empty relation.
*
* Since CTEs have already been converted to intermediate results, they need
* to be removed from the query. Otherwise, worker queries include both
* intermediate results and CTEs in the query.
*/
mergeQueryCopy->cteList = NIL;
deparse_shard_query(mergeQueryCopy, targetRelationId, shardId, queryString);
ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data)));
SetTaskQueryString(task, queryString->data);
}
}
/*
* GenerateTaskListWithRedistributedResults returns a task list to insert given
* redistributedResults into the given target relation.
@ -223,6 +313,7 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
Query *modifyResultQuery = copyObject(modifyQueryViaCoordinatorOrRepartition);
RangeTblEntry *insertRte = ExtractResultRelationRTE(modifyResultQuery);
Oid targetRelationId = targetRelation->relationId;
bool hasNotMatchedBySource = HasMergeNotMatchedBySource(modifyResultQuery);
int shardCount = targetRelation->shardIntervalArrayLength;
int shardOffset = 0;
@ -242,19 +333,33 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
StringInfo queryString = makeStringInfo();
/* skip empty tasks */
if (resultIdList == NIL)
if (resultIdList == NIL && !hasNotMatchedBySource)
{
continue;
}
/* sort result ids for consistent test output */
List *sortedResultIds = SortList(resultIdList, pg_qsort_strcmp);
Query *fragmentSetQuery = NULL;
/* generate the query on the intermediate result */
Query *fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
NIL,
sortedResultIds,
useBinaryFormat);
if (resultIdList != NIL)
{
/* sort result ids for consistent test output */
List *sortedResultIds = SortList(resultIdList, pg_qsort_strcmp);
/* generate the query on the intermediate result */
fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
NIL,
sortedResultIds,
useBinaryFormat);
}
else
{
/* No source data, but MERGE query has NOT MATCHED BY SOURCE */
StringInfo emptyFragmentId = makeStringInfo();
appendStringInfo(emptyFragmentId, "%s_" UINT64_FORMAT, "temp_empty_rel_",
shardId);
fragmentSetQuery = BuildEmptyResultQuery(selectTargetList,
emptyFragmentId->data);
}
/* put the intermediate result query in the INSERT..SELECT */
selectRte->subquery = fragmentSetQuery;
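A minimal usage sketch of the new HasMergeNotMatchedBySource() helper, relying on this file's existing includes and on the ParseQueryString(commandString, paramOids, numParams) signature seen elsewhere in this diff; the statement text and table names are hypothetical:

/* on PostgreSQL releases before 17 the helper always returns false */
static bool
ExampleUsesNotMatchedBySource(void)
{
	Query *mergeQuery = ParseQueryString(
		"MERGE INTO target_table t USING source_table s ON t.id = s.id "
		"WHEN MATCHED THEN UPDATE SET value = s.value "
		"WHEN NOT MATCHED BY SOURCE THEN DELETE;",
		NULL, 0);

	return HasMergeNotMatchedBySource(mergeQuery);
}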

View File

@ -93,7 +93,7 @@ TraverseJobTree(Job *curJob, List **jobIds)
*jobIds = lappend(*jobIds, jobIdPointer);
Job *childJob = NULL;
foreach_ptr(childJob, curJob->dependentJobList)
foreach_declared_ptr(childJob, curJob->dependentJobList)
{
TraverseJobTree(childJob, jobIds);
}

View File

@ -59,7 +59,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan)
UseCoordinatedTransaction();
DistributedSubPlan *subPlan = NULL;
foreach_ptr(subPlan, subPlanList)
foreach_declared_ptr(subPlan, subPlanList)
{
PlannedStmt *plannedStmt = subPlan->plan;
uint32 subPlanId = subPlan->subPlanId;

View File

@ -207,7 +207,7 @@ GetUniqueDependenciesList(List *objectAddressesList)
InitObjectAddressCollector(&objectAddressCollector);
ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, objectAddressesList)
foreach_declared_ptr(objectAddress, objectAddressesList)
{
if (IsObjectAddressCollected(*objectAddress, &objectAddressCollector))
{
@ -334,7 +334,7 @@ OrderObjectAddressListInDependencyOrder(List *objectAddressList)
InitObjectAddressCollector(&collector);
ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, objectAddressList)
foreach_declared_ptr(objectAddress, objectAddressList)
{
if (IsObjectAddressCollected(*objectAddress, &collector))
{
@ -403,7 +403,7 @@ RecurseObjectDependencies(ObjectAddress target, expandFn expand, followFn follow
/* iterate all entries and recurse depth first */
DependencyDefinition *dependencyDefinition = NULL;
foreach_ptr(dependencyDefinition, dependenyDefinitionList)
foreach_declared_ptr(dependencyDefinition, dependenyDefinitionList)
{
if (follow == NULL || !follow(collector, dependencyDefinition))
{
@ -869,7 +869,7 @@ bool
ErrorOrWarnIfAnyObjectHasUnsupportedDependency(List *objectAddresses)
{
ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, objectAddresses)
foreach_declared_ptr(objectAddress, objectAddresses)
{
if (ErrorOrWarnIfObjectHasUnsupportedDependency(objectAddress))
{
@ -962,7 +962,7 @@ DeferErrorIfAnyObjectHasUnsupportedDependency(const List *objectAddresses)
{
DeferredErrorMessage *deferredErrorMessage = NULL;
ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, objectAddresses)
foreach_declared_ptr(objectAddress, objectAddresses)
{
deferredErrorMessage = DeferErrorIfHasUnsupportedDependency(objectAddress);
if (deferredErrorMessage)
@ -995,7 +995,7 @@ GetUndistributableDependency(const ObjectAddress *objectAddress)
return NULL;
}
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
/*
* Objects with the id smaller than FirstNormalObjectId should be created within
@ -1172,7 +1172,7 @@ IsAnyObjectAddressOwnedByExtension(const List *targets,
ObjectAddress *extensionAddress)
{
ObjectAddress *target = NULL;
foreach_ptr(target, targets)
foreach_declared_ptr(target, targets)
{
if (IsObjectAddressOwnedByExtension(target, extensionAddress))
{
@ -1563,7 +1563,7 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
List *FDWOids = GetDependentFDWsToExtension(extensionId);
Oid FDWOid = InvalidOid;
foreach_oid(FDWOid, FDWOids)
foreach_declared_oid(FDWOid, FDWOids)
{
List *dependentRoleIds = GetDependentRoleIdsFDW(FDWOid);
List *dependencies =
@ -1716,13 +1716,11 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
/*
* As of PostgreSQL 15, the same applies to schemas.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
List *schemaIdList =
GetPublicationSchemas(publicationId);
List *schemaDependencyList =
CreateObjectAddressDependencyDefList(NamespaceRelationId, schemaIdList);
result = list_concat(result, schemaDependencyList);
#endif
break;
}
@ -1849,7 +1847,7 @@ GetViewRuleReferenceDependencyList(Oid viewId)
List *nonInternalDependenciesOfDependingRules = NIL;
HeapTuple depTup = NULL;
foreach_ptr(depTup, dependencyTupleList)
foreach_declared_ptr(depTup, dependencyTupleList)
{
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
@ -1872,7 +1870,7 @@ GetViewRuleReferenceDependencyList(Oid viewId)
List *ruleDependencies = DependencyDefinitionFromPgDepend(ruleAddress);
DependencyDefinition *dependencyDef = NULL;
foreach_ptr(dependencyDef, ruleDependencies)
foreach_declared_ptr(dependencyDef, ruleDependencies)
{
/*
* Follow all dependencies of the internally dependent rule dependencies
@ -1907,7 +1905,7 @@ GetRelationSequenceDependencyList(Oid relationId)
List *seqIdList = NIL;
SequenceInfo *seqInfo = NULL;
foreach_ptr(seqInfo, seqInfoList)
foreach_declared_ptr(seqInfo, seqInfoList)
{
seqIdList = lappend_oid(seqIdList, seqInfo->sequenceOid);
}
@ -1980,7 +1978,7 @@ GetRelationTriggerFunctionDependencyList(Oid relationId)
List *triggerIdList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList)
foreach_declared_oid(triggerId, triggerIdList)
{
Oid functionId = GetTriggerFunctionId(triggerId);
DependencyDefinition *dependency =
@ -2005,7 +2003,7 @@ GetPublicationRelationsDependencyList(Oid publicationId)
Oid relationId = InvalidOid;
foreach_oid(relationId, allRelationIds)
foreach_declared_oid(relationId, allRelationIds)
{
if (!IsCitusTable(relationId))
{
@ -2087,7 +2085,7 @@ CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList)
{
List *dependencyList = NIL;
Oid objectId = InvalidOid;
foreach_oid(objectId, objectIdList)
foreach_declared_oid(objectId, objectIdList)
{
DependencyDefinition *dependency =
CreateObjectAddressDependencyDef(classId, objectId);
@ -2161,7 +2159,7 @@ BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap)
targetObjectId);
HeapTuple depTup = NULL;
foreach_ptr(depTup, dependencyTupleList)
foreach_declared_ptr(depTup, dependencyTupleList)
{
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
@ -2240,7 +2238,7 @@ GetDependingViews(Oid relationId)
foreach_ptr_append(node, nodeQueue)
{
ViewDependencyNode *dependingNode = NULL;
foreach_ptr(dependingNode, node->dependingNodes)
foreach_declared_ptr(dependingNode, node->dependingNodes)
{
ObjectAddress relationAddress = { 0 };
ObjectAddressSet(relationAddress, RelationRelationId, dependingNode->id);

View File

@ -554,7 +554,7 @@ bool
IsAnyObjectDistributed(const List *addresses)
{
ObjectAddress *address = NULL;
foreach_ptr(address, addresses)
foreach_declared_ptr(address, addresses)
{
if (IsObjectDistributed(address))
{

View File

@ -661,6 +661,18 @@ GetTableTypeName(Oid tableId)
bool
IsCitusTable(Oid relationId)
{
/*
* PostgreSQL's OID generator assigns user object OIDs starting
* from FirstNormalObjectId. This means no user object can have
* an OID lower than FirstNormalObjectId. Therefore, if the
* relationId is less than FirstNormalObjectId
* (i.e. in PostgreSQL's reserved range), we can immediately
* return false, since such objects cannot be Citus tables.
*/
if (relationId < FirstNormalObjectId)
{
return false;
}
return LookupCitusTableCacheEntry(relationId) != NULL;
}
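The early return added above leans on FirstNormalObjectId (16384, defined in access/transam.h) being the lowest OID the generator hands out for user-created objects; everything below it was created at initdb time. A small illustration, assuming IsCitusTable() is declared in distributed/metadata_cache.h:

#include "postgres.h"
#include "access/transam.h"
#include "catalog/pg_class.h"
#include "distributed/metadata_cache.h"

static void
BuiltinRelationsAreNeverCitusTables(void)
{
	/* pg_class has OID 1259, well below FirstNormalObjectId (16384) */
	Assert(RelationRelationId < FirstNormalObjectId);

	/* returns false immediately, without touching the metadata cache */
	Assert(!IsCitusTable(RelationRelationId));
}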
@ -920,7 +932,7 @@ CitusTableList(void)
List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
Oid relationId = InvalidOid;
foreach_oid(relationId, citusTableIdList)
foreach_declared_oid(relationId, citusTableIdList)
{
CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId);
@ -1891,7 +1903,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry)
sizeof(int));
HeapTuple shardTuple = NULL;
foreach_ptr(shardTuple, distShardTupleList)
foreach_declared_ptr(shardTuple, distShardTupleList)
{
ShardInterval *shardInterval = TupleToShardInterval(shardTuple,
distShardTupleDesc,
@ -2029,7 +2041,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry)
GroupShardPlacement *placementArray = palloc0(numberOfPlacements *
sizeof(GroupShardPlacement));
GroupShardPlacement *srcPlacement = NULL;
foreach_ptr(srcPlacement, placementList)
foreach_declared_ptr(srcPlacement, placementList)
{
placementArray[placementOffset] = *srcPlacement;
placementOffset++;
@ -4335,7 +4347,7 @@ InitializeWorkerNodeCache(void)
/* iterate over the worker node list */
WorkerNode *currentNode = NULL;
foreach_ptr(currentNode, workerNodeList)
foreach_declared_ptr(currentNode, workerNodeList)
{
bool handleFound = false;
@ -4512,7 +4524,7 @@ GetLocalNodeId(void)
List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
if (workerNode->groupId == localGroupId &&
workerNode->isActive)
@ -5100,7 +5112,7 @@ CitusTableCacheFlushInvalidatedEntries()
if (DistTableCacheHash != NULL && DistTableCacheExpired != NIL)
{
CitusTableCacheEntry *cacheEntry = NULL;
foreach_ptr(cacheEntry, DistTableCacheExpired)
foreach_declared_ptr(cacheEntry, DistTableCacheExpired)
{
ResetCitusTableCacheEntry(cacheEntry);
}

View File

@ -307,7 +307,7 @@ CreateDependingViewsOnWorkers(Oid relationId)
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
Oid viewOid = InvalidOid;
foreach_oid(viewOid, views)
foreach_declared_oid(viewOid, views)
{
if (!ShouldMarkRelationDistributed(viewOid))
{
@ -347,7 +347,7 @@ AddTableToPublications(Oid relationId)
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
foreach_oid(publicationId, publicationIds)
foreach_declared_oid(publicationId, publicationIds)
{
ObjectAddress *publicationAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*publicationAddress, PublicationRelationId, publicationId);
@ -818,7 +818,7 @@ NodeListInsertCommand(List *workerNodeList)
/* iterate over the worker nodes, add the values */
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
char *hasMetadataString = workerNode->hasMetadata ? "TRUE" : "FALSE";
char *metadataSyncedString = workerNode->metadataSynced ? "TRUE" : "FALSE";
@ -946,7 +946,7 @@ MarkObjectsDistributedCreateCommand(List *addresses,
char *name = NULL;
bool firstInNameLoop = true;
foreach_ptr(name, names)
foreach_declared_ptr(name, names)
{
if (!firstInNameLoop)
{
@ -961,7 +961,7 @@ MarkObjectsDistributedCreateCommand(List *addresses,
char *arg;
bool firstInArgLoop = true;
foreach_ptr(arg, args)
foreach_declared_ptr(arg, args)
{
if (!firstInArgLoop)
{
@ -1217,13 +1217,13 @@ ShardListInsertCommand(List *shardIntervalList)
ShardInterval *shardInterval = NULL;
bool firstPlacementProcessed = false;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
List *shardPlacementList = ActiveShardPlacementList(shardId);
ShardPlacement *placement = NULL;
foreach_ptr(placement, shardPlacementList)
foreach_declared_ptr(placement, shardPlacementList)
{
if (firstPlacementProcessed)
{
@ -1257,7 +1257,7 @@ ShardListInsertCommand(List *shardIntervalList)
"WITH shard_data(relationname, shardid, storagetype, "
"shardminvalue, shardmaxvalue) AS (VALUES ");
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
Oid distributedRelationId = shardInterval->relationId;
@ -1694,7 +1694,7 @@ GetDependentRelationsWithSequence(Oid sequenceOid, char depType)
Oid attrDefOid;
List *attrDefOids = GetAttrDefsFromSequence(sequenceOid);
foreach_oid(attrDefOid, attrDefOids)
foreach_declared_oid(attrDefOid, attrDefOids)
{
ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid);
relations = lappend_oid(relations, columnAddress.objectId);
@ -1750,48 +1750,6 @@ GetSequencesFromAttrDef(Oid attrdefOid)
}
#if PG_VERSION_NUM < PG_VERSION_15
/*
* Given a pg_attrdef OID, return the relation OID and column number of
* the owning column (represented as an ObjectAddress for convenience).
*
* Returns InvalidObjectAddress if there is no such pg_attrdef entry.
*/
ObjectAddress
GetAttrDefaultColumnAddress(Oid attrdefoid)
{
ObjectAddress result = InvalidObjectAddress;
ScanKeyData skey[1];
HeapTuple tup;
Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_attrdef_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(attrdefoid));
SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true,
NULL, 1, skey);
if (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup);
result.classId = RelationRelationId;
result.objectId = atdform->adrelid;
result.objectSubId = atdform->adnum;
}
systable_endscan(scan);
table_close(attrdef, AccessShareLock);
return result;
}
#endif
/*
* GetAttrDefsFromSequence returns a list of attrdef OIDs that have
* a dependency on the given sequence
@ -1890,7 +1848,7 @@ GetDependentFunctionsWithRelation(Oid relationId)
table_close(depRel, AccessShareLock);
ObjectAddress *referencingObject = NULL;
foreach_ptr(referencingObject, referencingObjects)
foreach_declared_ptr(referencingObject, referencingObjects)
{
functionOids = list_concat(functionOids,
GetFunctionDependenciesForObjects(referencingObject));
@ -2771,7 +2729,7 @@ HasMetadataWorkers(void)
List *workerNodeList = ActiveReadableNonCoordinatorNodeList();
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
if (workerNode->hasMetadata)
{
@ -2804,7 +2762,7 @@ CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId)
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
const char *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
SendCommandToWorkersWithMetadata(command);
}
@ -2857,14 +2815,14 @@ CreateShellTableOnWorkers(Oid relationId)
creatingShellTableOnRemoteNode);
TableDDLCommand *tableDDLCommand = NULL;
foreach_ptr(tableDDLCommand, tableDDLCommands)
foreach_declared_ptr(tableDDLCommand, tableDDLCommands)
{
Assert(CitusIsA(tableDDLCommand, TableDDLCommand));
commandList = lappend(commandList, GetTableDDLCommand(tableDDLCommand));
}
const char *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
SendCommandToWorkersWithMetadata(command);
}
@ -2888,7 +2846,7 @@ CreateTableMetadataOnWorkers(Oid relationId)
/* send the commands one by one */
const char *command = NULL;
foreach_ptr(command, commandList)
foreach_declared_ptr(command, commandList)
{
SendCommandToWorkersWithMetadata(command);
}
@ -2912,7 +2870,7 @@ DetachPartitionCommandList(void)
/* we iterate over all distributed partitioned tables and DETACH their partitions */
CitusTableCacheEntry *cacheEntry = NULL;
foreach_ptr(cacheEntry, distributedTableList)
foreach_declared_ptr(cacheEntry, distributedTableList)
{
if (!PartitionedTable(cacheEntry->relationId))
{
@ -2976,7 +2934,7 @@ SyncNodeMetadataToNodesOptional(void)
List *syncedWorkerList = NIL;
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
foreach_declared_ptr(workerNode, workerList)
{
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
@ -2996,7 +2954,7 @@ SyncNodeMetadataToNodesOptional(void)
}
}
foreach_ptr(workerNode, syncedWorkerList)
foreach_declared_ptr(workerNode, syncedWorkerList)
{
SetWorkerColumnOptional(workerNode, Anum_pg_dist_node_metadatasynced,
BoolGetDatum(true));
@ -3041,7 +2999,7 @@ SyncNodeMetadataToNodes(void)
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
foreach_declared_ptr(workerNode, workerList)
{
if (workerNode->hasMetadata)
{
@ -3113,7 +3071,6 @@ SyncNodeMetadataToNodesMain(Datum main_arg)
PopActiveSnapshot();
CommitTransactionCommand();
ProcessCompletedNotifies();
if (syncedAllNodes)
{
@ -3280,7 +3237,7 @@ ShouldInitiateMetadataSync(bool *lockFailure)
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
foreach_declared_ptr(workerNode, workerList)
{
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
@ -3638,7 +3595,7 @@ EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType,
GetFunctionInfo(intervalTypeId, BTREE_AM_OID, BTORDER_PROC);
HeapTuple shardTuple = NULL;
foreach_ptr(shardTuple, distShardTupleList)
foreach_declared_ptr(shardTuple, distShardTupleList)
{
ShardInterval *shardInterval =
TupleToShardInterval(shardTuple, distShardTupleDesc,
@ -3934,7 +3891,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS)
List *shardPlacementList = ShardPlacementList(shardId);
ShardPlacement *shardPlacement = NULL;
foreach_ptr(shardPlacement, shardPlacementList)
foreach_declared_ptr(shardPlacement, shardPlacementList)
{
DeleteShardPlacementRow(shardPlacement->placementId);
}
@ -4503,7 +4460,7 @@ SetMetadataSyncNodesFromNodeList(MetadataSyncContext *context, List *nodeList)
List *activatedWorkerNodeList = NIL;
WorkerNode *node = NULL;
foreach_ptr(node, nodeList)
foreach_declared_ptr(node, nodeList)
{
if (NodeIsPrimary(node))
{
@ -4538,7 +4495,7 @@ EstablishAndSetMetadataSyncBareConnections(MetadataSyncContext *context)
/* establish bare connections to activated worker nodes */
List *bareConnectionList = NIL;
WorkerNode *node = NULL;
foreach_ptr(node, context->activatedWorkerNodeList)
foreach_declared_ptr(node, context->activatedWorkerNodeList)
{
MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags,
node->workerName,
@ -5147,7 +5104,7 @@ SendDependencyCreationCommands(MetadataSyncContext *context)
ALLOCSET_DEFAULT_SIZES);
MemoryContextSwitchTo(commandsContext);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
foreach_declared_ptr(dependency, dependencies)
{
if (!MetadataSyncCollectsCommands(context))
{

View File

@ -420,7 +420,7 @@ OpenConnectionToNodes(List *workerNodeList)
{
List *connectionList = NIL;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
const char *nodeName = workerNode->workerName;
int nodePort = workerNode->workerPort;
@ -444,7 +444,7 @@ GenerateShardStatisticsQueryList(List *workerNodeList, List *citusTableIds)
{
List *shardStatisticsQueryList = NIL;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
char *shardStatisticsQuery =
GenerateAllShardStatisticsQueryForNode(workerNode, citusTableIds);
@ -465,7 +465,7 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
TupleDesc tupleDescriptor)
{
MultiConnection *connection = NULL;
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
bool raiseInterrupts = true;
Datum values[SHARD_SIZES_COLUMN_COUNT];
@ -559,7 +559,7 @@ DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
List *workerNodeList = ActiveReadableNodeList();
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
uint64 relationSizeOnNode = 0;
@ -780,7 +780,7 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
List *nonPartitionedShardNames = NIL;
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
if (optimizePartitionCalculations && PartitionTable(shardInterval->relationId))
{
@ -859,7 +859,7 @@ GenerateSizeQueryForRelationNameList(List *quotedShardNames, char *sizeFunction)
bool addComma = false;
char *quotedShardName = NULL;
foreach_ptr(quotedShardName, quotedShardNames)
foreach_declared_ptr(quotedShardName, quotedShardNames)
{
if (addComma)
{
@ -960,7 +960,7 @@ GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableI
appendStringInfoString(allShardStatisticsQuery, " FROM (VALUES ");
Oid relationId = InvalidOid;
foreach_oid(relationId, citusTableIds)
foreach_declared_oid(relationId, citusTableIds)
{
/*
* Ensure the table still exists by trying to acquire a lock on it
@ -1007,7 +1007,7 @@ GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue)
StringInfo selectQuery = makeStringInfo();
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
if (!firstValue)
{
@ -1147,7 +1147,7 @@ TableShardReplicationFactor(Oid relationId)
List *shardIntervalList = LoadShardIntervalList(relationId);
ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList)
foreach_declared_ptr(shardInterval, shardIntervalList)
{
uint64 shardId = shardInterval->shardId;
@ -1238,7 +1238,7 @@ LoadUnsortedShardIntervalListViaCatalog(Oid relationId)
&intervalTypeMod);
HeapTuple distShardTuple = NULL;
foreach_ptr(distShardTuple, distShardTuples)
foreach_declared_ptr(distShardTuple, distShardTuples)
{
ShardInterval *interval = TupleToShardInterval(distShardTuple,
distShardTupleDesc,
@ -1487,7 +1487,7 @@ FilterShardPlacementList(List *shardPlacementList, bool (*filter)(ShardPlacement
List *filteredShardPlacementList = NIL;
ShardPlacement *shardPlacement = NULL;
foreach_ptr(shardPlacement, shardPlacementList)
foreach_declared_ptr(shardPlacement, shardPlacementList)
{
if (filter(shardPlacement))
{
@ -1511,7 +1511,7 @@ FilterActiveShardPlacementListByNode(List *shardPlacementList, WorkerNode *worke
List *filteredShardPlacementList = NIL;
ShardPlacement *shardPlacement = NULL;
foreach_ptr(shardPlacement, activeShardPlacementList)
foreach_declared_ptr(shardPlacement, activeShardPlacementList)
{
if (IsPlacementOnWorkerNode(shardPlacement, workerNode))
{
@ -1535,7 +1535,7 @@ ActiveShardPlacementListOnGroup(uint64 shardId, int32 groupId)
List *activePlacementList = ActiveShardPlacementList(shardId);
ShardPlacement *shardPlacement = NULL;
foreach_ptr(shardPlacement, activePlacementList)
foreach_declared_ptr(shardPlacement, activePlacementList)
{
if (shardPlacement->groupId == groupId)
{
@ -3331,7 +3331,7 @@ ResetRunningBackgroundTasks(void)
/* there are tasks that need to release their lock before we can continue */
int64 *taskId = NULL;
foreach_ptr(taskId, taskIdsToWait)
foreach_declared_ptr(taskId, taskIdsToWait)
{
LOCKTAG locktag = { 0 };
SET_LOCKTAG_BACKGROUND_TASK(locktag, *taskId);

View File

@ -990,7 +990,7 @@ MarkNodesNotSyncedInLoopBackConnection(MetadataSyncContext *context,
List *commandList = NIL;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, context->activatedWorkerNodeList)
foreach_declared_ptr(workerNode, context->activatedWorkerNodeList)
{
/*
* We need to prevent self deadlock when we access pg_dist_node using separate
@ -1023,7 +1023,7 @@ SetNodeMetadata(MetadataSyncContext *context, bool localOnly)
List *updatedActivatedNodeList = NIL;
WorkerNode *node = NULL;
foreach_ptr(node, context->activatedWorkerNodeList)
foreach_declared_ptr(node, context->activatedWorkerNodeList)
{
node = SetWorkerColumnLocalOnly(node, Anum_pg_dist_node_isactive,
BoolGetDatum(true));
@ -1042,7 +1042,7 @@ SetNodeMetadata(MetadataSyncContext *context, bool localOnly)
if (!localOnly && EnableMetadataSync)
{
WorkerNode *node = NULL;
foreach_ptr(node, context->activatedWorkerNodeList)
foreach_declared_ptr(node, context->activatedWorkerNodeList)
{
SetNodeStateViaMetadataContext(context, node, BoolGetDatum(true));
}
@ -1847,7 +1847,7 @@ FindNodeAnyClusterByNodeId(uint32 nodeId)
List *nodeList = ReadDistNode(includeNodesFromOtherClusters);
WorkerNode *node = NULL;
foreach_ptr(node, nodeList)
foreach_declared_ptr(node, nodeList)
{
if (node->nodeId == nodeId)
{
@ -1869,7 +1869,7 @@ FindNodeWithNodeId(int nodeId, bool missingOk)
List *nodeList = ActiveReadableNodeList();
WorkerNode *node = NULL;
foreach_ptr(node, nodeList)
foreach_declared_ptr(node, nodeList)
{
if (node->nodeId == nodeId)
{
@ -1897,7 +1897,7 @@ FindCoordinatorNodeId()
List *nodeList = ReadDistNode(includeNodesFromOtherClusters);
WorkerNode *node = NULL;
foreach_ptr(node, nodeList)
foreach_declared_ptr(node, nodeList)
{
if (NodeIsCoordinator(node))
{
@ -2027,7 +2027,7 @@ ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode)
shardPlacements = SortList(shardPlacements, CompareGroupShardPlacements);
GroupShardPlacement *placement = NULL;
foreach_ptr(placement, shardPlacements)
foreach_declared_ptr(placement, shardPlacements)
{
if (!PlacementHasActivePlacementOnAnotherGroup(placement))
{
@ -2063,7 +2063,7 @@ PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement)
bool foundActivePlacementOnAnotherGroup = false;
ShardPlacement *activePlacement = NULL;
foreach_ptr(activePlacement, activePlacementList)
foreach_declared_ptr(activePlacement, activePlacementList)
{
if (activePlacement->groupId != sourcePlacement->groupId)
{
@ -2414,7 +2414,7 @@ SetWorkerColumnOptional(WorkerNode *workerNode, int columnIndex, Datum value)
/* open connections in parallel */
WorkerNode *worker = NULL;
foreach_ptr(worker, workerNodeList)
foreach_declared_ptr(worker, workerNodeList)
{
bool success = SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(
worker->workerName, worker->workerPort,
@ -3147,7 +3147,7 @@ static void
ErrorIfAnyNodeNotExist(List *nodeList)
{
WorkerNode *node = NULL;
foreach_ptr(node, nodeList)
foreach_declared_ptr(node, nodeList)
{
/*
* First, locally mark the node is active, if everything goes well,
@ -3196,7 +3196,7 @@ static void
SendDeletionCommandsForReplicatedTablePlacements(MetadataSyncContext *context)
{
WorkerNode *node = NULL;
foreach_ptr(node, context->activatedWorkerNodeList)
foreach_declared_ptr(node, context->activatedWorkerNodeList)
{
if (!node->isActive)
{

View File

@ -283,9 +283,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
case OBJECT_FDW:
case OBJECT_FOREIGN_SERVER:
case OBJECT_LANGUAGE:
#if PG_VERSION_NUM >= PG_VERSION_15
case OBJECT_PARAMETER_ACL:
#endif
case OBJECT_PUBLICATION:
case OBJECT_ROLE:
case OBJECT_SCHEMA:
@ -323,9 +321,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
break;
}
#if PG_VERSION_NUM >= PG_VERSION_15
case OBJECT_PUBLICATION_NAMESPACE:
#endif
case OBJECT_USER_MAPPING:
{
objnode = (Node *) list_make2(linitial(name), linitial(args));

View File

@ -122,7 +122,7 @@ OpenConnectionsToAllWorkerNodes(LOCKMODE lockMode)
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_declared_ptr(workerNode, workerNodeList)
{
MultiConnection *connection = StartNodeConnection(connectionFlags,
workerNode->workerName,
@ -164,7 +164,7 @@ CreateRemoteRestorePoints(char *restoreName, List *connectionList)
const char *parameterValues[1] = { restoreName };
MultiConnection *connection = NULL;
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
int querySent = SendRemoteCommandParams(connection, CREATE_RESTORE_POINT_COMMAND,
parameterCount, parameterTypes,
@ -175,7 +175,7 @@ CreateRemoteRestorePoints(char *restoreName, List *connectionList)
}
}
foreach_ptr(connection, connectionList)
foreach_declared_ptr(connection, connectionList)
{
PGresult *result = GetRemoteCommandResult(connection, true);
if (!IsResponseOK(result))

View File

@ -200,7 +200,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
* each placement insertion.
*/
uint64 *shardIdPtr;
foreach_ptr(shardIdPtr, insertedShardIds)
foreach_declared_ptr(shardIdPtr, insertedShardIds)
{
List *placementsForShard = ShardPlacementList(*shardIdPtr);
insertedShardPlacements = list_concat(insertedShardPlacements,
@ -258,7 +258,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
char targetShardStorageType = ShardStorageType(targetRelationId);
ShardInterval *sourceShardInterval = NULL;
foreach_ptr(sourceShardInterval, sourceShardIntervalList)
foreach_declared_ptr(sourceShardInterval, sourceShardIntervalList)
{
uint64 sourceShardId = sourceShardInterval->shardId;
uint64 *newShardIdPtr = (uint64 *) palloc0(sizeof(uint64));
@ -286,7 +286,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
shardMinValueText, shardMaxValueText);
ShardPlacement *sourcePlacement = NULL;
foreach_ptr(sourcePlacement, sourceShardPlacementList)
foreach_declared_ptr(sourcePlacement, sourceShardPlacementList)
{
int32 groupId = sourcePlacement->groupId;
const uint64 shardSize = 0;
@ -304,7 +304,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
* each placement insertion.
*/
uint64 *shardIdPtr;
foreach_ptr(shardIdPtr, insertedShardIds)
foreach_declared_ptr(shardIdPtr, insertedShardIds)
{
List *placementsForShard = ShardPlacementList(*shardIdPtr);
insertedShardPlacements = list_concat(insertedShardPlacements,

Some files were not shown because too many files have changed in this diff.