Merge branch 'release-13.0' into fix-issue-7676

pull/7914/head
Colm 2025-03-07 08:47:09 +00:00 committed by GitHub
commit b201eee280
252 changed files with 9349 additions and 33423 deletions

@@ -68,7 +68,7 @@ USER citus
 # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
 FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.14
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.9
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.5
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -104,7 +104,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 FROM base AS pg17
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.1
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.2
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -223,7 +223,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~
 # sets default pg version
-RUN pgenv switch 17.1
+RUN pgenv switch 17.2
 # make connecting to the coordinator easy
 ENV PGPORT=9700

.gitattributes

@@ -25,8 +25,6 @@ configure -whitespace
 # except these exceptions...
 src/backend/distributed/utils/citus_outfuncs.c -citus-style
-src/backend/distributed/deparser/ruleutils_13.c -citus-style
-src/backend/distributed/deparser/ruleutils_14.c -citus-style
 src/backend/distributed/deparser/ruleutils_15.c -citus-style
 src/backend/distributed/deparser/ruleutils_16.c -citus-style
 src/backend/distributed/deparser/ruleutils_17.c -citus-style

@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       name: Upload logs
       with:
         name: ${{ inputs.folder }}

@@ -17,7 +17,7 @@ runs:
           echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
         fi
       shell: bash
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
      with:
        name: build-${{ env.PG_MAJOR }}
    - name: Install Extension

@@ -21,7 +21,7 @@ runs:
         mkdir -p /tmp/codeclimate
         cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
      with:
        path: "/tmp/codeclimate/*.json"
-       name: codeclimate
+       name: codeclimate-${{ inputs.flags }}

@@ -26,13 +26,13 @@ jobs:
       pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
       style_checker_image_name: "ghcr.io/citusdata/stylechecker"
       style_checker_tools_version: "0.8.18"
-      sql_snapshot_pg_version: "17.1"
-      image_suffix: "-v84c0cf8"
-      pg14_version: '{ "major": "14", "full": "14.14" }'
-      pg15_version: '{ "major": "15", "full": "15.9" }'
-      pg16_version: '{ "major": "16", "full": "16.5" }'
-      pg17_version: '{ "major": "17", "full": "17.1" }'
-      upgrade_pg_versions: "14.14-15.9-16.5-17.1"
+      sql_snapshot_pg_version: "17.2"
+      image_suffix: "-v889e4c1"
+      image_suffix_citus_upgrade: "-dev-2ad1f90"
+      pg15_version: '{ "major": "15", "full": "15.10" }'
+      pg16_version: '{ "major": "16", "full": "16.6" }'
+      pg17_version: '{ "major": "17", "full": "17.2" }'
+      upgrade_pg_versions: "14.15-15.10-16.6-17.2"
     steps:
       # Since GHA jobs need at least one step we use a noop step here.
       - name: Set up parameters
@@ -44,7 +44,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -58,7 +58,7 @@ jobs:
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Check C Style
@@ -106,7 +106,6 @@ jobs:
        image_suffix:
          - ${{ needs.params.outputs.image_suffix}}
        pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
          - ${{ needs.params.outputs.pg16_version }}
          - ${{ needs.params.outputs.pg17_version }}
@@ -115,14 +114,14 @@ jobs:
      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
      options: --user root
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - name: Expose $PG_MAJOR to Github Env
        run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
        shell: bash
      - name: Build
        run: "./ci/build-citus.sh"
        shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -138,7 +137,6 @@ jobs:
        image_name:
          - ${{ needs.params.outputs.test_image_name }}
        pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
          - ${{ needs.params.outputs.pg16_version }}
          - ${{ needs.params.outputs.pg17_version }}
@@ -159,10 +157,6 @@ jobs:
          - check-enterprise-isolation-logicalrep-2
          - check-enterprise-isolation-logicalrep-3
        include:
-          - make: check-failure
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-failure
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
@@ -175,10 +169,6 @@ jobs:
            pg_version: ${{ needs.params.outputs.pg17_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
-          - make: check-enterprise-failure
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-enterprise-failure
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
@@ -191,10 +181,6 @@ jobs:
            pg_version: ${{ needs.params.outputs.pg17_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
-          - make: check-pytest
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-pytest
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
@@ -219,10 +205,6 @@ jobs:
            suite: cdc
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg17_version }}
-          - make: check-query-generator
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-query-generator
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
@@ -246,7 +228,7 @@ jobs:
      - params
      - build
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
      - name: Run Test
        run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
@@ -275,13 +257,12 @@ jobs:
        image_name:
          - ${{ needs.params.outputs.fail_test_image_name }}
        pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
          - ${{ needs.params.outputs.pg16_version }}
          - ${{ needs.params.outputs.pg17_version }}
        parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
      - name: Test arbitrary configs
        run: |-
@@ -303,10 +284,12 @@ jobs:
            check-arbitrary-configs parallel=4 CONFIGS=$TESTS
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  test-pg-upgrade:
    name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -321,23 +304,17 @@ jobs:
      fail-fast: false
      matrix:
        include:
-          - old_pg_major: 14
-            new_pg_major: 15
          - old_pg_major: 15
            new_pg_major: 16
-          - old_pg_major: 14
-            new_pg_major: 16
          - old_pg_major: 16
            new_pg_major: 17
          - old_pg_major: 15
            new_pg_major: 17
-          - old_pg_major: 14
-            new_pg_major: 17
    env:
      old_pg_major: ${{ matrix.old_pg_major }}
      new_pg_major: ${{ matrix.new_pg_major }}
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
        with:
          pg_major: "${{ env.old_pg_major }}"
@@ -360,22 +337,24 @@ jobs:
        if: failure()
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
+        with:
+          folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
          flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  test-citus-upgrade:
-    name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
+    name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
    runs-on: ubuntu-20.04
    container:
-      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix_citus_upgrade }}"
      options: --user root
    needs:
      - params
      - build
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
        with:
          skip_installation: true
@@ -405,10 +384,12 @@ jobs:
          done;
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_citus_upgrade
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_citus_upgrade
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  upload-coverage:
    if: always()
@@ -424,10 +405,11 @@ jobs:
      - test-citus-upgrade
      - test-pg-upgrade
    steps:
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
        with:
-          name: "codeclimate"
-          path: "codeclimate"
+          pattern: codeclimate*
+          path: codeclimate
+          merge-multiple: true
      - name: Upload coverage results to Code Climate
        run: |-
          cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -439,7 +421,7 @@ jobs:
    needs:
      - build
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -457,7 +439,7 @@ jobs:
    needs:
      - build
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -476,7 +458,7 @@ jobs:
    outputs:
      json: ${{ steps.parallelization.outputs.json }}
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/parallelization"
        id: parallelization
        with:
@@ -489,7 +471,7 @@ jobs:
    outputs:
      tests: ${{ steps.detect-regression-tests.outputs.tests }}
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Detect regression tests need to be ran
@@ -524,8 +506,8 @@ jobs:
      fail-fast: false
      matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
    steps:
-      - uses: actions/checkout@v3.5.0
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4.1.8
      - uses: "./.github/actions/setup_extension"
      - name: Run minimal tests
        run: |-

@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Initialize CodeQL
         uses: github/codeql-action/init@v2

@@ -28,13 +28,13 @@ jobs:
       image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - name: Configure, Build, and Install
        run: |
          echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
          ./ci/build-citus.sh
        shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -46,7 +46,7 @@ jobs:
    outputs:
      json: ${{ steps.parallelization.outputs.json }}
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/parallelization"
        id: parallelization
        with:
@@ -67,7 +67,7 @@ jobs:
      fail-fast: false
      matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
    steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/setup_extension"
      - name: Run minimal tests
        run: |-

@@ -115,7 +115,6 @@ jobs:
         # for each deb based image and we use POSTGRES_VERSION to set
         # PG_CONFIG variable in each of those runs.
         packaging_docker_image:
-          - debian-buster-all
           - debian-bookworm-all
           - debian-bullseye-all
           - ubuntu-focal-all
@@ -129,7 +128,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set pg_config path and python parameters for deb based distros
         run: |

@@ -1,3 +1,45 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
+### citus v13.0.0 (January 22, 2025) ###
+
+* Adds support for PostgreSQL 17 (#7699, #7661)
+
+* Adds `JSON_TABLE()` support in distributed queries (#7816)
+
+* Propagates `MERGE ... WHEN NOT MATCHED BY SOURCE` (#7807)
+
+* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802)
+
+* Adds support for identity columns in distributed partitioned tables (#7785)
+
+* Allows specifying an access method for distributed partitioned tables (#7818)
+
+* Allows exclusion constraints on distributed partitioned tables (#7733)
+
+* Allows configuring sslnegotiation using `citus.node_conn_info` (#7821)
+
+* Avoids wal receiver timeouts during large shard splits (#7229)
+
+* Fixes a bug causing incorrect writing of data to target `MERGE` repartition
+  command (#7659)
+
+* Fixes a crash that happens because of unsafe catalog access when re-assigning
+  the global pid after `application_name` changes (#7791)
+
+* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing
+  them to new nodes (#7534)
+
+* Fixes segfault when calling distributed procedure with a parameterized
+  distribution argument (#7242)
+
+* Fixes server crash when trying to execute `activate_node_snapshot()` on a
+  single-node cluster (#7552)
+
+* Improves `citus_move_shard_placement()` to fail early if there is a new node
+  without reference tables yet (#7467)
+
 ### citus v12.1.5 (July 17, 2024) ###
 
 * Adds support for MERGE commands with single shard distributed target tables
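Two of the PG17 features called out in the changelog above lend themselves to a quick smoke test. The snippet below is a minimal sketch, not part of this commit: it assumes a Citus 13.0 coordinator listening on port 9700 (the PGPORT baked into the Dockerfile above) and hypothetical distributed tables "events" and "staging_events" with a jsonb "payload" column.

psql -p 9700 <<'SQL'
-- MERGE ... WHEN NOT MATCHED BY SOURCE (PG17), now propagated by Citus (#7807)
MERGE INTO events AS t
USING staging_events AS s ON t.id = s.id
WHEN MATCHED THEN UPDATE SET payload = s.payload
WHEN NOT MATCHED BY SOURCE THEN DELETE;

-- JSON_TABLE() in a distributed query (#7816)
SELECT jt.*
FROM events,
     JSON_TABLE(events.payload, '$.items[*]'
                COLUMNS (name text PATH '$.name', qty int PATH '$.qty')) AS jt;
SQL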

@@ -5,6 +5,6 @@ set -euo pipefail
 source ci/ci_helpers.sh
 # extract citus gucs in the form of "citus.X"
-grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
+grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
 sort -c gucs.out
 rm gucs.out
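The one-character fix above escapes the dot so the pattern only matches the literal prefix "citus." in GUC names; an unescaped dot matches any character. A quick illustration against a hypothetical input file (the file name and contents here are made up):

printf '"citus.shard_count",\n"citusXshard_count",\n' > /tmp/gucs_demo
grep -c -E '"citus.\w+",'  /tmp/gucs_demo   # unescaped dot: matches both lines, prints 2
grep -c -E '"citus\.\w+",' /tmp/gucs_demo   # escaped dot: matches only the real GUC, prints 1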

configure

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 13.0.0.
+# Generated by GNU Autoconf 2.69 for Citus 13.0.1.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='13.0.0'
-PACKAGE_STRING='Citus 13.0.0'
+PACKAGE_VERSION='13.0.1'
+PACKAGE_STRING='Citus 13.0.1'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 13.0.0 to adapt to many kinds of systems.
+\`configure' configures Citus 13.0.1 to adapt to many kinds of systems.
 Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1324,7 +1324,7 @@ fi
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 13.0.0:";;
+     short | recursive ) echo "Configuration of Citus 13.0.1:";;
   esac
   cat <<\_ACEOF
@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 13.0.0
+Citus configure 13.0.1
 generated by GNU Autoconf 2.69
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
-It was created by Citus $as_me 13.0.0, which was
+It was created by Citus $as_me 13.0.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
   $ $0 $@
@@ -2588,7 +2588,7 @@ fi
 if test "$with_pg_version_check" = no; then
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
 $as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
-elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
+elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 13.0.0, which was
+This file was extended by Citus $as_me 13.0.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
   CONFIG_FILES    = $CONFIG_FILES
@@ -5455,7 +5455,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 13.0.0
+Citus config.status 13.0.1
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"

@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
-AC_INIT([Citus], [13.0.0])
+AC_INIT([Citus], [13.0.1])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 # we'll need sed and awk for some of the version commands
@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
 if test "$with_pg_version_check" = no; then
   AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
-elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
+elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
   AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
   AC_MSG_NOTICE([building against PostgreSQL $version_num])

gucs.out (new file, 133 added lines)

@@ -0,0 +1,133 @@
"citus.all_modifications_commutative",
"citus.allow_modifications_from_workers_to_replicated_tables",
"citus.allow_nested_distributed_execution",
"citus.allow_unsafe_constraints",
"citus.allow_unsafe_locks_from_workers",
"citus.background_task_queue_interval",
"citus.check_available_space_before_move",
"citus.cluster_name",
"citus.coordinator_aggregation_strategy",
"citus.copy_switchover_threshold",
"citus.count_distinct_error_rate",
"citus.cpu_priority",
"citus.cpu_priority_for_logical_replication_senders",
"citus.create_object_propagation",
"citus.defer_drop_after_shard_move",
"citus.defer_drop_after_shard_split",
"citus.defer_shard_delete_interval",
"citus.desired_percent_disk_available_after_move",
"citus.distributed_deadlock_detection_factor",
"citus.enable_alter_database_owner",
"citus.enable_alter_role_propagation",
"citus.enable_alter_role_set_propagation",
"citus.enable_binary_protocol",
"citus.enable_change_data_capture",
"citus.enable_cluster_clock",
"citus.enable_cost_based_connection_establishment",
"citus.enable_create_role_propagation",
"citus.enable_create_type_propagation",
"citus.enable_ddl_propagation",
"citus.enable_deadlock_prevention",
"citus.enable_fast_path_router_planner",
"citus.enable_local_execution",
"citus.enable_local_reference_table_foreign_keys",
"citus.enable_manual_changes_to_shards",
"citus.enable_manual_metadata_changes_for_user",
"citus.enable_metadata_sync",
"citus.enable_non_colocated_router_query_pushdown",
"citus.enable_repartition_joins",
"citus.enable_repartitioned_insert_select",
"citus.enable_router_execution",
"citus.enable_schema_based_sharding",
"citus.enable_single_hash_repartition_joins",
"citus.enable_statistics_collection",
"citus.enable_unique_job_ids",
"citus.enable_unsafe_triggers",
"citus.enable_unsupported_feature_messages",
"citus.enable_version_checks",
"citus.enforce_foreign_key_restrictions",
"citus.enforce_object_restrictions_for_local_objects",
"citus.executor_slow_start_interval",
"citus.explain_all_tasks",
"citus.explain_analyze_sort_method",
"citus.explain_distributed_queries",
"citus.force_max_query_parallelization",
"citus.function_opens_transaction_block",
"citus.grep_remote_commands",
"citus.hide_citus_dependent_objects",
"citus.hide_shards_from_app_name_prefixes",
"citus.isolation_test_session_process_id",
"citus.isolation_test_session_remote_process_id",
"citus.limit_clause_row_fetch_count",
"citus.local_copy_flush_threshold",
"citus.local_hostname",
"citus.local_shared_pool_size",
"citus.local_table_join_policy",
"citus.log_distributed_deadlock_detection",
"citus.log_intermediate_results",
"citus.log_local_commands",
"citus.log_multi_join_order",
"citus.log_remote_commands",
"citus.logical_replication_timeout",
"citus.main_db",
"citus.max_adaptive_executor_pool_size",
"citus.max_background_task_executors",
"citus.max_background_task_executors_per_node",
"citus.max_cached_connection_lifetime",
"citus.max_cached_conns_per_worker",
"citus.max_client_connections",
"citus.max_high_priority_background_processes",
"citus.max_intermediate_result_size",
"citus.max_matview_size_to_auto_recreate",
"citus.max_rebalancer_logged_ignored_moves",
"citus.max_shared_pool_size",
"citus.max_worker_nodes_tracked",
"citus.metadata_sync_interval",
"citus.metadata_sync_mode",
"citus.metadata_sync_retry_interval",
"citus.mitmfifo",
"citus.multi_shard_modify_mode",
"citus.multi_task_query_log_level",
"citus.next_cleanup_record_id",
"citus.next_operation_id",
"citus.next_placement_id",
"citus.next_shard_id",
"citus.node_connection_timeout",
"citus.node_conninfo",
"citus.override_table_visibility",
"citus.prevent_incomplete_connection_establishment",
"citus.propagate_session_settings_for_loopback_connection",
"citus.propagate_set_commands",
"citus.rebalancer_by_disk_size_base_cost",
"citus.recover_2pc_interval",
"citus.remote_copy_flush_threshold",
"citus.remote_task_check_interval",
"citus.repartition_join_bucket_count_per_node",
"citus.replicate_reference_tables_on_activate",
"citus.replication_model",
"citus.running_under_citus_test_suite",
"citus.select_opens_transaction_block",
"citus.shard_count",
"citus.shard_replication_factor",
"citus.show_shards_for_app_name_prefixes",
"citus.skip_advisory_lock_permission_checks",
"citus.skip_constraint_validation",
"citus.skip_jsonb_validation_in_copy",
"citus.sort_returning",
"citus.stat_statements_max",
"citus.stat_statements_purge_interval",
"citus.stat_statements_track",
"citus.stat_tenants_limit",
"citus.stat_tenants_log_level",
"citus.stat_tenants_period",
"citus.stat_tenants_track",
"citus.stat_tenants_untracked_sample_rate",
"citus.subquery_pushdown",
"citus.task_assignment_policy",
"citus.task_executor_type",
"citus.use_citus_managed_tables",
"citus.use_secondary_nodes",
"citus.values_materialization_threshold",
"citus.version",
"citus.worker_min_messages",
"citus.writable_standby_coordinator",

@@ -1051,6 +1051,15 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses)
 	candidateRelids = bms_del_members(candidateRelids, rel->relids);
 	candidateRelids = bms_del_members(candidateRelids, rel->lateral_relids);
+
+	/*
+	 * For the relevant PG16 commit requiring this addition:
+	 * postgres/postgres@2489d76
+	 */
+#if PG_VERSION_NUM >= PG_VERSION_16
+	candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels);
+#endif
+
 	return candidateRelids;
 }
@@ -1312,11 +1321,8 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
 	cpath->methods = &ColumnarScanPathMethods;
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
 	/* necessary to avoid extra Result node in PG15 */
 	cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION;
-#endif
 	/*
 	 * populate generic path information

@@ -1686,7 +1686,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
 	simple_heap_delete(state->rel, tid);
 	/* execute AFTER ROW DELETE Triggers to enforce constraints */
-	ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false);
+	ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL, false);
 }

@@ -877,7 +877,7 @@ columnar_relation_set_new_filelocator(Relation rel,
 	*freezeXid = RecentXmin;
 	*minmulti = GetOldestMultiXactId();
-	SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true);
+	SMgrRelation srel = RelationCreateStorage(*newrlocator, persistence, true);
 	ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
 	InitColumnarOptions(rel->rd_id);
@@ -2245,7 +2245,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
 			columnarRangeVar = alterTableStmt->relation;
 		}
 	}
-#if PG_VERSION_NUM >= PG_VERSION_15
 	else if (alterTableCmd->subtype == AT_SetAccessMethod)
 	{
 		if (columnarRangeVar || *columnarOptions)
@@ -2265,7 +2264,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
 			DeleteColumnarTableOptions(RelationGetRelid(rel), true);
 		}
 	}
-#endif /* PG_VERSION_15 */
 	}
 	relation_close(rel, NoLock);
@@ -2649,21 +2647,12 @@ ColumnarCheckLogicalReplication(Relation rel)
 		return;
 	}
-#if PG_VERSION_NUM >= PG_VERSION_15
 	{
 		PublicationDesc pubdesc;
 		RelationBuildPublicationDesc(rel, &pubdesc);
 		pubActionInsert = pubdesc.pubactions.pubinsert;
 	}
-#else
-	if (rel->rd_pubactions == NULL)
-	{
-		GetRelationPublicationActions(rel);
-		Assert(rel->rd_pubactions != NULL);
-	}
-	pubActionInsert = rel->rd_pubactions->pubinsert;
-#endif
 	if (pubActionInsert)
 	{
@@ -3040,6 +3029,8 @@ AvailableExtensionVersionColumnar(void)
 	ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 					errmsg("citus extension is not found")));
+
+	return NULL; /* keep compiler happy */
 }

@@ -145,17 +145,6 @@ LogicalClockShmemSize(void)
 void
 InitializeClusterClockMem(void)
 {
-	/* On PG 15 and above, we use shmem_request_hook_type */
-#if PG_VERSION_NUM < PG_VERSION_15
-
-	/* allocate shared memory for pre PG-15 versions */
-	if (!IsUnderPostmaster)
-	{
-		RequestAddinShmemSpace(LogicalClockShmemSize());
-	}
-#endif
-
 	prev_shmem_startup_hook = shmem_startup_hook;
 	shmem_startup_hook = LogicalClockShmemInit;
 }

@@ -209,12 +209,9 @@ static void ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommand
 static bool HasAnyGeneratedStoredColumns(Oid relationId);
 static List * GetNonGeneratedStoredColumnNameList(Oid relationId);
 static void CheckAlterDistributedTableConversionParameters(TableConversionState *con);
-static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName,
-                                                          char *sequenceName,
-                                                          char *sourceSchemaName,
-                                                          char *sourceName,
-                                                          char *targetSchemaName,
-                                                          char *targetName);
+static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName,
+                                                          char *qualifiedSourceName,
+                                                          char *qualifiedTargetName);
 static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid);
 static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
 static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
@@ -791,13 +788,15 @@ ConvertTableInternal(TableConversionState *con)
 		justBeforeDropCommands = lappend(justBeforeDropCommands, detachFromParentCommand);
 	}
+	char *qualifiedRelationName = quote_qualified_identifier(con->schemaName,
+	                                                         con->relationName);
+
 	if (PartitionedTable(con->relationId))
 	{
 		if (!con->suppressNoticeMessages)
 		{
 			ereport(NOTICE, (errmsg("converting the partitions of %s",
-									quote_qualified_identifier(con->schemaName,
-															   con->relationName))));
+									qualifiedRelationName)));
 		}
 		List *partitionList = PartitionList(con->relationId);
@@ -870,9 +869,7 @@ ConvertTableInternal(TableConversionState *con)
 	if (!con->suppressNoticeMessages)
 	{
-		ereport(NOTICE, (errmsg("creating a new table for %s",
-								quote_qualified_identifier(con->schemaName,
-														   con->relationName))));
+		ereport(NOTICE, (errmsg("creating a new table for %s", qualifiedRelationName)));
 	}
 	TableDDLCommand *tableCreationCommand = NULL;
@@ -999,8 +996,6 @@ ConvertTableInternal(TableConversionState *con)
 		{
 			continue;
 		}
-		char *qualifiedRelationName = quote_qualified_identifier(con->schemaName,
-																 con->relationName);
 		TableConversionParameters cascadeParam = {
 			.relationId = colocatedTableId,
@@ -1750,9 +1745,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid)
 {
 	StringInfo query = makeStringInfo();
-	char *viewName = get_rel_name(matViewOid);
-	char *schemaName = get_namespace_name(get_rel_namespace(matViewOid));
-	char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+	char *qualifiedViewName = generate_qualified_relation_name(matViewOid);
 	/* here we need to get the access method of the view to recreate it */
 	char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid);
@@ -1801,9 +1794,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 			 bool suppressNoticeMessages)
 {
 	char *sourceName = get_rel_name(sourceId);
-	char *targetName = get_rel_name(targetId);
-	Oid schemaId = get_rel_namespace(sourceId);
-	char *schemaName = get_namespace_name(schemaId);
+	char *qualifiedSourceName = generate_qualified_relation_name(sourceId);
+	char *qualifiedTargetName = generate_qualified_relation_name(targetId);
 	StringInfo query = makeStringInfo();
@@ -1811,8 +1803,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 	{
 		if (!suppressNoticeMessages)
 		{
-			ereport(NOTICE, (errmsg("moving the data of %s",
-									quote_qualified_identifier(schemaName, sourceName))));
+			ereport(NOTICE, (errmsg("moving the data of %s", qualifiedSourceName)));
 		}
 		if (!HasAnyGeneratedStoredColumns(sourceId))
@@ -1822,8 +1813,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 			 * "INSERT INTO .. SELECT *"".
 			 */
 			appendStringInfo(query, "INSERT INTO %s SELECT * FROM %s",
-							 quote_qualified_identifier(schemaName, targetName),
-							 quote_qualified_identifier(schemaName, sourceName));
+							 qualifiedTargetName, qualifiedSourceName);
 		}
 		else
 		{
@@ -1838,9 +1828,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 			char *insertColumnString = StringJoin(nonStoredColumnNameList, ',');
 			appendStringInfo(query,
 							 "INSERT INTO %s (%s) OVERRIDING SYSTEM VALUE SELECT %s FROM %s",
-							 quote_qualified_identifier(schemaName, targetName),
-							 insertColumnString, insertColumnString,
-							 quote_qualified_identifier(schemaName, sourceName));
+							 qualifiedTargetName, insertColumnString,
+							 insertColumnString, qualifiedSourceName);
 		}
 		ExecuteQueryViaSPI(query->data, SPI_OK_INSERT);
@@ -1864,14 +1853,11 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 		 */
 		if (ShouldSyncTableMetadata(targetId))
 		{
-			Oid sequenceSchemaOid = get_rel_namespace(sequenceOid);
-			char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid);
-			char *sequenceName = get_rel_name(sequenceOid);
+			char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid);
 			char *workerChangeSequenceDependencyCommand =
-				CreateWorkerChangeSequenceDependencyCommand(sequenceSchemaName,
-															sequenceName,
-															schemaName, sourceName,
-															schemaName, targetName);
+				CreateWorkerChangeSequenceDependencyCommand(qualifiedSequenceName,
															qualifiedSourceName,
															qualifiedTargetName);
 			SendCommandToWorkersWithMetadata(workerChangeSequenceDependencyCommand);
 		}
 		else if (ShouldSyncTableMetadata(sourceId))
@@ -1894,25 +1880,23 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 	if (!suppressNoticeMessages)
 	{
-		ereport(NOTICE, (errmsg("dropping the old %s",
-								quote_qualified_identifier(schemaName, sourceName))));
+		ereport(NOTICE, (errmsg("dropping the old %s", qualifiedSourceName)));
 	}
 	resetStringInfo(query);
 	appendStringInfo(query, "DROP %sTABLE %s CASCADE",
 					 IsForeignTable(sourceId) ? "FOREIGN " : "",
-					 quote_qualified_identifier(schemaName, sourceName));
+					 qualifiedSourceName);
 	ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY);
 	if (!suppressNoticeMessages)
 	{
-		ereport(NOTICE, (errmsg("renaming the new table to %s",
-								quote_qualified_identifier(schemaName, sourceName))));
+		ereport(NOTICE, (errmsg("renaming the new table to %s", qualifiedSourceName)));
 	}
 	resetStringInfo(query);
 	appendStringInfo(query, "ALTER TABLE %s RENAME TO %s",
-					 quote_qualified_identifier(schemaName, targetName),
+					 qualifiedTargetName,
 					 quote_identifier(sourceName));
 	ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY);
 }
@@ -2172,18 +2156,13 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con)
  * worker_change_sequence_dependency query with the parameters.
  */
 static char *
-CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, char *sequenceName,
-											char *sourceSchemaName, char *sourceName,
-											char *targetSchemaName, char *targetName)
+CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName,
+											char *qualifiedSourceName,
+											char *qualifiedTargetName)
 {
-	char *qualifiedSchemaName = quote_qualified_identifier(sequenceSchemaName,
-														   sequenceName);
-	char *qualifiedSourceName = quote_qualified_identifier(sourceSchemaName, sourceName);
-	char *qualifiedTargetName = quote_qualified_identifier(targetSchemaName, targetName);
 	StringInfo query = makeStringInfo();
 	appendStringInfo(query, "SELECT worker_change_sequence_dependency(%s, %s, %s)",
-					 quote_literal_cstr(qualifiedSchemaName),
+					 quote_literal_cstr(qualifiedSequeceName),
 					 quote_literal_cstr(qualifiedSourceName),
 					 quote_literal_cstr(qualifiedTargetName));

@@ -1160,9 +1160,7 @@ DropIdentitiesOnTable(Oid relationId)
 		if (attributeForm->attidentity)
 		{
-			char *tableName = get_rel_name(relationId);
-			char *schemaName = get_namespace_name(get_rel_namespace(relationId));
-			char *qualifiedTableName = quote_qualified_identifier(schemaName, tableName);
+			char *qualifiedTableName = generate_qualified_relation_name(relationId);
 			StringInfo dropCommand = makeStringInfo();
@@ -1222,9 +1220,7 @@ DropViewsOnTable(Oid relationId)
 	Oid viewId = InvalidOid;
 	foreach_declared_oid(viewId, reverseOrderedViews)
 	{
-		char *viewName = get_rel_name(viewId);
-		char *schemaName = get_namespace_name(get_rel_namespace(viewId));
-		char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+		char *qualifiedViewName = generate_qualified_relation_name(viewId);
 		StringInfo dropCommand = makeStringInfo();
 		appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s",

@@ -68,8 +68,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 	char *collcollate;
 	char *collctype;
-#if PG_VERSION_NUM >= PG_VERSION_15
-
 	/*
 	 * In PG15, there is an added option to use ICU as global locale provider.
 	 * pg_collation has three locale-related fields: collcollate and collctype,
@@ -112,16 +110,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 	}
 	Assert((collcollate && collctype) || colllocale);
-#else
-
-	/*
-	 * In versions before 15, collcollate and collctype were type "name". Use
-	 * pstrdup() to match the interface of 15 so that we consistently free the
-	 * result later.
-	 */
-	collcollate = pstrdup(NameStr(collationForm->collcollate));
-	collctype = pstrdup(NameStr(collationForm->collctype));
-#endif
 	if (collowner != NULL)
 	{
@@ -147,7 +135,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 					 "CREATE COLLATION %s (provider = '%s'",
 					 *quotedCollationName, providerString);
-#if PG_VERSION_NUM >= PG_VERSION_15
 	if (colllocale)
 	{
 		appendStringInfo(&collationNameDef,
@@ -173,24 +160,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 		pfree(collcollate);
 		pfree(collctype);
 	}
-#else
-	if (strcmp(collcollate, collctype) == 0)
-	{
-		appendStringInfo(&collationNameDef,
-						 ", locale = %s",
-						 quote_literal_cstr(collcollate));
-	}
-	else
-	{
-		appendStringInfo(&collationNameDef,
-						 ", lc_collate = %s, lc_ctype = %s",
-						 quote_literal_cstr(collcollate),
-						 quote_literal_cstr(collctype));
-	}
-	pfree(collcollate);
-	pfree(collctype);
-#endif
 #if PG_VERSION_NUM >= PG_VERSION_16
 	char *collicurules = NULL;
 	datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);

@@ -170,12 +170,10 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId,
 static void CopyLocalDataIntoShards(Oid distributedTableId);
 static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
 static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
 															 Var *distributionColumn);
 static int numeric_typmod_scale(int32 typmod);
 static bool is_valid_numeric_typmod(int32 typmod);
-#endif
 static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
 														Var *distributionColumn);
@@ -1325,10 +1323,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
 	{
 		List *partitionList = PartitionList(relationId);
 		Oid partitionRelationId = InvalidOid;
-		Oid namespaceId = get_rel_namespace(relationId);
-		char *schemaName = get_namespace_name(namespaceId);
-		char *relationName = get_rel_name(relationId);
-		char *parentRelationName = quote_qualified_identifier(schemaName, relationName);
+		char *parentRelationName = generate_qualified_relation_name(relationId);
 		/*
 		 * when there are many partitions, each call to CreateDistributedTable
@@ -2117,8 +2112,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 						  "AS (...) STORED.")));
 	}
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
 	/* verify target relation is not distributed by a column of type numeric with negative scale */
 	if (distributionMethod != DISTRIBUTE_BY_NONE &&
 		DistributionColumnUsesNumericColumnNegativeScale(relationDesc,
@@ -2129,7 +2122,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 						errdetail("Distribution column must not use numeric type "
 								  "with negative scale")));
 	}
-#endif
 	/* check for support function needed by specified partition method */
 	if (distributionMethod == DISTRIBUTE_BY_HASH)
@@ -2847,8 +2839,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 }
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
 /*
  * is_valid_numeric_typmod checks if the typmod value is valid
  *
@@ -2898,8 +2888,6 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
 }
-#endif
-
 /*
  * DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
  * GENERATED ALWAYS AS (...) STORED on distribution column

@@ -185,8 +185,6 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
 }
-#if PG_VERSION_NUM >= PG_VERSION_15
-
 /*
  * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local
  * postgres instance.
@@ -217,9 +215,6 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
 }
-#endif
-
 /*
  * GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress
 * of the database.

View File

@ -364,6 +364,15 @@ static DistributeObjectOps Any_Rename = {
.address = NULL, .address = NULL,
.markDistributed = false, .markDistributed = false,
}; };
static DistributeObjectOps Any_SecLabel = {
.deparse = DeparseSecLabelStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessSecLabelStmt,
.operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Attribute_Rename = {
.deparse = DeparseRenameAttributeStmt,
.qualify = QualifyRenameAttributeStmt,
@ -456,7 +465,6 @@ static DistributeObjectOps Database_Alter = {
.markDistributed = false,
};
#if PG_VERSION_NUM >= PG_VERSION_15
static DistributeObjectOps Database_RefreshColl = {
.deparse = DeparseAlterDatabaseRefreshCollStmt,
.qualify = NULL,
@ -467,7 +475,6 @@ static DistributeObjectOps Database_RefreshColl = {
.address = NULL,
.markDistributed = false,
};
#endif
static DistributeObjectOps Domain_Alter = {
.deparse = DeparseAlterDomainStmt,
@ -828,7 +835,6 @@ static DistributeObjectOps Sequence_AlterOwner = {
.address = AlterSequenceOwnerStmtObjectAddress,
.markDistributed = false,
};
#if (PG_VERSION_NUM >= PG_VERSION_15)
static DistributeObjectOps Sequence_AlterPersistence = {
.deparse = DeparseAlterSequencePersistenceStmt,
.qualify = QualifyAlterSequencePersistenceStmt,
@ -838,7 +844,6 @@ static DistributeObjectOps Sequence_AlterPersistence = {
.address = AlterSequencePersistenceStmtObjectAddress,
.markDistributed = false,
};
#endif
static DistributeObjectOps Sequence_Drop = {
.deparse = DeparseDropSequenceStmt,
.qualify = QualifyDropSequenceStmt,
@ -1290,7 +1295,7 @@ static DistributeObjectOps View_Rename = {
static DistributeObjectOps Trigger_Rename = {
.deparse = NULL,
.qualify = NULL,
.preprocess = PreprocessAlterTriggerRenameStmt, .preprocess = NULL,
.operationType = DIST_OPS_ALTER,
.postprocess = PostprocessAlterTriggerRenameStmt,
.address = NULL,
@ -1312,13 +1317,11 @@ GetDistributeObjectOps(Node *node)
return &Database_Alter;
}
#if PG_VERSION_NUM >= PG_VERSION_15
case T_AlterDatabaseRefreshCollStmt:
{
return &Database_RefreshColl;
}
#endif
case T_AlterDomainStmt:
{
return &Domain_Alter;
@ -1603,7 +1606,6 @@ GetDistributeObjectOps(Node *node)
case OBJECT_SEQUENCE:
{
#if (PG_VERSION_NUM >= PG_VERSION_15)
ListCell *cmdCell = NULL;
foreach(cmdCell, stmt->cmds)
{
@ -1631,7 +1633,6 @@ GetDistributeObjectOps(Node *node)
}
}
}
#endif
/*
* Prior to PG15, the only Alter Table statement
@ -1991,6 +1992,11 @@ GetDistributeObjectOps(Node *node)
return &Vacuum_Analyze;
}
case T_SecLabelStmt:
{
return &Any_SecLabel;
}
case T_RenameStmt:
{
RenameStmt *stmt = castNode(RenameStmt, node);
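To see how the new entry is used, here is a minimal sketch of the dispatch path; the variable names are illustrative and the exact utility-hook call site is not part of this diff:

/* sketch: a SECURITY LABEL statement reaching its Citus handlers */
const DistributeObjectOps *ops = GetDistributeObjectOps(parsetree);

/* for a T_SecLabelStmt this now returns &Any_SecLabel, so after local
 * execution the utility hook runs PostprocessSecLabelStmt via: */
if (ops->postprocess != NULL)
{
	List *ddlJobs = ops->postprocess(parsetree, queryString);
}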
View File
@ -467,7 +467,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
}
List *onDeleteSetDefColumnList = NIL;
#if PG_VERSION_NUM >= PG_VERSION_15
Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
Anum_pg_constraint_confdelsetcols,
&isNull);
@ -482,7 +481,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
onDeleteSetDefColumnList =
IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
}
#endif
if (list_length(onDeleteSetDefColumnList) == 0)
{
View File
@ -2549,12 +2549,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
if (columnNulls[partitionColumnIndex])
{
Oid relationId = copyDest->distributedRelationId; char *qualifiedTableName = generate_qualified_relation_name(
char *relationName = get_rel_name(relationId); copyDest->distributedRelationId);
Oid schemaOid = get_rel_namespace(relationId);
char *schemaName = get_namespace_name(schemaOid);
char *qualifiedTableName = quote_qualified_identifier(schemaName,
relationName);
ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("the partition column of table %s cannot be NULL",
View File
@ -33,11 +33,9 @@
static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId,
Oid publicationId,
bool tableOnly);
#endif
static void AppendPublishOptionList(StringInfo str, List *strings);
static char * AlterPublicationOwnerCommand(Oid publicationId);
static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt);
@ -154,7 +152,6 @@ BuildCreatePublicationStmt(Oid publicationId)
ReleaseSysCache(publicationTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
List *schemaIds = GetPublicationSchemas(publicationId);
Oid schemaId = InvalidOid;
@ -170,7 +167,6 @@ BuildCreatePublicationStmt(Oid publicationId)
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
}
#endif
List *relationIds = GetPublicationRelations(publicationId,
publicationForm->pubviaroot ?
@ -184,7 +180,6 @@ BuildCreatePublicationStmt(Oid publicationId)
foreach_declared_oid(relationId, relationIds)
{
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = false;
/* since postgres 15, tables can have a column list and filter */
@ -192,15 +187,6 @@ BuildCreatePublicationStmt(Oid publicationId)
BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly);
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
#endif
if (IsCitusTable(relationId))
{
@ -276,8 +262,6 @@ AppendPublishOptionList(StringInfo str, List *options)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* BuildPublicationRelationObjSpec returns a PublicationObjSpec that
* can be included in a CREATE or ALTER PUBLICATION statement.
@ -357,9 +341,6 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId,
}
#endif
/*
* PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements
* in a way that is mostly similar to PreprocessAlterDistributedObjectStmt,
@ -458,7 +439,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
ReleaseSysCache(pubTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = !isAdd;
/* since postgres 15, tables can have a column list and filter */
@ -467,16 +447,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject);
alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects;
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
alterPubStmt->tables = lappend(alterPubStmt->tables, rangeVar);
alterPubStmt->tableAction = isAdd ? DEFELEM_ADD : DEFELEM_DROP;
#endif
/* we take the WHERE clause from the catalog where it is already transformed */
bool whereClauseNeedsTransform = false;
View File
@ -22,6 +22,7 @@
#include "catalog/pg_auth_members.h" #include "catalog/pg_auth_members.h"
#include "catalog/pg_authid.h" #include "catalog/pg_authid.h"
#include "catalog/pg_db_role_setting.h" #include "catalog/pg_db_role_setting.h"
#include "catalog/pg_shseclabel.h"
#include "catalog/pg_type.h" #include "catalog/pg_type.h"
#include "commands/dbcommands.h" #include "commands/dbcommands.h"
#include "nodes/makefuncs.h" #include "nodes/makefuncs.h"
@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple); static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid); static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename);
static void EnsureSequentialModeForRoleDDL(void); static void EnsureSequentialModeForRoleDDL(void);
static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
@ -491,18 +493,17 @@ GenerateRoleOptionsList(HeapTuple tuple)
options = lappend(options, makeDefElem("password", NULL, -1)); options = lappend(options, makeDefElem("password", NULL, -1));
} }
/* load valid unitl data from the heap tuple, use default of infinity if not set */ /* load valid until data from the heap tuple */
Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple, Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple,
Anum_pg_authid_rolvaliduntil, &isNull); Anum_pg_authid_rolvaliduntil, &isNull);
char *rolValidUntil = "infinity";
if (!isNull) if (!isNull)
{ {
rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); char *rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));
}
Node *validUntilStringNode = (Node *) makeString(rolValidUntil); Node *validUntilStringNode = (Node *) makeString(rolValidUntil);
DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1); DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1);
options = lappend(options, validUntilOption); options = lappend(options, validUntilOption);
}
return options; return options;
} }
@ -517,13 +518,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{ {
HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid)); HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid));
Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple)); Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));
char *rolename = pstrdup(NameStr(role->rolname));
CreateRoleStmt *createRoleStmt = NULL; CreateRoleStmt *createRoleStmt = NULL;
if (EnableCreateRolePropagation) if (EnableCreateRolePropagation)
{ {
createRoleStmt = makeNode(CreateRoleStmt); createRoleStmt = makeNode(CreateRoleStmt);
createRoleStmt->stmt_type = ROLESTMT_ROLE; createRoleStmt->stmt_type = ROLESTMT_ROLE;
createRoleStmt->role = pstrdup(NameStr(role->rolname)); createRoleStmt->role = rolename;
createRoleStmt->options = GenerateRoleOptionsList(roleTuple); createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
} }
@ -534,7 +536,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
alterRoleStmt->role = makeNode(RoleSpec); alterRoleStmt->role = makeNode(RoleSpec);
alterRoleStmt->role->roletype = ROLESPEC_CSTRING; alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
alterRoleStmt->role->location = -1; alterRoleStmt->role->location = -1;
alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname)); alterRoleStmt->role->rolename = rolename;
alterRoleStmt->action = 1; alterRoleStmt->action = 1;
alterRoleStmt->options = GenerateRoleOptionsList(roleTuple); alterRoleStmt->options = GenerateRoleOptionsList(roleTuple);
} }
@ -546,7 +548,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{ {
/* add a worker_create_or_alter_role command if any of them are set */ /* add a worker_create_or_alter_role command if any of them are set */
char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand( char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(
pstrdup(NameStr(role->rolname)), rolename,
createRoleStmt, createRoleStmt,
alterRoleStmt); alterRoleStmt);
@ -568,6 +570,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{ {
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
} }
/*
* append SECURITY LABEL ON ROLE commands for this specific user
* When we propagate user creation, we also want to make sure that we propagate
* all the security labels it has been given. For this, we check pg_shseclabel
* for the ROLE entry corresponding to roleOid, and generate the relevant
* SecLabel stmts to be run in the new node.
*/
List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
stmt = NULL;
foreach_declared_ptr(stmt, secLabelOnRoleStmts)
{
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
}
} }
return completeRoleList; return completeRoleList;
@ -897,6 +913,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
} }
/*
* GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role
* whose oid is roleid.
*/
static List *
GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename)
{
List *secLabelStmts = NIL;
/*
* Note that roles are shared database objects, therefore their
* security labels are stored in pg_shseclabel instead of pg_seclabel.
*/
Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock);
ScanKeyData skey[1];
ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(roleid));
SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
true, NULL, 1, &skey[0]);
HeapTuple tuple = NULL;
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt);
secLabelStmt->objtype = OBJECT_ROLE;
secLabelStmt->object = (Node *) makeString(pstrdup(rolename));
Datum datumArray[Natts_pg_shseclabel];
bool isNullArray[Natts_pg_shseclabel];
heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray,
isNullArray);
secLabelStmt->provider = TextDatumGetCString(
datumArray[Anum_pg_shseclabel_provider - 1]);
secLabelStmt->label = TextDatumGetCString(
datumArray[Anum_pg_shseclabel_label - 1]);
secLabelStmts = lappend(secLabelStmts, secLabelStmt);
}
systable_endscan(scan);
table_close(pg_shseclabel, AccessShareLock);
return secLabelStmts;
}
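As an illustration of the round trip, a hypothetical pg_shseclabel entry for a role alice with provider selinux and label user_u would be rebuilt and deparsed roughly like this (all names invented, not from this diff):

SecLabelStmt *example = makeNode(SecLabelStmt);
example->objtype = OBJECT_ROLE;
example->object = (Node *) makeString("alice");
example->provider = "selinux";
example->label = "user_u";

/* deparses to: SECURITY LABEL FOR selinux ON ROLE alice IS 'user_u' */
char *command = DeparseTreeNode((Node *) example);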
/*
* PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
* role that is being created. With that query we can create the role in the
@ -963,13 +1027,8 @@ makeStringConst(char *str, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.sval.type = T_String;
n->val.sval.sval = str;
#else
n->val.type = T_String;
n->val.val.str = str;
#endif
n->location = location;
return (Node *) n;
@ -989,13 +1048,8 @@ makeIntConst(int val, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.ival.type = T_Integer;
n->val.ival.ival = val;
#else
n->val.type = T_Integer;
n->val.val.ival = val;
#endif
n->location = location;
return (Node *) n;
@ -1012,13 +1066,8 @@ makeFloatConst(char *str, int location)
{
A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.fval.type = T_Float;
n->val.fval.fval = str;
#else
n->val.type = T_Float;
n->val.val.str = str;
#endif
n->location = location;
return (Node *) n;
View File
@ -0,0 +1,125 @@
/*-------------------------------------------------------------------------
*
* seclabel.c
*
* This file contains the logic of SECURITY LABEL statement propagation.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/deparser.h"
#include "distributed/log_utils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
/*
* PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign
* security labels on distributed objects, currently supporting just Role objects.
* It also ensures that all object dependencies exist on all
* nodes for the object in the SecLabelStmt.
*/
List *
PostprocessSecLabelStmt(Node *node, const char *queryString)
{
if (!ShouldPropagate())
{
return NIL;
}
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
if (!IsAnyObjectDistributed(objectAddresses))
{
return NIL;
}
if (secLabelStmt->objtype != OBJECT_ROLE)
{
/*
* If we are not on the coordinator, we don't want to interrupt the security
* label command with notices; when connected to a worker node, the user
* expects the command not to be propagated.
*/
if (EnableUnsupportedFeatureMessages && IsCoordinator())
{
ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
"object type is not role"),
errhint("Connect to worker nodes directly to manually "
"run the same SECURITY LABEL command.")));
}
return NIL;
}
if (!EnableCreateRolePropagation)
{
return NIL;
}
EnsureCoordinator();
EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
const char *sql = DeparseTreeNode((Node *) secLabelStmt);
List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
}
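Assuming DISABLE_DDL_PROPAGATION and ENABLE_DDL_PROPAGATION keep their usual Citus definitions, each worker ends up running a sequence roughly like the following sketch (role, provider, and label invented):

/*
 *   SET citus.enable_ddl_propagation TO 'off';
 *   SECURITY LABEL FOR "my_provider" ON ROLE alice IS 'classified';
 *   SET citus.enable_ddl_propagation TO 'on';
 */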
/*
* SecLabelStmtObjectAddress returns the object address of the object on
* which this statement operates (secLabelStmt->object). Note that it has no limitation
* on the object type being OBJECT_ROLE. This is intentionally implemented like this
* since it is fairly simple to implement and we might extend SECURITY LABEL propagation
* in the future to include more object types.
*/
List *
SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
Relation rel = NULL;
ObjectAddress address = get_object_address(secLabelStmt->objtype,
secLabelStmt->object, &rel,
AccessShareLock, missing_ok);
if (rel != NULL)
{
relation_close(rel, AccessShareLock);
}
ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress));
*addressPtr = address;
return list_make1(addressPtr);
}
/*
* citus_test_object_relabel is a dummy function for check_object_relabel_type hook.
* It is meant to be used in tests combined with citus_test_register_label_provider
*/
void
citus_test_object_relabel(const ObjectAddress *object, const char *seclabel)
{
if (seclabel == NULL ||
strcmp(seclabel, "citus_unclassified") == 0 ||
strcmp(seclabel, "citus_classified") == 0 ||
strcmp(seclabel, "citus '!unclassified") == 0)
{
return;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("'%s' is not a valid security label for Citus tests.", seclabel)));
}
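For this hook to be consulted, the provider must be registered with PostgreSQL; a minimal sketch of what citus_test_register_label_provider (referenced above but not shown in this diff) presumably does, with the provider name invented for illustration:

#include "commands/seclabel.h"

/* sketch: register the dummy provider so SECURITY LABEL FOR ... IS ...
 * in tests is validated by citus_test_object_relabel */
static void
RegisterCitusTestLabelProvider(void)
{
	register_label_provider("citus_tests_label_provider",
							citus_test_object_relabel);
}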
View File
@ -735,8 +735,6 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* PreprocessAlterSequencePersistenceStmt is called for change of persistence
* of sequences before the persistence is changed on the local instance.
@ -847,9 +845,6 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
}
#endif
/*
* PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
* postgres instance.
View File
@ -1153,7 +1153,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
{
AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
stmtCopy->objtype = OBJECT_SEQUENCE;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* it must be ALTER TABLE .. OWNER TO ..
@ -1163,16 +1162,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
*/
return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext);
#else
/*
* it must be ALTER TABLE .. OWNER TO .. command
* since this is the only ALTER command of a sequence that
* passes through an AlterTableStmt
*/
return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext);
#endif
}
else if (relKind == RELKIND_VIEW)
{
@ -3673,9 +3662,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
"are currently unsupported.")));
break;
}
#endif
#if PG_VERSION_NUM >= PG_VERSION_15
case AT_SetAccessMethod:
{
/*
@ -3695,7 +3683,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
break;
}
#endif
case AT_SetNotNull:
case AT_ReplicaIdentity:
case AT_ChangeOwner:
View File
@ -57,9 +57,6 @@ static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt,
static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt);
static char * GetTriggerNameById(Oid triggerId);
static int16 GetTriggerTypeById(Oid triggerId);
#if (PG_VERSION_NUM < PG_VERSION_15)
static void ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname);
#endif
/* GUC that overrides trigger checks for distributed tables and reference tables */
@ -404,40 +401,6 @@ CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaNam
}
/*
* PreprocessAlterTriggerRenameStmt is called before a ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors
* out if we are trying to rename a child trigger on a partition of a distributed
* table. In PG15, this is not allowed anyway.
*/
List *
PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
#if (PG_VERSION_NUM < PG_VERSION_15)
RenameStmt *renameTriggerStmt = castNode(RenameStmt, node);
Assert(renameTriggerStmt->renameType == OBJECT_TRIGGER);
RangeVar *relation = renameTriggerStmt->relation;
bool missingOk = false;
Oid relationId = RangeVarGetRelid(relation, ALTER_TRIGGER_LOCK_MODE, missingOk);
if (!IsCitusTable(relationId))
{
return NIL;
}
EnsureCoordinator();
ErrorOutForTriggerIfNotSupported(relationId);
ErrorOutIfCloneTrigger(relationId, renameTriggerStmt->subname);
#endif
return NIL;
}
/*
* PostprocessAlterTriggerRenameStmt is called after a ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors
@ -759,64 +722,6 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
}
#if (PG_VERSION_NUM < PG_VERSION_15)
/*
* ErrorOutIfCloneTrigger is a helper function to error
* out if we are trying to rename a child trigger on a
* partition of a distributed table.
* A lot of this code is borrowed from PG15 because
* renaming clone triggers isn't allowed in PG15 anymore.
*/
static void
ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname)
{
HeapTuple tuple;
ScanKeyData key[2];
Relation tgrel = table_open(TriggerRelationId, RowExclusiveLock);
/*
* Search for the trigger to modify.
*/
ScanKeyInit(&key[0],
Anum_pg_trigger_tgrelid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tgrelid));
ScanKeyInit(&key[1],
Anum_pg_trigger_tgname,
BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum(tgname));
SysScanDesc tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
NULL, 2, key);
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
Form_pg_trigger trigform = (Form_pg_trigger) GETSTRUCT(tuple);
/*
* If the trigger descends from a trigger on a parent partitioned
* table, reject the rename.
* Appended shard ids to find the trigger on the partition's shards
* are not correct. Hence we would fail to find the trigger on the
* partition's shard.
*/
if (OidIsValid(trigform->tgparentid))
{
ereport(ERROR, (
errmsg(
"cannot rename child triggers on distributed partitions")));
}
}
systable_endscan(tgscan);
table_close(tgrel, RowExclusiveLock);
}
#endif
/*
* GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns
* RangeVar for the relation that owns the trigger.
View File
@ -392,9 +392,7 @@ CreateViewDDLCommand(Oid viewOid)
static void
AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid)
{
char *viewName = get_rel_name(viewOid); char *qualifiedViewName = generate_qualified_relation_name(viewOid);
char *schemaName = get_namespace_name(get_rel_namespace(viewOid));
char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
appendStringInfo(buf, "%s ", qualifiedViewName);
}
View File
@ -614,16 +614,6 @@ WaitForSharedConnection(void)
void
InitializeSharedConnectionStats(void)
{
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
}
#endif
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = SharedConnectionStatsShmemInit;
}
View File
@ -258,10 +258,8 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
char *typeName = format_type_be(pgSequenceForm->seqtypid);
char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND,
#if (PG_VERSION_NUM >= PG_VERSION_15)
get_rel_persistence(sequenceRelationId) ==
RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "",
#endif
qualifiedSequenceName,
typeName,
pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
@ -857,12 +855,10 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
appendStringInfoString(buffer, ") ");
}
#if PG_VERSION_NUM >= PG_VERSION_15
if (indexStmt->nulls_not_distinct)
{
appendStringInfoString(buffer, "NULLS NOT DISTINCT ");
}
#endif /* PG_VERSION_15 */
if (indexStmt->options != NIL)
{
View File
@ -159,7 +159,6 @@ DeparseAlterDatabaseStmt(Node *node)
}
#if PG_VERSION_NUM >= PG_VERSION_15
char *
DeparseAlterDatabaseRefreshCollStmt(Node *node)
{
@ -174,6 +173,3 @@ DeparseAlterDatabaseRefreshCollStmt(Node *node)
return str.data;
}
#endif
View File
@ -32,7 +32,6 @@
static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
bool whereClauseNeedsTransform,
bool includeLocalTables);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects,
bool whereClauseNeedsTransform,
bool includeLocalTables);
@ -40,10 +39,6 @@ static void AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
Node *whereClause,
bool whereClauseNeedsTransform);
static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action);
#else
static bool AppendTables(StringInfo buf, List *tables, bool includeLocalTables);
static void AppendDefElemAction(StringInfo buf, DefElemAction action);
#endif
static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
bool whereClauseNeedsTransform,
bool includeLocalTables);
@ -108,7 +103,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
{
appendStringInfoString(buf, " FOR ALL TABLES");
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
else if (stmt->pubobjects != NIL)
{
bool hasObjects = false;
@ -146,32 +140,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
includeLocalTables);
}
}
#else
else if (stmt->tables != NIL)
{
bool hasTables = false;
RangeVar *rangeVar = NULL;
/*
* Check whether there are tables to propagate, mainly to know whether
* we should include "FOR".
*/
foreach_declared_ptr(rangeVar, stmt->tables)
{
if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false))
{
hasTables = true;
break;
}
}
if (hasTables)
{
appendStringInfoString(buf, " FOR");
AppendTables(buf, stmt->tables, includeLocalTables);
}
}
#endif
if (stmt->options != NIL)
{
@ -182,8 +150,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer.
@ -320,57 +286,6 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
}
#else
/*
* AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer.
*
* For instance: TABLE users, departments
*/
static bool
AppendTables(StringInfo buf, List *tables, bool includeLocalTables)
{
RangeVar *rangeVar = NULL;
bool appendedObject = false;
foreach_declared_ptr(rangeVar, tables)
{
if (!includeLocalTables &&
!IsCitusTableRangeVar(rangeVar, NoLock, false))
{
/* do not propagate local tables */
continue;
}
char *schemaName = rangeVar->schemaname;
char *tableName = rangeVar->relname;
if (schemaName != NULL)
{
/* qualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_qualified_identifier(schemaName, tableName));
}
else
{
/* unqualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_identifier(tableName));
}
appendedObject = true;
}
return appendedObject;
}
#endif
/*
* DeparseAlterPublicationSchemaStmt builds and returns a string representing
* an AlterPublicationStmt.
@ -439,19 +354,12 @@ AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
return true;
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
AppendAlterPublicationAction(buf, stmt->action);
return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform,
includeLocalTables);
#else
AppendDefElemAction(buf, stmt->tableAction);
return AppendTables(buf, stmt->tables, includeLocalTables);
#endif
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* AppendAlterPublicationAction appends a string representing an AlterPublicationAction
* to a buffer.
@ -487,46 +395,6 @@ AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action)
}
#else
/*
* AppendDefElemAction appends a string representing a DefElemAction
* to a buffer.
*/
static void
AppendDefElemAction(StringInfo buf, DefElemAction action)
{
switch (action)
{
case DEFELEM_ADD:
{
appendStringInfoString(buf, " ADD");
break;
}
case DEFELEM_DROP:
{
appendStringInfoString(buf, " DROP");
break;
}
case DEFELEM_SET:
{
appendStringInfoString(buf, " SET");
break;
}
default:
{
ereport(ERROR, (errmsg("unrecognized publication action: %d", action)));
}
}
}
#endif
/*
* DeparseDropPublicationStmt builds and returns a string representing the DropStmt
*/
@ -651,11 +519,7 @@ AppendPublicationOptions(StringInfo stringBuffer, List *optionList)
appendStringInfo(stringBuffer, "%s = ",
quote_identifier(optionName));
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean)
#else
if (valueType == T_Integer || valueType == T_Float)
#endif
{
/* string escaping is unnecessary for numeric types and can cause issues */
appendStringInfo(stringBuffer, "%s", optionValue);
View File
@ -0,0 +1,79 @@
/*-------------------------------------------------------------------------
*
* deparse_seclabel_stmts.c
* All routines to deparse SECURITY LABEL statements.
*
* Copyright (c), Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
#include "distributed/deparser.h"
static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);
/*
* DeparseSecLabelStmt builds and returns a string representation of the
* SecLabelStmt for application on a remote server.
*/
char *
DeparseSecLabelStmt(Node *node)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
AppendSecLabelStmt(&buf, secLabelStmt);
return buf.data;
}
/*
* AppendSecLabelStmt generates the string representation of the
* SecLabelStmt and appends it to the buffer.
*/
static void
AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
{
appendStringInfoString(buf, "SECURITY LABEL ");
if (stmt->provider != NULL)
{
appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider));
}
appendStringInfoString(buf, "ON ");
switch (stmt->objtype)
{
case OBJECT_ROLE:
{
appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object)));
break;
}
/* normally, we shouldn't reach this */
default:
{
ereport(ERROR, (errmsg("unsupported security label statement for"
" deparsing")));
}
}
appendStringInfoString(buf, "IS ");
if (stmt->label != NULL)
{
appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label));
}
else
{
appendStringInfoString(buf, "NULL");
}
}
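A short sketch of the two output shapes this function produces; the role name is invented:

SecLabelStmt *unset = makeNode(SecLabelStmt);
unset->objtype = OBJECT_ROLE;
unset->object = (Node *) makeString("alice");
unset->label = NULL;

/* label set:   SECURITY LABEL FOR p ON ROLE alice IS 'classified'
 * label unset: SECURITY LABEL ON ROLE alice IS NULL  (removes the label) */
char *sql = DeparseSecLabelStmt((Node *) unset);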
View File
@ -28,9 +28,7 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt);
#endif
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);
@ -262,8 +260,6 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* DeparseAlterSequencePersistenceStmt builds and returns a string representing
* the AlterTableStmt consisting of changing the persistence of a sequence
@ -349,9 +345,6 @@ AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
}
#endif
/*
* DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
*/
View File
@ -193,12 +193,10 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
{
appendStringInfoString(buf, " UNIQUE");
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (constraint->nulls_not_distinct == true)
{
appendStringInfoString(buf, " NULLS NOT DISTINCT");
}
#endif
}
if (subtype == AT_AddConstraint)
View File
@ -19,11 +19,7 @@
#include "distributed/deparser.h" #include "distributed/deparser.h"
#include "distributed/listutils.h" #include "distributed/listutils.h"
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void QualifyPublicationObjects(List *publicationObjects); static void QualifyPublicationObjects(List *publicationObjects);
#else
static void QualifyTables(List *tables);
#endif
static void QualifyPublicationRangeVar(RangeVar *publication); static void QualifyPublicationRangeVar(RangeVar *publication);
@ -36,16 +32,10 @@ QualifyCreatePublicationStmt(Node *node)
{ {
CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node); CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects); QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* QualifyPublicationObjects ensures all table names in a list of * QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified. * publication objects are fully qualified.
@ -68,26 +58,6 @@ QualifyPublicationObjects(List *publicationObjects)
} }
#else
/*
* QualifyTables ensures all table names in a list are fully qualified.
*/
static void
QualifyTables(List *tables)
{
RangeVar *rangeVar = NULL;
foreach_declared_ptr(rangeVar, tables)
{
QualifyPublicationRangeVar(rangeVar);
}
}
#endif
/* /*
* QualifyPublicationObjects ensures all table names in a list of * QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified. * publication objects are fully qualified.
@ -97,11 +67,7 @@ QualifyAlterPublicationStmt(Node *node)
{ {
AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node); AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects); QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
} }
View File
@ -52,8 +52,6 @@ QualifyAlterSequenceOwnerStmt(Node *node)
}
#if (PG_VERSION_NUM >= PG_VERSION_15)
/*
* QualifyAlterSequencePersistenceStmt transforms a
* ALTER SEQUENCE .. SET LOGGED/UNLOGGED
@ -80,9 +78,6 @@ QualifyAlterSequencePersistenceStmt(Node *node)
}
#endif
/*
* QualifyAlterSequenceSchemaStmt transforms a
* ALTER SEQUENCE .. SET SCHEMA ..
File diff suppressed because it is too large
View File
@ -720,10 +720,8 @@ static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
static void AddLatchWaitEventToExecution(DistributedExecution *execution);
static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int
eventCount, bool *cancellationReceived);
#if PG_VERSION_NUM >= PG_VERSION_15
static void RemoteSocketClosedForAnySession(DistributedExecution *execution);
static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount);
#endif
static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey);
@ -1769,11 +1767,8 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
session->commandsSent = 0;
session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */
UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED);
#endif
dlist_init(&session->pendingTaskQueue);
dlist_init(&session->readyTaskQueue);
@ -1817,7 +1812,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
* the events, even ignores cancellation events. Future callers of this
* function should consider its limitations.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
static void
RemoteSocketClosedForAnySession(DistributedExecution *execution)
{
@ -1835,9 +1829,6 @@ RemoteSocketClosedForAnySession(DistributedExecution *execution)
}
#endif
/*
* SequentialRunDistributedExecution gets a distributed execution and
* executes each individual task in the execution sequentially, one
@ -2173,8 +2164,6 @@ ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int eventC
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event.
* If WL_SOCKET_CLOSED is found, the function sets the underlying connection's
@ -2207,9 +2196,6 @@ ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount)
}
#endif
/*
* ManageWorkerPool ensures the worker pool has the appropriate number of connections
* based on the number of pending tasks.
@ -2704,7 +2690,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* Instead, we prefer this slight difference, which in effect has almost no
* difference, but doing things in different points in time.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
/* we added new connections, rebuild the waitEventSet */
RebuildWaitEventSetForSessions(execution);
@ -2724,9 +2709,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* of the execution.
*/
AddLatchWaitEventToExecution(execution);
#else
execution->rebuildWaitEventSet = true;
#endif
WorkerSession *session = NULL;
foreach_declared_ptr(session, newSessionsList)
@ -3663,13 +3645,8 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
return;
}
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */
connection->waitFlags = waitFlags | WL_SOCKET_CLOSED;
#else
connection->waitFlags = waitFlags;
#endif
/* without signalling the execution, the flag changes won't be reflected */
execution->waitFlagsChanged = true;
@ -3694,13 +3671,11 @@ CheckConnectionReady(WorkerSession *session)
return false;
}
#if PG_VERSION_NUM >= PG_VERSION_15
if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0)
{
connection->connectionState = MULTI_CONNECTION_LOST;
return false;
}
#endif
/* try to send all pending data */
int sendStatus = PQflush(connection->pgConn);
View File
@ -143,15 +143,10 @@ NonPushableInsertSelectExecScan(CustomScanState *node)
targetRelation->partitionColumn);
if (distributionColumnIndex == -1)
{
char *relationName = get_rel_name(targetRelationId);
Oid schemaOid = get_rel_namespace(targetRelationId);
char *schemaName = get_namespace_name(schemaOid);
ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg(
"the partition column of table %s should have a value",
quote_qualified_identifier(schemaName, generate_qualified_relation_name(targetRelationId))));
relationName))));
}
TargetEntry *selectPartitionTE = list_nth(selectQuery->targetList,
View File
@ -219,6 +219,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
copyObject(distributedPlan->selectPlanForModifyViaCoordinatorOrRepartition);
char *intermediateResultIdPrefix = distributedPlan->intermediateResultIdPrefix;
bool hasReturning = distributedPlan->expectResults;
bool hasNotMatchedBySource = HasMergeNotMatchedBySource(mergeQuery);
int partitionColumnIndex = distributedPlan->sourceResultRepartitionColumnIndex;
/*
@ -233,7 +234,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
ereport(DEBUG1, (errmsg("Collect source query results on coordinator")));
List *prunedTaskList = NIL; List *prunedTaskList = NIL, *emptySourceTaskList = NIL;
HTAB *shardStateHash =
ExecuteMergeSourcePlanIntoColocatedIntermediateResults(
targetRelationId,
@ -255,7 +256,8 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
* We cannot actually execute MERGE INTO ... tasks that read from
* intermediate results that weren't created because no rows were
* written to them. Prune those tasks out by only including tasks
* on shards with connections. * on shards with connections; however, if the MERGE INTO includes
* a NOT MATCHED BY SOURCE clause we need to include the task.
*/
Task *task = NULL;
foreach_declared_ptr(task, taskList)
@ -268,6 +270,19 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
{
prunedTaskList = lappend(prunedTaskList, task);
}
else if (hasNotMatchedBySource)
{
emptySourceTaskList = lappend(emptySourceTaskList, task);
}
}
if (emptySourceTaskList != NIL)
{
ereport(DEBUG1, (errmsg("MERGE has NOT MATCHED BY SOURCE clause, "
"execute MERGE on all shards")));
AdjustTaskQueryForEmptySource(targetRelationId, mergeQuery, emptySourceTaskList,
intermediateResultIdPrefix);
prunedTaskList = list_concat(prunedTaskList, emptySourceTaskList);
}
if (prunedTaskList == NIL)
View File
@ -140,19 +140,6 @@ static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash);
void
InitializeCitusQueryStats(void)
{
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
}
#endif
/* Install hook */
prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = CitusQueryStatsShmemStartup;
View File
@ -17,6 +17,7 @@
#include "nodes/parsenodes.h" #include "nodes/parsenodes.h"
#include "distributed/citus_custom_scan.h" #include "distributed/citus_custom_scan.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/intermediate_results.h" #include "distributed/intermediate_results.h"
#include "distributed/listutils.h" #include "distributed/listutils.h"
#include "distributed/multi_physical_planner.h" #include "distributed/multi_physical_planner.h"
@ -101,6 +102,40 @@ IsRedistributablePlan(Plan *selectPlan)
} }
/*
* HasMergeNotMatchedBySource returns true if the MERGE query has a
* WHEN NOT MATCHED BY SOURCE clause. If it does, we need to execute
* the MERGE query on all shards of the target table, regardless of
* whether or not the source shard has any rows.
*/
bool
HasMergeNotMatchedBySource(Query *query)
{
if (!IsMergeQuery(query))
{
return false;
}
bool haveNotMatchedBySource = false;
#if PG_VERSION_NUM >= PG_VERSION_17
ListCell *lc;
foreach(lc, query->mergeActionList)
{
MergeAction *action = lfirst_node(MergeAction, lc);
if (action->matchKind == MERGE_WHEN_NOT_MATCHED_BY_SOURCE)
{
haveNotMatchedBySource = true;
break;
}
}
#endif
return haveNotMatchedBySource;
}
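For reference, a sketch of a query shape that makes this function return true on PG17+; the table and column names are invented:

/*
 *   MERGE INTO target t
 *   USING source s ON t.id = s.id
 *   WHEN MATCHED THEN UPDATE SET val = s.val
 *   WHEN NOT MATCHED BY SOURCE THEN DELETE;
 *
 * The last action parses to a MergeAction with
 * matchKind == MERGE_WHEN_NOT_MATCHED_BY_SOURCE: target rows without a
 * source match are affected even when the colocated source shard produced
 * no rows, so those tasks must not be pruned.
 */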
/*
* GenerateTaskListWithColocatedIntermediateResults generates a list of tasks
* for a query that inserts into a target relation and selects from a set of
@ -200,6 +235,61 @@ GenerateTaskListWithColocatedIntermediateResults(Oid targetRelationId,
}
/*
* AdjustTaskQueryForEmptySource adjusts the query for tasks that read from an
* intermediate result to instead read from an empty relation. This ensures that
* the MERGE query is executed on all shards of the target table, because it has
* a NOT MATCHED BY SOURCE clause, which will be true for all target shards where
* the source shard has no rows.
*/
void
AdjustTaskQueryForEmptySource(Oid targetRelationId,
Query *mergeQuery,
List *tasks,
char *resultIdPrefix)
{
Query *mergeQueryCopy = copyObject(mergeQuery);
RangeTblEntry *selectRte = ExtractSourceResultRangeTableEntry(mergeQueryCopy);
RangeTblEntry *mergeRte = ExtractResultRelationRTE(mergeQueryCopy);
List *targetList = selectRte->subquery->targetList;
ListCell *taskCell = NULL;
foreach(taskCell, tasks)
{
Task *task = lfirst(taskCell);
uint64 shardId = task->anchorShardId;
StringInfo queryString = makeStringInfo();
StringInfo resultId = makeStringInfo();
appendStringInfo(resultId, "%s_" UINT64_FORMAT, resultIdPrefix, shardId);
/* Generate a query for an empty relation */
selectRte->subquery = BuildEmptyResultQuery(targetList, resultId->data);
/* setting an alias simplifies deparsing of RETURNING */
if (mergeRte->alias == NULL)
{
Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL);
mergeRte->alias = alias;
}
/*
* Generate a query string for the query that merges into a shard and reads
* from an empty relation.
*
* Since CTEs have already been converted to intermediate results, they need
* to be removed from the query. Otherwise, worker queries include both
* intermediate results and CTEs in the query.
*/
mergeQueryCopy->cteList = NIL;
deparse_shard_query(mergeQueryCopy, targetRelationId, shardId, queryString);
ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data)));
SetTaskQueryString(task, queryString->data);
}
}
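Concretely, for a target shard whose intermediate result was never created, the source subquery is swapped for a zero-row query. A before/after sketch of the worker query follows; shard ids and names are invented, and the exact SQL that BuildEmptyResultQuery emits is an assumption here:

/*
 * before (reads a result file that was never written):
 *   MERGE INTO target_102008 t
 *   USING (SELECT ... FROM read_intermediate_result('prefix_102008', ...)) s
 *   ON t.id = s.id ...
 *
 * after (same column names and types, zero rows):
 *   MERGE INTO target_102008 t
 *   USING (SELECT NULL::bigint AS id, ... WHERE false) s
 *   ON t.id = s.id ...
 *
 * so WHEN NOT MATCHED BY SOURCE actions still fire for every row of the
 * target shard.
 */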
/* /*
* GenerateTaskListWithRedistributedResults returns a task list to insert given * GenerateTaskListWithRedistributedResults returns a task list to insert given
* redistributedResults into the given target relation. * redistributedResults into the given target relation.
@ -223,6 +313,7 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
Query *modifyResultQuery = copyObject(modifyQueryViaCoordinatorOrRepartition);
RangeTblEntry *insertRte = ExtractResultRelationRTE(modifyResultQuery);
Oid targetRelationId = targetRelation->relationId;
bool hasNotMatchedBySource = HasMergeNotMatchedBySource(modifyResultQuery);
int shardCount = targetRelation->shardIntervalArrayLength;
int shardOffset = 0;
@ -242,19 +333,33 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
StringInfo queryString = makeStringInfo();
/* skip empty tasks */
if (resultIdList == NIL)
if (resultIdList == NIL && !hasNotMatchedBySource)
{
continue;
}
Query *fragmentSetQuery = NULL;
if (resultIdList != NIL)
{
/* sort result ids for consistent test output */
List *sortedResultIds = SortList(resultIdList, pg_qsort_strcmp);
/* generate the query on the intermediate result */
Query *fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
NIL,
sortedResultIds,
useBinaryFormat);
}
else
{
/* No source data, but MERGE query has NOT MATCHED BY SOURCE */
StringInfo emptyFragmentId = makeStringInfo();
appendStringInfo(emptyFragmentId, "%s_" UINT64_FORMAT, "temp_empty_rel_",
shardId);
fragmentSetQuery = BuildEmptyResultQuery(selectTargetList,
emptyFragmentId->data);
}
/* put the intermediate result query in the INSERT..SELECT */
selectRte->subquery = fragmentSetQuery;
View File
@ -109,7 +109,7 @@ TupleStoreTupleDestPutTuple(TupleDestination *self, Task *task,
uint64 tupleSize = tupleLibpqSize;
if (tupleSize == 0)
{
tupleSize = HeapTupleHeaderGetDatumLength(heapTuple);
tupleSize = heapTuple->t_len;
}
/*
View File
@ -1717,13 +1717,11 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
/*
* As of PostgreSQL 15, the same applies to schemas.
*/
#if PG_VERSION_NUM >= PG_VERSION_15
List *schemaIdList =
GetPublicationSchemas(publicationId);
List *schemaDependencyList =
CreateObjectAddressDependencyDefList(NamespaceRelationId, schemaIdList);
result = list_concat(result, schemaDependencyList);
#endif
break;
}
View File
@ -379,7 +379,7 @@ EnsureModificationsCanRun(void)
/*
* EnsureModificationsCanRunOnRelation firsts calls into EnsureModificationsCanRun() and
* EnsureModificationsCanRunOnRelation first calls into EnsureModificationsCanRun() and
* then does one more additional check. The additional check is to give a proper error
* message if any relation that is modified is replicated, as replicated tables use
* 2PC and 2PC cannot happen when recovery is in progress.
@ -660,6 +660,18 @@ GetTableTypeName(Oid tableId)
bool
IsCitusTable(Oid relationId)
{
/*
* PostgreSQL's OID generator assigns user object OIDs starting
* from FirstNormalObjectId. This means no user object can have
* an OID lower than FirstNormalObjectId. Therefore, if the
* relationId is less than FirstNormalObjectId
* (i.e. in PostgreSQL's reserved range), we can immediately
* return false, since such objects cannot be Citus tables.
*/
if (relationId < FirstNormalObjectId)
{
return false;
}
return LookupCitusTableCacheEntry(relationId) != NULL;
}
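FirstNormalObjectId is 16384 in PostgreSQL's access/transam.h, so the check above is a cheap constant comparison that skips the metadata-cache lookup entirely for system objects. A standalone sketch of the fast path, with a plain typedef instead of the real Oid type:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/* value from PostgreSQL's access/transam.h */
#define FirstNormalObjectId 16384

/* cheap pre-filter: OIDs below FirstNormalObjectId belong to system objects */
static bool
could_be_citus_table(Oid relationId)
{
	if (relationId < FirstNormalObjectId)
	{
		return false; /* reserved range: skip the costlier cache lookup */
	}
	return true; /* worth consulting the metadata cache */
}

int main(void)
{
	printf("pg_class (1259): %d\n", could_be_citus_table(1259));     /* 0 */
	printf("user table (16500): %d\n", could_be_citus_table(16500)); /* 1 */
	return 0;
}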
@ -2521,6 +2533,8 @@ AvailableExtensionVersion(void)
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found")));
return NULL; /* keep compiler happy */
}
View File
@ -1739,48 +1739,6 @@ GetSequencesFromAttrDef(Oid attrdefOid)
}
#if PG_VERSION_NUM < PG_VERSION_15
/*
* Given a pg_attrdef OID, return the relation OID and column number of
* the owning column (represented as an ObjectAddress for convenience).
*
* Returns InvalidObjectAddress if there is no such pg_attrdef entry.
*/
ObjectAddress
GetAttrDefaultColumnAddress(Oid attrdefoid)
{
ObjectAddress result = InvalidObjectAddress;
ScanKeyData skey[1];
HeapTuple tup;
Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_attrdef_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(attrdefoid));
SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true,
NULL, 1, skey);
if (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup);
result.classId = RelationRelationId;
result.objectId = atdform->adrelid;
result.objectSubId = atdform->adnum;
}
systable_endscan(scan);
table_close(attrdef, AccessShareLock);
return result;
}
#endif
/*
* GetAttrDefsFromSequence returns a list of attrdef OIDs that have
* a dependency on the given sequence
@ -3011,7 +2969,6 @@ SyncNodeMetadataToNodesMain(Datum main_arg)
PopActiveSnapshot();
CommitTransactionCommand();
ProcessCompletedNotifies();
if (syncedAllNodes)
{
View File
@ -217,6 +217,9 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)
EnsureTransactionalMetadataSyncMode();
}
/* prevent concurrent modification */
LockRelationOid(DistNodeRelationId(), RowExclusiveLock);
bool isCoordinatorInMetadata = false;
WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID,
&isCoordinatorInMetadata);
View File
@ -283,9 +283,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
case OBJECT_FDW:
case OBJECT_FOREIGN_SERVER:
case OBJECT_LANGUAGE:
#if PG_VERSION_NUM >= PG_VERSION_15
case OBJECT_PARAMETER_ACL:
#endif
case OBJECT_PUBLICATION:
case OBJECT_ROLE:
case OBJECT_SCHEMA:
@ -323,9 +321,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
break;
}
#if PG_VERSION_NUM >= PG_VERSION_15
case OBJECT_PUBLICATION_NAMESPACE:
#endif
case OBJECT_USER_MAPPING:
{
objnode = (Node *) list_make2(linitial(name), linitial(args));
View File
@ -319,7 +319,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start);
PG_FUNCTION_INFO_V1(citus_rebalance_stop);
PG_FUNCTION_INFO_V1(citus_rebalance_wait);
bool RunningUnderIsolationTest = false;
bool RunningUnderCitusTestSuite = false;
int MaxRebalancerLoggedIgnoredMoves = 5;
int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024;
bool PropagateSessionSettingsForLoopbackConnection = false;
@ -384,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state)
Assert(shardCost->cost <= prevShardCost->cost);
}
totalCost += shardCost->cost;
prevShardCost = shardCost;
}
/* Check that utilization field is up to date. */
View File
@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
CheckCitusVersion(ERROR);
EnsureCoordinator();
List *referenceTableIdList = NIL;
if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
{
ereport(ERROR, (errmsg("there are missing reference tables on some nodes"),
errhint("Copy reference tables first with "
"replicate_reference_tables() or use "
"citus_rebalance_start() that will do it automatically."
)));
}
int64 shardId = PG_GETARG_INT64(0);
char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
int32 sourceNodePort = PG_GETARG_INT32(2);
@ -1940,11 +1951,7 @@ ConstructQualifiedShardName(ShardInterval *shardInterval)
static List *
RecreateTableDDLCommandList(Oid relationId)
{
const char *relationName = get_rel_name(relationId);
Oid relationSchemaId = get_rel_namespace(relationId);
const char *relationSchemaName = get_namespace_name(relationSchemaId);
const char *qualifiedRelationName = quote_qualified_identifier(relationSchemaName,
relationName);
const char *qualifiedRelationName = generate_qualified_relation_name(relationId);
StringInfo dropCommand = makeStringInfo();
View File
@ -136,11 +136,8 @@ CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInfo,
path->custom_path.path.pathtarget = relOptInfo->reltarget;
path->custom_path.path.parent = relOptInfo;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* necessary to avoid extra Result node in PG15 */
path->custom_path.flags = CUSTOMPATH_SUPPORT_PROJECTION;
#endif
/*
* The 100k rows we put on the cost of the path is kind of arbitrary and could be
View File
@ -151,7 +151,10 @@ static RouterPlanType GetRouterPlanType(Query *query,
bool hasUnresolvedParams);
static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
PlannedStmt *concatPlan);
static bool CheckPostPlanDistribution(bool isDistributedQuery,
Query *origQuery,
List *rangeTableList,
Query *plannedQuery);
/* Distributed planner hook */
PlannedStmt *
@ -272,6 +275,11 @@ distributed_planner(Query *parse,
planContext.plan = standard_planner(planContext.query, NULL,
planContext.cursorOptions,
planContext.boundParams);
needsDistributedPlanning = CheckPostPlanDistribution(needsDistributedPlanning,
planContext.originalQuery,
rangeTableList,
planContext.query);
if (needsDistributedPlanning)
{
result = PlanDistributedStmt(&planContext, rteIdCounter);
@ -703,6 +711,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan)
* Arbitrarily high cost, but low enough that it can be added up
* without overflowing by choose_custom_plan().
*/
Assert(plan != NULL);
plan->planTree->total_cost = FLT_MAX / 100000000;
}
@ -1441,13 +1450,8 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
customScan->custom_private = list_make1(distributedPlanData);
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* necessary to avoid extra Result node in PG15 */
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN | CUSTOMPATH_SUPPORT_PROJECTION;
#else
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN;
#endif
/*
* Fast path queries cannot have any subplans by definition, so skip
@ -2733,3 +2737,41 @@ WarnIfListHasForeignDistributedTable(List *rangeTableList)
}
}
}
static bool
CheckPostPlanDistribution(bool isDistributedQuery,
Query *origQuery, List *rangeTableList,
Query *plannedQuery)
{
if (isDistributedQuery)
{
Node *origQuals = origQuery->jointree->quals;
Node *plannedQuals = plannedQuery->jointree->quals;
#if PG_VERSION_NUM >= PG_VERSION_17
if (IsMergeQuery(origQuery))
{
origQuals = origQuery->mergeJoinCondition;
plannedQuals = plannedQuery->mergeJoinCondition;
}
#endif
/*
* The WHERE quals have been eliminated by the Postgres planner, possibly
* because an OR clause was simplified to TRUE. In such cases, we need to check
* if the planned query still requires distributed planning.
*/
if (origQuals != NULL && plannedQuals == NULL)
{
List *rtesPostPlan = ExtractRangeTableEntryList(plannedQuery);
if (list_length(rtesPostPlan) < list_length(rangeTableList))
{
isDistributedQuery = ListContainsDistributedTableRTE(
rtesPostPlan, NULL);
}
}
}
return isDistributedQuery;
}
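A toy mirror of that decision, using plain booleans and counts in place of the planner's qual trees and range table lists (all names here are illustrative, not Citus APIs):

#include <stdbool.h>
#include <stdio.h>

/* toy mirror of CheckPostPlanDistribution's re-check */
static bool
still_needs_distributed_planning(bool wasDistributed,
								 bool hadQualsBeforePlanning,
								 bool hasQualsAfterPlanning,
								 int rtesBeforePlanning,
								 int rtesAfterPlanning,
								 bool survivingRtesContainDistTable)
{
	if (!wasDistributed)
	{
		return false;
	}

	/* quals were simplified away (e.g. an OR reduced to TRUE) and the
	 * planner dropped range table entries: re-inspect what is left */
	if (hadQualsBeforePlanning && !hasQualsAfterPlanning &&
		rtesAfterPlanning < rtesBeforePlanning)
	{
		return survivingRtesContainDistTable;
	}
	return true;
}

int main(void)
{
	/* distributed table eliminated during constant folding -> local plan */
	printf("%d\n", still_needs_distributed_planning(true, true, false,
													2, 1, false));
	return 0;
}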
View File
@ -531,8 +531,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure,
if (partitionParam->paramkind == PARAM_EXTERN)
{
/* Don't log a message, we should end up here again without a parameter */
/*
* Don't log a message, we should end up here again without a
* parameter.
* Note that "plan" can be null, for example when a CALL statement
* is prepared.
*/
if (plan)
{
DissuadePlannerFromUsingPlan(plan);
}
return NULL;
}
}
View File
@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation,
ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d", ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d",
sourceType, targetType))); sourceType, targetType)));
} }
return NULL; /* keep compiler happy */
} }
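The pattern here and in AvailableExtensionVersion above is the same: ereport(ERROR, ...) never returns, but the compiler cannot always prove that, so an explicit unreachable return suppresses -Wreturn-type warnings. A standalone sketch of the idea, with exit() standing in for ereport:

#include <stdio.h>
#include <stdlib.h>

/* stand-in for ereport(ERROR, ...): reports and never returns */
static void
report_error(const char *msg)
{
	fprintf(stderr, "ERROR: %s\n", msg);
	exit(1);
}

static int
find_conversion_path(int sourceType, int targetType)
{
	if (sourceType == targetType)
	{
		return 0; /* trivial path */
	}
	report_error("could not find a conversion path");
	return -1; /* unreachable, but keeps -Wreturn-type quiet */
}

int main(void)
{
	printf("%d\n", find_conversion_path(23, 23));
	return 0;
}

Without the final return (and without a _Noreturn attribute on the error helper), compilers typically warn that control reaches the end of a non-void function.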
View File
@ -38,8 +38,6 @@
#include "distributed/shard_pruning.h" #include "distributed/shard_pruning.h"
#include "distributed/shared_library_init.h" #include "distributed/shared_library_init.h"
#if PG_VERSION_NUM >= PG_VERSION_15
static int SourceResultPartitionColumnIndex(Query *mergeQuery,
List *sourceTargetList,
CitusTableCacheEntry *targetRelation);
@ -100,8 +98,6 @@ static char * MergeCommandResultIdPrefix(uint64 planId);
static void ErrorIfMergeHasReturningList(Query *query);
static Node * GetMergeJoinCondition(Query *mergeQuery);
#endif
/*
* CreateMergePlan
@ -118,13 +114,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
PlannerRestrictionContext *plannerRestrictionContext,
ParamListInfo boundParams)
{
/* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
#else
Oid targetRelationId = ModifyQueryResultRelationId(originalQuery);
/*
@ -153,8 +142,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
}
return distributedPlan;
#endif
}
@ -184,9 +171,6 @@ GetMergeJoinTree(Query *mergeQuery)
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* GetMergeJoinCondition returns the quals of the ON condition
*/
@ -904,7 +888,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
sourceResultsQuery->targetList =
CreateAllTargetListForRelation(sourceRte->relid, requiredAttributes);
CreateFilteredTargetListForRelation(sourceRte->relid, requiredAttributes);
List *restrictionList =
GetRestrictInfoListForRelation(sourceRte, plannerRestrictionContext);
List *copyRestrictionList = copyObject(restrictionList);
@ -1443,9 +1427,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
}
#endif
/*
* ExtractMergeSourceRangeTableEntry returns the range table entry of source
* table or source query in USING clause.
@ -1453,13 +1434,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
RangeTblEntry *
ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
{
/* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
#else
Assert(IsMergeQuery(query));
List *fromList = query->jointree->fromlist;
@ -1498,8 +1472,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable);
return subqueryRte;
#endif
}
@ -1516,13 +1488,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
Var *
FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
{
/* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
#else
Assert(IsMergeQuery(query));
if (!IsCitusTableType(targetRelationId, DISTRIBUTED_TABLE))
@ -1546,8 +1511,8 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
continue;
}
/* NOT MATCHED can have either INSERT or DO NOTHING */
/* NOT MATCHED can have either INSERT, DO NOTHING or UPDATE(PG17) */
if (action->commandType == CMD_NOTHING)
if (action->commandType == CMD_NOTHING || action->commandType == CMD_UPDATE)
{
return NULL;
}
@ -1593,8 +1558,6 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
}
return NULL;
#endif
}
@ -1620,7 +1583,7 @@ IsLocalTableModification(Oid targetRelationId, Query *query, uint64 shardId,
return true;
}
if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalTables(rteProperties))
if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalOrReferenceTables(rteProperties))
{
return true;
}
View File
@ -95,14 +95,24 @@ typedef struct
bool wal;
bool timing;
bool summary;
#if PG_VERSION_NUM >= PG_VERSION_17
bool memory;
ExplainSerializeOption serialize;
#endif
ExplainFormat format;
} ExplainOptions;
/* EXPLAIN flags of current distributed explain */
#if PG_VERSION_NUM >= PG_VERSION_17
static ExplainOptions CurrentDistributedQueryExplainOptions = {
0, 0, 0, 0, 0, 0, 0, EXPLAIN_SERIALIZE_NONE, EXPLAIN_FORMAT_TEXT
};
#else
static ExplainOptions CurrentDistributedQueryExplainOptions = {
0, 0, 0, 0, 0, 0, EXPLAIN_FORMAT_TEXT
};
#endif
/* Result for a single remote EXPLAIN command */
typedef struct RemoteExplainPlan
@ -124,6 +134,59 @@ typedef struct ExplainAnalyzeDestination
TupleDesc lastSavedExplainAnalyzeTupDesc;
} ExplainAnalyzeDestination;
#if PG_VERSION_NUM >= PG_VERSION_17
/*
* Various places within need to convert bytes to kilobytes. Round these up
* to the next whole kilobyte.
* copied from explain.c
*/
#define BYTES_TO_KILOBYTES(b) (((b) + 1023) / 1024)
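A quick standalone check of the macro's round-up behavior: any non-zero byte count maps to at least 1 kB, and exact multiples of 1024 are not inflated.

#include <stdint.h>
#include <stdio.h>

/* round-up integer division: (b + 1023) / 1024 */
#define BYTES_TO_KILOBYTES(b) (((b) + 1023) / 1024)

int main(void)
{
	int64_t sizes[] = { 0, 1, 1023, 1024, 1025, 4096 };
	for (int i = 0; i < 6; i++)
	{
		printf("%6lld bytes -> %lld kB\n",
			   (long long) sizes[i],
			   (long long) BYTES_TO_KILOBYTES(sizes[i]));
	}
	return 0; /* prints 0, 1, 1, 1, 2, 4 */
}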
/* copied from explain.c */
/* Instrumentation data for SERIALIZE option */
typedef struct SerializeMetrics
{
uint64 bytesSent; /* # of bytes serialized */
instr_time timeSpent; /* time spent serializing */
BufferUsage bufferUsage; /* buffers accessed during serialization */
} SerializeMetrics;
/* copied from explain.c */
static bool peek_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_memory_counters(ExplainState *es,
const MemoryContextCounters *mem_counters);
static void ExplainIndentText(ExplainState *es);
static void ExplainPrintSerialize(ExplainState *es,
SerializeMetrics *metrics);
static SerializeMetrics GetSerializationMetrics(DestReceiver *dest);
/*
* DestReceiver functions for SERIALIZE option
*
* A DestReceiver for query tuples, that serializes passed rows into RowData
* messages while measuring the resources expended and total serialized size,
* while never sending the data to the client. This allows measuring the
* overhead of deTOASTing and datatype out/sendfuncs, which are not otherwise
* exercisable without actually hitting the network.
*
* copied from explain.c
*/
typedef struct SerializeDestReceiver
{
DestReceiver pub;
ExplainState *es; /* this EXPLAIN statement's ExplainState */
int8 format; /* text or binary, like pq wire protocol */
TupleDesc attrinfo; /* the output tuple desc */
int nattrs; /* current number of columns */
FmgrInfo *finfos; /* precomputed call info for output fns */
MemoryContext tmpcontext; /* per-row temporary memory context */
StringInfoData buf; /* buffer to hold the constructed message */
SerializeMetrics metrics; /* collected metrics */
} SerializeDestReceiver;
#endif
/* Explain functions for distributed queries */
static void ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es);
@ -144,14 +207,27 @@ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOut
ExplainState *es);
static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es);
static const char * ExplainFormatStr(ExplainFormat format);
#if PG_VERSION_NUM >= PG_VERSION_17
static const char * ExplainSerializeStr(ExplainSerializeOption serializeOption);
#endif
static void ExplainWorkerPlan(PlannedStmt *plannedStmt, DestReceiver *dest,
ExplainState *es,
const char *queryString, ParamListInfo params,
QueryEnvironment *queryEnv,
const instr_time *planduration,
#if PG_VERSION_NUM >= PG_VERSION_17
const BufferUsage *bufusage,
const MemoryContextCounters *mem_counters,
#endif
double *executionDurationMillisec);
static ExplainFormat ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName,
ExplainFormat defaultValue);
#if PG_VERSION_NUM >= PG_VERSION_17
static ExplainSerializeOption ExtractFieldExplainSerialize(Datum jsonbDoc,
const char *fieldName,
ExplainSerializeOption
defaultValue);
#endif
static TupleDestination * CreateExplainAnlyzeDestination(Task *task,
TupleDestination *taskDest);
static void ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,
@ -190,6 +266,14 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
void
CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es)
{
#if PG_VERSION_NUM >= PG_VERSION_16
if (es->generic)
{
ereport(ERROR, (errmsg(
"EXPLAIN GENERIC_PLAN is currently not supported for Citus tables")));
}
#endif
CitusScanState *scanState = (CitusScanState *) node;
DistributedPlan *distributedPlan = scanState->distributedPlan;
EState *executorState = ScanStateGetExecutorState(scanState);
@ -1017,24 +1101,30 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
{
StringInfo explainQuery = makeStringInfo();
const char *formatStr = ExplainFormatStr(es->format);
#if PG_VERSION_NUM >= PG_VERSION_17
const char *serializeStr = ExplainSerializeStr(es->serialize);
#endif
appendStringInfo(explainQuery,
"EXPLAIN (ANALYZE %s, VERBOSE %s, "
"COSTS %s, BUFFERS %s, WAL %s, "
#if PG_VERSION_NUM >= PG_VERSION_16
"GENERIC_PLAN %s, "
"TIMING %s, SUMMARY %s, "
#if PG_VERSION_NUM >= PG_VERSION_17
"MEMORY %s, SERIALIZE %s, "
#endif
"TIMING %s, SUMMARY %s, FORMAT %s) %s",
"FORMAT %s) %s",
es->analyze ? "TRUE" : "FALSE",
es->verbose ? "TRUE" : "FALSE",
es->costs ? "TRUE" : "FALSE",
es->buffers ? "TRUE" : "FALSE",
es->wal ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_16
es->generic ? "TRUE" : "FALSE",
#endif
es->timing ? "TRUE" : "FALSE",
es->summary ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_17
es->memory ? "TRUE" : "FALSE",
serializeStr,
#endif
formatStr,
queryString);
@ -1073,6 +1163,42 @@ ExplainFormatStr(ExplainFormat format)
}
#if PG_VERSION_NUM >= PG_VERSION_17
/*
* ExplainSerializeStr converts the given explain serialize option to string.
*/
static const char *
ExplainSerializeStr(ExplainSerializeOption serializeOption)
{
switch (serializeOption)
{
case EXPLAIN_SERIALIZE_NONE:
{
return "none";
}
case EXPLAIN_SERIALIZE_TEXT:
{
return "text";
}
case EXPLAIN_SERIALIZE_BINARY:
{
return "binary";
}
default:
{
return "none";
}
}
}
#endif
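A minimal sketch of how these strings end up in the remote EXPLAIN prefix, mirroring the PG17 branch of BuildRemoteExplainQuery with plain snprintf instead of StringInfo (the query text and flag values are made up):

#include <stdbool.h>
#include <stdio.h>

static const char *
on_off(bool b)
{
	return b ? "TRUE" : "FALSE";
}

int main(void)
{
	bool analyze = true, verbose = false, memory = true;
	const char *serialize = "text"; /* as returned by ExplainSerializeStr() */
	const char *query = "SELECT count(*) FROM dist_table"; /* hypothetical */
	char buf[256];

	snprintf(buf, sizeof(buf),
			 "EXPLAIN (ANALYZE %s, VERBOSE %s, MEMORY %s, SERIALIZE %s, "
			 "FORMAT TEXT) %s",
			 on_off(analyze), on_off(verbose), on_off(memory), serialize, query);
	puts(buf);
	return 0;
}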
/*
* worker_last_saved_explain_analyze returns the last saved EXPLAIN ANALYZE output of
* a worker task query. It returns NULL if nothing has been saved yet.
@ -1132,6 +1258,11 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
es->verbose = ExtractFieldBoolean(explainOptions, "verbose", es->verbose);
es->timing = ExtractFieldBoolean(explainOptions, "timing", es->timing);
es->format = ExtractFieldExplainFormat(explainOptions, "format", es->format);
#if PG_VERSION_NUM >= PG_VERSION_17
es->memory = ExtractFieldBoolean(explainOptions, "memory", es->memory);
es->serialize = ExtractFieldExplainSerialize(explainOptions, "serialize",
es->serialize);
#endif
TupleDesc tupleDescriptor = NULL;
Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
@ -1158,7 +1289,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
}
/* resolve OIDs of unknown (user-defined) types */
Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString,
Query *analyzedQuery = parse_analyze_varparams(parseTree, queryString,
&paramTypes, &numParams, NULL);
/* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */
@ -1177,6 +1308,36 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
/* plan query and record planning stats */
instr_time planStart;
instr_time planDuration;
#if PG_VERSION_NUM >= PG_VERSION_17
BufferUsage bufusage_start,
bufusage;
MemoryContextCounters mem_counters;
MemoryContext planner_ctx = NULL;
MemoryContext saved_ctx = NULL;
if (es->memory)
{
/*
* Create a new memory context to measure planner's memory consumption
* accurately. Note that if the planner were to be modified to use a
* different memory context type, here we would be changing that to
* AllocSet, which might be undesirable. However, we don't have a way
* to create a context of the same type as another, so we pray and
* hope that this is OK.
*
* copied from explain.c
*/
planner_ctx = AllocSetContextCreate(CurrentMemoryContext,
"explain analyze planner context",
ALLOCSET_DEFAULT_SIZES);
saved_ctx = MemoryContextSwitchTo(planner_ctx);
}
if (es->buffers)
{
bufusage_start = pgBufferUsage;
}
#endif
INSTR_TIME_SET_CURRENT(planStart);
@ -1185,9 +1346,32 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
INSTR_TIME_SET_CURRENT(planDuration);
INSTR_TIME_SUBTRACT(planDuration, planStart);
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->memory)
{
MemoryContextSwitchTo(saved_ctx);
MemoryContextMemConsumed(planner_ctx, &mem_counters);
}
/* calc differences of buffer counters. */
if (es->buffers)
{
memset(&bufusage, 0, sizeof(BufferUsage));
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
/* do the actual EXPLAIN ANALYZE */
ExplainWorkerPlan(plan, tupleStoreDest, es, queryString, boundParams, NULL,
&planDuration,
(es->buffers ? &bufusage : NULL),
(es->memory ? &mem_counters : NULL),
&executionDurationMillisec);
#else
/* do the actual EXPLAIN ANALYZE */ /* do the actual EXPLAIN ANALYZE */
ExplainWorkerPlan(plan, tupleStoreDest, es, queryString, boundParams, NULL, ExplainWorkerPlan(plan, tupleStoreDest, es, queryString, boundParams, NULL,
&planDuration, &executionDurationMillisec); &planDuration, &executionDurationMillisec);
#endif
ExplainEndOutput(es);
@ -1256,6 +1440,50 @@ ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName, ExplainFormat
}
#if PG_VERSION_NUM >= PG_VERSION_17
/*
* ExtractFieldExplainSerialize gets value of fieldName from jsonbDoc, or returns
* defaultValue if it doesn't exist.
*/
static ExplainSerializeOption
ExtractFieldExplainSerialize(Datum jsonbDoc, const char *fieldName, ExplainSerializeOption
defaultValue)
{
Datum jsonbDatum = 0;
bool found = ExtractFieldJsonbDatum(jsonbDoc, fieldName, &jsonbDatum);
if (!found)
{
return defaultValue;
}
const char *serializeStr = DatumGetCString(DirectFunctionCall1(jsonb_out,
jsonbDatum));
if (pg_strcasecmp(serializeStr, "\"none\"") == 0)
{
return EXPLAIN_SERIALIZE_NONE;
}
else if (pg_strcasecmp(serializeStr, "\"off\"") == 0)
{
return EXPLAIN_SERIALIZE_NONE;
}
else if (pg_strcasecmp(serializeStr, "\"text\"") == 0)
{
return EXPLAIN_SERIALIZE_TEXT;
}
else if (pg_strcasecmp(serializeStr, "\"binary\"") == 0)
{
return EXPLAIN_SERIALIZE_BINARY;
}
ereport(ERROR, (errmsg("Invalid explain analyze serialize: %s", serializeStr)));
return 0;
}
#endif
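Because the field is extracted with jsonb_out, the value arrives with its surrounding double quotes, which is why the comparisons above include them. A standalone sketch of the same parsing, using POSIX strcasecmp in place of pg_strcasecmp:

#include <stdio.h>
#include <strings.h>

/* the jsonb field comes back as a *quoted* string, e.g. "\"text\"" */
typedef enum { SERIALIZE_NONE, SERIALIZE_TEXT, SERIALIZE_BINARY } SerializeOpt;

static int
parse_serialize(const char *quoted, SerializeOpt *out)
{
	if (strcasecmp(quoted, "\"none\"") == 0 || strcasecmp(quoted, "\"off\"") == 0)
	{
		*out = SERIALIZE_NONE;
	}
	else if (strcasecmp(quoted, "\"text\"") == 0)
	{
		*out = SERIALIZE_TEXT;
	}
	else if (strcasecmp(quoted, "\"binary\"") == 0)
	{
		*out = SERIALIZE_BINARY;
	}
	else
	{
		return -1; /* the real code raises ERROR here */
	}
	return 0;
}

int main(void)
{
	SerializeOpt opt;
	printf("%d\n", parse_serialize("\"TEXT\"", &opt) == 0 ? (int) opt : -1); /* 1 */
	return 0;
}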
/*
* CitusExplainOneQuery is the executor hook that is called when
* postgres wants to explain a query.
@ -1273,6 +1501,10 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
CurrentDistributedQueryExplainOptions.summary = es->summary;
CurrentDistributedQueryExplainOptions.timing = es->timing;
CurrentDistributedQueryExplainOptions.format = es->format;
#if PG_VERSION_NUM >= PG_VERSION_17
CurrentDistributedQueryExplainOptions.memory = es->memory;
CurrentDistributedQueryExplainOptions.serialize = es->serialize;
#endif
/* rest is copied from ExplainOneQuery() */
instr_time planstart,
@ -1595,11 +1827,18 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc,
StringInfo explainOptions = makeStringInfo();
appendStringInfo(explainOptions,
"{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, \"wal\": %s, "
#if PG_VERSION_NUM >= PG_VERSION_17
"\"memory\": %s, \"serialize\": \"%s\", "
#endif
"\"timing\": %s, \"summary\": %s, \"format\": \"%s\"}", "\"timing\": %s, \"summary\": %s, \"format\": \"%s\"}",
CurrentDistributedQueryExplainOptions.verbose ? "true" : "false", CurrentDistributedQueryExplainOptions.verbose ? "true" : "false",
CurrentDistributedQueryExplainOptions.costs ? "true" : "false", CurrentDistributedQueryExplainOptions.costs ? "true" : "false",
CurrentDistributedQueryExplainOptions.buffers ? "true" : "false", CurrentDistributedQueryExplainOptions.buffers ? "true" : "false",
CurrentDistributedQueryExplainOptions.wal ? "true" : "false", CurrentDistributedQueryExplainOptions.wal ? "true" : "false",
#if PG_VERSION_NUM >= PG_VERSION_17
CurrentDistributedQueryExplainOptions.memory ? "true" : "false",
ExplainSerializeStr(CurrentDistributedQueryExplainOptions.serialize),
#endif
CurrentDistributedQueryExplainOptions.timing ? "true" : "false",
CurrentDistributedQueryExplainOptions.summary ? "true" : "false",
ExplainFormatStr(CurrentDistributedQueryExplainOptions.format));
@ -1824,7 +2063,12 @@ ExplainOneQuery(Query *query, int cursorOptions,
static void
ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es,
const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv,
const instr_time *planduration, double *executionDurationMillisec)
const instr_time *planduration,
#if PG_VERSION_NUM >= PG_VERSION_17
const BufferUsage *bufusage,
const MemoryContextCounters *mem_counters,
#endif
double *executionDurationMillisec)
{
QueryDesc *queryDesc;
instr_time starttime;
@ -1893,6 +2137,32 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
/* Create textual dump of plan tree */
ExplainPrintPlan(es, queryDesc);
#if PG_VERSION_NUM >= PG_VERSION_17
/* Show buffer and/or memory usage in planning */
if (peek_buffer_usage(es, bufusage) || mem_counters)
{
ExplainOpenGroup("Planning", "Planning", true, es);
if (es->format == EXPLAIN_FORMAT_TEXT)
{
ExplainIndentText(es);
appendStringInfoString(es->str, "Planning:\n");
es->indent++;
}
if (bufusage)
show_buffer_usage(es, bufusage);
if (mem_counters)
show_memory_counters(es, mem_counters);
if (es->format == EXPLAIN_FORMAT_TEXT)
es->indent--;
ExplainCloseGroup("Planning", "Planning", true, es);
}
#endif
if (es->summary && planduration)
{
double plantime = INSTR_TIME_GET_DOUBLE(*planduration);
@ -1913,6 +2183,23 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
if (es->costs)
ExplainPrintJITSummary(es, queryDesc);
#if PG_VERSION_NUM >= PG_VERSION_17
if (es->serialize != EXPLAIN_SERIALIZE_NONE)
{
/* the SERIALIZE option requires its own tuple receiver */
DestReceiver *dest_serialize = CreateExplainSerializeDestReceiver(es);
/* grab serialization metrics before we destroy the DestReceiver */
SerializeMetrics serializeMetrics = GetSerializationMetrics(dest_serialize);
/* call the DestReceiver's destroy method even during explain */
dest_serialize->rDestroy(dest_serialize);
/* Print info about serialization of output */
ExplainPrintSerialize(es, &serializeMetrics);
}
#endif
/*
* Close down the query and free resources. Include time for this in the
* total execution time (although it should be pretty minimal).
@ -1961,3 +2248,351 @@ elapsed_time(instr_time *starttime)
INSTR_TIME_SUBTRACT(endtime, *starttime);
return INSTR_TIME_GET_DOUBLE(endtime);
}
#if PG_VERSION_NUM >= PG_VERSION_17
/*
* Return whether show_buffer_usage would have anything to print, if given
* the same 'usage' data. Note that when the format is anything other than
* text, we print even if the counters are all zeroes.
*
* Copied from explain.c.
*/
static bool
peek_buffer_usage(ExplainState *es, const BufferUsage *usage)
{
bool has_shared;
bool has_local;
bool has_temp;
bool has_shared_timing;
bool has_local_timing;
bool has_temp_timing;
if (usage == NULL)
return false;
if (es->format != EXPLAIN_FORMAT_TEXT)
return true;
has_shared = (usage->shared_blks_hit > 0 ||
usage->shared_blks_read > 0 ||
usage->shared_blks_dirtied > 0 ||
usage->shared_blks_written > 0);
has_local = (usage->local_blks_hit > 0 ||
usage->local_blks_read > 0 ||
usage->local_blks_dirtied > 0 ||
usage->local_blks_written > 0);
has_temp = (usage->temp_blks_read > 0 ||
usage->temp_blks_written > 0);
has_shared_timing = (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->shared_blk_write_time));
has_local_timing = (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->local_blk_write_time));
has_temp_timing = (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->temp_blk_write_time));
return has_shared || has_local || has_temp || has_shared_timing ||
has_local_timing || has_temp_timing;
}
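The peek/show split lets the caller decide whether to open a "Planning:" group at all before printing anything. A toy version of the pattern, with a two-field counter struct instead of BufferUsage:

#include <stdbool.h>
#include <stdio.h>

/* toy counters standing in for BufferUsage */
typedef struct { long hit; long read; } Counters;

/* "peek": would show_counters print anything? kept in sync with it */
static bool
peek_counters(const Counters *c)
{
	return c != NULL && (c->hit > 0 || c->read > 0);
}

static void
show_counters(const Counters *c)
{
	printf("Buffers:");
	if (c->hit > 0)
		printf(" hit=%ld", c->hit);
	if (c->read > 0)
		printf(" read=%ld", c->read);
	printf("\n");
}

int main(void)
{
	Counters c = { 42, 0 };

	/* the section header is only opened when there is something to print */
	if (peek_counters(&c))
	{
		printf("Planning:\n  ");
		show_counters(&c);
	}
	return 0;
}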
/*
* Show buffer usage details. This had better be kept in sync with peek_buffer_usage.
*
* Copied from explain.c.
*/
static void
show_buffer_usage(ExplainState *es, const BufferUsage *usage)
{
if (es->format == EXPLAIN_FORMAT_TEXT)
{
bool has_shared = (usage->shared_blks_hit > 0 ||
usage->shared_blks_read > 0 ||
usage->shared_blks_dirtied > 0 ||
usage->shared_blks_written > 0);
bool has_local = (usage->local_blks_hit > 0 ||
usage->local_blks_read > 0 ||
usage->local_blks_dirtied > 0 ||
usage->local_blks_written > 0);
bool has_temp = (usage->temp_blks_read > 0 ||
usage->temp_blks_written > 0);
bool has_shared_timing = (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->shared_blk_write_time));
bool has_local_timing = (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->local_blk_write_time));
bool has_temp_timing = (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->temp_blk_write_time));
/* Show only positive counter values. */
if (has_shared || has_local || has_temp)
{
ExplainIndentText(es);
appendStringInfoString(es->str, "Buffers:");
if (has_shared)
{
appendStringInfoString(es->str, " shared");
if (usage->shared_blks_hit > 0)
appendStringInfo(es->str, " hit=%lld",
(long long) usage->shared_blks_hit);
if (usage->shared_blks_read > 0)
appendStringInfo(es->str, " read=%lld",
(long long) usage->shared_blks_read);
if (usage->shared_blks_dirtied > 0)
appendStringInfo(es->str, " dirtied=%lld",
(long long) usage->shared_blks_dirtied);
if (usage->shared_blks_written > 0)
appendStringInfo(es->str, " written=%lld",
(long long) usage->shared_blks_written);
if (has_local || has_temp)
appendStringInfoChar(es->str, ',');
}
if (has_local)
{
appendStringInfoString(es->str, " local");
if (usage->local_blks_hit > 0)
appendStringInfo(es->str, " hit=%lld",
(long long) usage->local_blks_hit);
if (usage->local_blks_read > 0)
appendStringInfo(es->str, " read=%lld",
(long long) usage->local_blks_read);
if (usage->local_blks_dirtied > 0)
appendStringInfo(es->str, " dirtied=%lld",
(long long) usage->local_blks_dirtied);
if (usage->local_blks_written > 0)
appendStringInfo(es->str, " written=%lld",
(long long) usage->local_blks_written);
if (has_temp)
appendStringInfoChar(es->str, ',');
}
if (has_temp)
{
appendStringInfoString(es->str, " temp");
if (usage->temp_blks_read > 0)
appendStringInfo(es->str, " read=%lld",
(long long) usage->temp_blks_read);
if (usage->temp_blks_written > 0)
appendStringInfo(es->str, " written=%lld",
(long long) usage->temp_blks_written);
}
appendStringInfoChar(es->str, '\n');
}
/* As above, show only positive counter values. */
if (has_shared_timing || has_local_timing || has_temp_timing)
{
ExplainIndentText(es);
appendStringInfoString(es->str, "I/O Timings:");
if (has_shared_timing)
{
appendStringInfoString(es->str, " shared");
if (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time))
appendStringInfo(es->str, " read=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->shared_blk_read_time));
if (!INSTR_TIME_IS_ZERO(usage->shared_blk_write_time))
appendStringInfo(es->str, " write=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->shared_blk_write_time));
if (has_local_timing || has_temp_timing)
appendStringInfoChar(es->str, ',');
}
if (has_local_timing)
{
appendStringInfoString(es->str, " local");
if (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time))
appendStringInfo(es->str, " read=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->local_blk_read_time));
if (!INSTR_TIME_IS_ZERO(usage->local_blk_write_time))
appendStringInfo(es->str, " write=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->local_blk_write_time));
if (has_temp_timing)
appendStringInfoChar(es->str, ',');
}
if (has_temp_timing)
{
appendStringInfoString(es->str, " temp");
if (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time))
appendStringInfo(es->str, " read=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->temp_blk_read_time));
if (!INSTR_TIME_IS_ZERO(usage->temp_blk_write_time))
appendStringInfo(es->str, " write=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->temp_blk_write_time));
}
appendStringInfoChar(es->str, '\n');
}
}
else
{
ExplainPropertyInteger("Shared Hit Blocks", NULL,
usage->shared_blks_hit, es);
ExplainPropertyInteger("Shared Read Blocks", NULL,
usage->shared_blks_read, es);
ExplainPropertyInteger("Shared Dirtied Blocks", NULL,
usage->shared_blks_dirtied, es);
ExplainPropertyInteger("Shared Written Blocks", NULL,
usage->shared_blks_written, es);
ExplainPropertyInteger("Local Hit Blocks", NULL,
usage->local_blks_hit, es);
ExplainPropertyInteger("Local Read Blocks", NULL,
usage->local_blks_read, es);
ExplainPropertyInteger("Local Dirtied Blocks", NULL,
usage->local_blks_dirtied, es);
ExplainPropertyInteger("Local Written Blocks", NULL,
usage->local_blks_written, es);
ExplainPropertyInteger("Temp Read Blocks", NULL,
usage->temp_blks_read, es);
ExplainPropertyInteger("Temp Written Blocks", NULL,
usage->temp_blks_written, es);
if (track_io_timing)
{
ExplainPropertyFloat("Shared I/O Read Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->shared_blk_read_time),
3, es);
ExplainPropertyFloat("Shared I/O Write Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->shared_blk_write_time),
3, es);
ExplainPropertyFloat("Local I/O Read Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->local_blk_read_time),
3, es);
ExplainPropertyFloat("Local I/O Write Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->local_blk_write_time),
3, es);
ExplainPropertyFloat("Temp I/O Read Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->temp_blk_read_time),
3, es);
ExplainPropertyFloat("Temp I/O Write Time", "ms",
INSTR_TIME_GET_MILLISEC(usage->temp_blk_write_time),
3, es);
}
}
}
/*
* Indent a text-format line.
*
* We indent by two spaces per indentation level. However, when emitting
* data for a parallel worker there might already be data on the current line
* (cf. ExplainOpenWorker); in that case, don't indent any more.
*
* Copied from explain.c.
*/
static void
ExplainIndentText(ExplainState *es)
{
Assert(es->format == EXPLAIN_FORMAT_TEXT);
if (es->str->len == 0 || es->str->data[es->str->len - 1] == '\n')
appendStringInfoSpaces(es->str, es->indent * 2);
}
/*
* Show memory usage details.
*
* Copied from explain.c.
*/
static void
show_memory_counters(ExplainState *es, const MemoryContextCounters *mem_counters)
{
int64 memUsedkB = BYTES_TO_KILOBYTES(mem_counters->totalspace -
mem_counters->freespace);
int64 memAllocatedkB = BYTES_TO_KILOBYTES(mem_counters->totalspace);
if (es->format == EXPLAIN_FORMAT_TEXT)
{
ExplainIndentText(es);
appendStringInfo(es->str,
"Memory: used=" INT64_FORMAT "kB allocated=" INT64_FORMAT "kB",
memUsedkB, memAllocatedkB);
appendStringInfoChar(es->str, '\n');
}
else
{
ExplainPropertyInteger("Memory Used", "kB", memUsedkB, es);
ExplainPropertyInteger("Memory Allocated", "kB", memAllocatedkB, es);
}
}
/*
* ExplainPrintSerialize -
* Append information about query output volume to es->str.
*
* Copied from explain.c.
*/
static void
ExplainPrintSerialize(ExplainState *es, SerializeMetrics *metrics)
{
const char *format;
/* We shouldn't get called for EXPLAIN_SERIALIZE_NONE */
if (es->serialize == EXPLAIN_SERIALIZE_TEXT)
format = "text";
else
{
Assert(es->serialize == EXPLAIN_SERIALIZE_BINARY);
format = "binary";
}
ExplainOpenGroup("Serialization", "Serialization", true, es);
if (es->format == EXPLAIN_FORMAT_TEXT)
{
ExplainIndentText(es);
if (es->timing)
appendStringInfo(es->str, "Serialization: time=%.3f ms output=" UINT64_FORMAT "kB format=%s\n",
1000.0 * INSTR_TIME_GET_DOUBLE(metrics->timeSpent),
BYTES_TO_KILOBYTES(metrics->bytesSent),
format);
else
appendStringInfo(es->str, "Serialization: output=" UINT64_FORMAT "kB format=%s\n",
BYTES_TO_KILOBYTES(metrics->bytesSent),
format);
if (es->buffers && peek_buffer_usage(es, &metrics->bufferUsage))
{
es->indent++;
show_buffer_usage(es, &metrics->bufferUsage);
es->indent--;
}
}
else
{
if (es->timing)
ExplainPropertyFloat("Time", "ms",
1000.0 * INSTR_TIME_GET_DOUBLE(metrics->timeSpent),
3, es);
ExplainPropertyUInteger("Output Volume", "kB",
BYTES_TO_KILOBYTES(metrics->bytesSent), es);
ExplainPropertyText("Format", format, es);
if (es->buffers)
show_buffer_usage(es, &metrics->bufferUsage);
}
ExplainCloseGroup("Serialization", "Serialization", true, es);
}
/*
* GetSerializationMetrics - collect metrics
*
* We have to be careful here since the receiver could be an IntoRel
* receiver if the subject statement is CREATE TABLE AS. In that
* case, return all-zeroes stats.
*
* Copied from explain.c.
*/
static SerializeMetrics
GetSerializationMetrics(DestReceiver *dest)
{
SerializeMetrics empty;
if (dest->mydest == DestExplainSerialize)
return ((SerializeDestReceiver *) dest)->metrics;
memset(&empty, 0, sizeof(SerializeMetrics));
INSTR_TIME_SET_ZERO(empty.timeSpent);
return empty;
}
#endif
View File
@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC
}
else if (IsA(originalNode, Var))
{
Var *newColumn = copyObject((Var *) originalNode);
newColumn->varno = masterTableId;
newColumn->varattno = walkerContext->columnId;
Var *origColumn = (Var *) originalNode;
Var *newColumn = makeVar(masterTableId, walkerContext->columnId,
origColumn->vartype, origColumn->vartypmod,
origColumn->varcollid, origColumn->varlevelsup);
walkerContext->columnId++;
newNode = (Node *) newColumn;
@ -4753,23 +4754,36 @@ WorkerLimitCount(Node *limitCount, Node *limitOffset, OrderByLimitReference
if (workerLimitNode != NULL && limitOffset != NULL)
{
Const *workerLimitConst = (Const *) workerLimitNode;
/* Only update the worker limit if the const is not null.*/
if (!workerLimitConst->constisnull)
{
Const *workerOffsetConst = (Const *) limitOffset;
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);
/* If the offset is null, it defaults to 0 when cast to int64. */
int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);
workerLimitCount = workerLimitCount + workerOffsetCount;
workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount);
}
}
/* display debug message on limit push down */
if (workerLimitNode != NULL)
{
Const *workerLimitConst = (Const *) workerLimitNode;
if (!workerLimitConst->constisnull)
{
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT,
workerLimitCount)));
}
else
{
ereport(DEBUG1, (errmsg("push down of limit count: ALL")));
}
}
return workerLimitNode;
}
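In effect, LIMIT x OFFSET y on the coordinator is pushed down as LIMIT x+y to each worker, with a NULL limit (LIMIT ALL) left untouched and a NULL offset treated as 0. A standalone sketch of just that arithmetic, with a nullable-int64 struct standing in for the Const nodes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stands in for a Const node's (constvalue, constisnull) pair */
typedef struct { int64_t value; bool isnull; } NullableInt64;

/* LIMIT x OFFSET y on the coordinator becomes LIMIT x+y on each worker;
 * a NULL limit (LIMIT ALL) is left as-is, a NULL offset counts as 0 */
static NullableInt64
worker_limit(NullableInt64 limit, NullableInt64 offset)
{
	if (!limit.isnull)
	{
		int64_t off = offset.isnull ? 0 : offset.value;
		limit.value += off;
	}
	return limit;
}

int main(void)
{
	NullableInt64 lim = { 10, false }, off = { 5, false };
	NullableInt64 pushed = worker_limit(lim, off);

	if (pushed.isnull)
		printf("push down of limit count: ALL\n");
	else
		printf("push down of limit count: %lld\n", (long long) pushed.value);
	return 0; /* prints 15 */
}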
View File
@ -1170,7 +1170,8 @@ HasComplexRangeTableType(Query *queryTree)
if (rangeTableEntry->rtekind != RTE_RELATION &&
rangeTableEntry->rtekind != RTE_SUBQUERY &&
rangeTableEntry->rtekind != RTE_FUNCTION &&
rangeTableEntry->rtekind != RTE_VALUES)
rangeTableEntry->rtekind != RTE_VALUES &&
!IsJsonTableRTE(rangeTableEntry))
{
hasComplexRangeTableType = true;
}
View File
@ -2556,13 +2556,15 @@ AllShardsColocated(List *relationShardList)
/*
* ContainsOnlyLocalTables returns true if there is only
* local tables and not any distributed or reference table.
* ContainsOnlyLocalOrReferenceTables returns true if there are no distributed
* tables in the query. In other words, the query might reference only local
* tables and/or reference tables, but no fully distributed tables.
*/
bool
ContainsOnlyLocalTables(RTEListProperties *rteProperties)
ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties)
{
return !rteProperties->hasDistributedTable && !rteProperties->hasReferenceTable;
/* If hasDistributedTable is false, then all tables are either local or reference. */
return !rteProperties->hasDistributedTable;
}
View File
@ -45,8 +45,6 @@
static RangeTblEntry * AnchorRte(Query *subquery);
static List * UnionRelationRestrictionLists(List *firstRelationList,
List *secondRelationList);
static List * CreateFilteredTargetListForRelation(Oid relationId,
List *requiredAttributes);
static List * CreateDummyTargetList(Oid relationId, List *requiredAttributes);
static TargetEntry * CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index
rteIndex,
@ -378,7 +376,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes)
* only the required columns of the given relation. If there are no required
* columns, then a dummy NULL column is put as the only entry.
*/
static List *
List *
CreateFilteredTargetListForRelation(Oid relationId, List *requiredAttributes)
{
Relation relation = relation_open(relationId, AccessShareLock);
View File
@ -61,7 +61,8 @@ typedef enum RecurringTuplesType
RECURRING_TUPLES_FUNCTION,
RECURRING_TUPLES_EMPTY_JOIN_TREE,
RECURRING_TUPLES_RESULT_FUNCTION,
RECURRING_TUPLES_VALUES
RECURRING_TUPLES_VALUES,
RECURRING_TUPLES_JSON_TABLE
} RecurringTuplesType;
/*
@ -347,7 +348,8 @@ IsFunctionOrValuesRTE(Node *node)
RangeTblEntry *rangeTblEntry = (RangeTblEntry *) node;
if (rangeTblEntry->rtekind == RTE_FUNCTION ||
rangeTblEntry->rtekind == RTE_VALUES)
rangeTblEntry->rtekind == RTE_VALUES ||
IsJsonTableRTE(rangeTblEntry))
{
return true;
}
@ -700,6 +702,13 @@ DeferErrorIfFromClauseRecurs(Query *queryTree)
"the FROM clause contains VALUES", NULL, "the FROM clause contains VALUES", NULL,
NULL); NULL);
} }
else if (recurType == RECURRING_TUPLES_JSON_TABLE)
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"correlated subqueries are not supported when "
"the FROM clause contains JSON_TABLE", NULL,
NULL);
}
/*
@ -1204,7 +1213,8 @@ DeferErrorIfUnsupportedTableCombination(Query *queryTree)
*/
if (rangeTableEntry->rtekind == RTE_RELATION ||
rangeTableEntry->rtekind == RTE_SUBQUERY ||
rangeTableEntry->rtekind == RTE_RESULT)
rangeTableEntry->rtekind == RTE_RESULT ||
IsJsonTableRTE(rangeTableEntry))
{
/* accepted */
}
@ -1372,6 +1382,13 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
"VALUES is not supported within a " "VALUES is not supported within a "
"UNION", NULL); "UNION", NULL);
} }
else if (recurType == RECURRING_TUPLES_JSON_TABLE)
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"cannot push down this subquery",
"JSON_TABLE is not supported within a "
"UNION", NULL);
}
return NULL;
}
@ -1477,6 +1494,11 @@ RecurringTypeDescription(RecurringTuplesType recurType)
return "a VALUES clause"; return "a VALUES clause";
} }
case RECURRING_TUPLES_JSON_TABLE:
{
return "a JSON_TABLE";
}
case RECURRING_TUPLES_INVALID:
{
/*
@ -1673,7 +1695,8 @@ DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *plannerInfo,
* strings anyway.
*/
if (recurType != RECURRING_TUPLES_VALUES &&
recurType != RECURRING_TUPLES_RESULT_FUNCTION)
recurType != RECURRING_TUPLES_RESULT_FUNCTION &&
recurType != RECURRING_TUPLES_JSON_TABLE)
{
recurTypeDescription = psprintf("%s (%s)", recurTypeDescription,
recurringRangeTableEntry->eref->
@@ -1750,6 +1773,26 @@ ContainsRecurringRangeTable(List *rangeTable, RecurringTuplesType *recurType)
}

+/*
+ * IsJsonTableRTE checks whether the RTE refers to a JSON_TABLE
+ * table function, which was introduced in PostgreSQL 17.
+ */
+bool
+IsJsonTableRTE(RangeTblEntry *rte)
+{
+#if PG_VERSION_NUM >= PG_VERSION_17
+   if (rte == NULL)
+   {
+       return false;
+   }
+
+   return (rte->rtekind == RTE_TABLEFUNC &&
+           rte->tablefunc->functype == TFT_JSON_TABLE);
+#endif
+
+   return false;
+}
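For illustration (not part of this diff): on PostgreSQL 17, the new check classifies FROM-clause entries like the hypothetical JSON_TABLE below as recurring tuples; the document and column names are made up.

    -- hypothetical JSON_TABLE reference in a FROM clause (PostgreSQL 17+)
    SELECT jt.id
    FROM JSON_TABLE('[{"id": 1}, {"id": 2}]', '$[*]'
                    COLUMNS (id int PATH '$.id')) AS jt;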
/*
 * HasRecurringTuples returns whether any part of the expression will generate
 * the same set of tuples in every query on shards when executing a distributed
@@ -1811,6 +1854,11 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
            *recurType = RECURRING_TUPLES_VALUES;
            return true;
        }
+       else if (IsJsonTableRTE(rangeTableEntry))
+       {
+           *recurType = RECURRING_TUPLES_JSON_TABLE;
+           return true;
+       }

        return false;
    }
@@ -2049,6 +2097,16 @@ CreateSubqueryTargetListAndAdjustVars(List *columnList)
         */
        column->varno = 1;
        column->varattno = resNo;
+
+       /*
+        * 1 subquery means there is one range table entry so with Postgres 16+ we need
+        * to ensure that column's varnullingrels - the set of join rels that can null
+        * the var - is empty. Otherwise, when given the query, the Postgres planner
+        * may attempt to access a non-existent range table and segfault, as in #7787.
+        */
+#if PG_VERSION_NUM >= PG_VERSION_16
+       column->varnullingrels = NULL;
+#endif
    }

    return subqueryTargetEntryList;


@@ -2291,6 +2291,129 @@ BuildReadIntermediateResultsArrayQuery(List *targetEntryList,
}

+/*
+ * For the given target list, build an empty relation with the same target list.
+ * For example, if the target list is (a, b, c), and resultId is "empty", then
+ * it returns a Query object for this SQL:
+ *   SELECT a, b, c FROM (VALUES (NULL, NULL, NULL)) AS empty(a, b, c) WHERE false;
+ */
+Query *
+BuildEmptyResultQuery(List *targetEntryList, char *resultId)
+{
+   List *targetList = NIL;
+   ListCell *targetEntryCell = NULL;
+
+   List *colTypes = NIL;
+   List *colTypMods = NIL;
+   List *colCollations = NIL;
+   List *colNames = NIL;
+
+   List *valueConsts = NIL;
+   List *valueTargetList = NIL;
+   List *valueColNames = NIL;
+
+   int targetIndex = 1;
+
+   /* build the target list and column lists needed */
+   foreach(targetEntryCell, targetEntryList)
+   {
+       TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
+       Node *targetExpr = (Node *) targetEntry->expr;
+
+       char *columnName = targetEntry->resname;
+       Oid columnType = exprType(targetExpr);
+       Oid columnTypMod = exprTypmod(targetExpr);
+       Oid columnCollation = exprCollation(targetExpr);
+
+       if (targetEntry->resjunk)
+       {
+           continue;
+       }
+
+       Var *tgtVar = makeVar(1, targetIndex, columnType, columnTypMod, columnCollation,
+                             0);
+       TargetEntry *tgtEntry = makeTargetEntry((Expr *) tgtVar, targetIndex, columnName,
+                                               false);
+       Const *valueConst = makeConst(columnType, columnTypMod, columnCollation, 0,
+                                     (Datum) 0, true, false);
+
+       StringInfoData *columnString = makeStringInfo();
+       appendStringInfo(columnString, "column%d", targetIndex);
+
+       TargetEntry *valueTgtEntry = makeTargetEntry((Expr *) tgtVar, targetIndex,
+                                                    columnString->data, false);
+
+       valueConsts = lappend(valueConsts, valueConst);
+       valueTargetList = lappend(valueTargetList, valueTgtEntry);
+       valueColNames = lappend(valueColNames, makeString(columnString->data));
+
+       colNames = lappend(colNames, makeString(columnName));
+       colTypes = lappend_oid(colTypes, columnType);
+       colTypMods = lappend_oid(colTypMods, columnTypMod);
+       colCollations = lappend_oid(colCollations, columnCollation);
+
+       targetList = lappend(targetList, tgtEntry);
+
+       targetIndex++;
+   }
+
+   /* Build a RangeTable Entry for the VALUES relation */
+   RangeTblEntry *valuesRangeTable = makeNode(RangeTblEntry);
+   valuesRangeTable->rtekind = RTE_VALUES;
+   valuesRangeTable->values_lists = list_make1(valueConsts);
+   valuesRangeTable->colcollations = colCollations;
+   valuesRangeTable->coltypes = colTypes;
+   valuesRangeTable->coltypmods = colTypMods;
+   valuesRangeTable->alias = NULL;
+   valuesRangeTable->eref = makeAlias("*VALUES*", valueColNames);
+   valuesRangeTable->inFromCl = true;
+
+   RangeTblRef *valuesRTRef = makeNode(RangeTblRef);
+   valuesRTRef->rtindex = 1;
+
+   FromExpr *valuesJoinTree = makeNode(FromExpr);
+   valuesJoinTree->fromlist = list_make1(valuesRTRef);
+
+   /* build the VALUES query */
+   Query *valuesQuery = makeNode(Query);
+   valuesQuery->canSetTag = true;
+   valuesQuery->commandType = CMD_SELECT;
+   valuesQuery->rtable = list_make1(valuesRangeTable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+   valuesQuery->rteperminfos = NIL;
+#endif
+   valuesQuery->jointree = valuesJoinTree;
+   valuesQuery->targetList = valueTargetList;
+
+   /* build the relation selecting from the VALUES */
+   RangeTblEntry *emptyRangeTable = makeNode(RangeTblEntry);
+   emptyRangeTable->rtekind = RTE_SUBQUERY;
+   emptyRangeTable->subquery = valuesQuery;
+   emptyRangeTable->alias = makeAlias(resultId, colNames);
+   emptyRangeTable->eref = emptyRangeTable->alias;
+   emptyRangeTable->inFromCl = true;
+
+   /* build the SELECT query */
+   Query *resultQuery = makeNode(Query);
+   resultQuery->commandType = CMD_SELECT;
+   resultQuery->canSetTag = true;
+   resultQuery->rtable = list_make1(emptyRangeTable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+   resultQuery->rteperminfos = NIL;
+#endif
+
+   RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
+   rangeTableRef->rtindex = 1;
+
+   /* insert a FALSE qual to ensure 0 rows returned */
+   FromExpr *joinTree = makeNode(FromExpr);
+   joinTree->fromlist = list_make1(rangeTableRef);
+   joinTree->quals = makeBoolConst(false, false);
+   resultQuery->jointree = joinTree;
+   resultQuery->targetList = targetList;
+
+   return resultQuery;
+}
+
/*
 * BuildReadIntermediateResultsQuery is the common code for generating
 * queries to read from result files. It is used by


@@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

-   if (RunningUnderIsolationTest)
+   if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
                             SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY,
@@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

-   if (RunningUnderIsolationTest)
+   if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
                             SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY,
@@ -1882,14 +1882,15 @@ WaitForGroupedLogicalRepTargetsToCatchUp(XLogRecPtr sourcePosition,
                GetCurrentTimestamp(),
                logicalReplicationProgressReportTimeout))
        {
-           ereport(LOG, (errmsg(
-                         "The LSN of the target subscriptions on node %s:%d have "
-                         "increased from %ld to %ld at %s where the source LSN is %ld ",
+           ereport(LOG, (errmsg("The LSN of the target subscriptions on node %s:%d "
+                                "has increased from %X/%X to %X/%X at %s where the "
+                                "source LSN is %X/%X ",
                          superuserConnection->hostname,
-                         superuserConnection->port, previousTargetBeforeThisLoop,
-                         targetPosition,
+                         superuserConnection->port,
+                         LSN_FORMAT_ARGS(previousTargetBeforeThisLoop),
+                         LSN_FORMAT_ARGS(targetPosition),
                          timestamptz_to_str(previousLSNIncrementTime),
-                         sourcePosition)));
+                         LSN_FORMAT_ARGS(sourcePosition))));

            previousReportTime = GetCurrentTimestamp();
        }


@@ -94,6 +94,42 @@ replication_origin_filter_cb(LogicalDecodingContext *ctx, RepOriginId origin_id)
}

+/*
+ * update_replication_progress is copied from Postgres 15. We use it to send keepalive
+ * messages when we are filtering out the wal changes resulting from the initial copy.
+ * If we do not send out messages for long enough, the wal receiver will time out.
+ * Postgres 16 has refactored this code such that keepalive messages are sent during
+ * the reordering phase, which is above change_cb, so we do not need to send keepalives
+ * in change_cb.
+ */
+#if (PG_VERSION_NUM < PG_VERSION_16)
+static void
+update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
+{
+   static int changes_count = 0;
+
+   /*
+    * We don't want to try sending a keepalive message after processing each
+    * change as that can have overhead. Tests revealed that there is no
+    * noticeable overhead in doing it after continuously processing 100 or so
+    * changes.
+    */
+#define CHANGES_THRESHOLD 100
+
+   /*
+    * After continuously processing CHANGES_THRESHOLD changes, we
+    * try to send a keepalive message if required.
+    */
+   if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD)
+   {
+       OutputPluginUpdateProgress(ctx, skipped_xact);
+       changes_count = 0;
+   }
+}
+#endif
+
/*
 * shard_split_change_cb function emits the incoming tuple change
 * to the appropriate destination shard.
@@ -112,6 +148,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
        return;
    }

+#if (PG_VERSION_NUM < PG_VERSION_16)
+
+   /* Send replication keepalive. */
+   update_replication_progress(ctx, false);
+#endif
+
    /* check if the relation is publishable.*/
    if (!is_publishable_relation(relation))
    {


@@ -27,6 +27,7 @@
#include "catalog/pg_extension.h"
#include "commands/explain.h"
#include "commands/extension.h"
+#include "commands/seclabel.h"
#include "common/string.h"
#include "executor/executor.h"
#include "libpq/auth.h"
@@ -173,15 +174,11 @@ static bool FinishedStartupCitusBackend = false;
static object_access_hook_type PrevObjectAccessHook = NULL;

-#if PG_VERSION_NUM >= PG_VERSION_15
static shmem_request_hook_type prev_shmem_request_hook = NULL;
-#endif

void _PG_init(void);

-#if PG_VERSION_NUM >= PG_VERSION_15
static void citus_shmem_request(void);
-#endif
static void CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int
                                  subId, void *arg);
static void DoInitialCleanup(void);
@@ -474,10 +471,8 @@ _PG_init(void)
    original_client_auth_hook = ClientAuthentication_hook;
    ClientAuthentication_hook = CitusAuthHook;

-#if PG_VERSION_NUM >= PG_VERSION_15
    prev_shmem_request_hook = shmem_request_hook;
    shmem_request_hook = citus_shmem_request;
-#endif

    InitializeMaintenanceDaemon();
@@ -572,6 +567,16 @@ _PG_init(void)
    INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
    INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
    INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
+
+   /*
+    * This part is only for SECURITY LABEL tests,
+    * mimicking what an actual security label provider would do.
+    */
+   if (RunningUnderCitusTestSuite)
+   {
+       register_label_provider("citus '!tests_label_provider",
+                               citus_test_object_relabel);
+   }
}
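For illustration (not part of this diff): with citus.running_under_citus_test_suite enabled at server start, the dummy provider registered above lets tests run SECURITY LABEL commands along these lines; the role name and label value are assumptions.

    -- hypothetical test command against the dummy provider
    SECURITY LABEL FOR "citus '!tests_label_provider"
        ON ROLE some_test_role IS 'citus_unclassified';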
@@ -591,8 +596,6 @@ AdjustDynamicLibraryPathForCdcDecoders(void)
}

-#if PG_VERSION_NUM >= PG_VERSION_15

/*
 * Requests any additional shared memory required for citus.
 */
@@ -613,9 +616,6 @@ citus_shmem_request(void)
}

-#endif

/*
 * DoInitialCleanup does cleanup at start time.
 * Currently it:
@@ -2293,13 +2293,14 @@ RegisterCitusConfigVariables(void)
        WarnIfReplicationModelIsSet, NULL, NULL);

    DefineCustomBoolVariable(
-       "citus.running_under_isolation_test",
+       "citus.running_under_citus_test_suite",
        gettext_noop(
            "Only useful for testing purposes, when set to true, Citus does some "
-           "tricks to implement useful isolation tests with rebalancing. Should "
+           "tricks to implement useful isolation tests with rebalancing. It also "
+           "registers a dummy label provider for SECURITY LABEL tests. Should "
            "never be set to true on production systems "),
        gettext_noop("for details of the tricks implemented, refer to the source code"),
-       &RunningUnderIsolationTest,
+       &RunningUnderCitusTestSuite,
        false,
        PGC_SUSET,
        GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
@@ -2853,14 +2854,27 @@ ApplicationNameAssignHook(const char *newval, void *extra)
    DetermineCitusBackendType(newval);

    /*
-    * AssignGlobalPID might read from catalog tables to get the the local
-    * nodeid. But ApplicationNameAssignHook might be called before catalog
-    * access is available to the backend (such as in early stages of
-    * authentication). We use StartupCitusBackend to initialize the global pid
-    * after catalogs are available. After that happens this hook becomes
-    * responsible to update the global pid on later application_name changes.
-    * So we set the FinishedStartupCitusBackend flag in StartupCitusBackend to
-    * indicate when this responsibility handoff has happened.
+    * We use StartupCitusBackend to initialize the global pid after catalogs
+    * are available. After that happens this hook becomes responsible to update
+    * the global pid on later application_name changes. So we set the
+    * FinishedStartupCitusBackend flag in StartupCitusBackend to indicate when
+    * this responsibility handoff has happened.
+    *
+    * Also note that when application_name changes, we don't actually need to
+    * try re-assigning the global pid for external client backends and
+    * background workers because application_name doesn't affect the global
+    * pid for such backends - note that the !IsExternalClientBackend() check
+    * covers both types of backends. Plus, trying to re-assign the global pid
+    * for such backends would unnecessarily cause performing a catalog access
+    * when the cached local node id is invalidated. However, accessing the
+    * catalog tables is dangerous in certain situations, like when we're not
+    * in a transaction block. And for the other types of backends, i.e., the
+    * Citus internal backends, we need to re-assign the global pid when the
+    * application_name changes because for such backends we simply extract the
+    * global pid inherited from the originating backend from the
+    * application_name - that's specified by the originating backend when
+    * opening that connection - and this doesn't require catalog access.
     *
     * Another solution to the catalog table acccess problem would be to update
     * global pid lazily, like we do for HideShards. But that's not possible
@@ -2870,7 +2884,7 @@ ApplicationNameAssignHook(const char *newval, void *extra)
     * as reasonably possible, which is also why we extract global pids in the
     * AuthHook already (extracting doesn't require catalog access).
     */
-   if (FinishedStartupCitusBackend)
+   if (FinishedStartupCitusBackend && !IsExternalClientBackend())
    {
        AssignGlobalPID(newval);
    }
@@ -2905,6 +2919,9 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
        "sslcrl",
        "sslkey",
        "sslmode",
+#if PG_VERSION_NUM >= PG_VERSION_17
+       "sslnegotiation",
+#endif
        "sslrootcert",
        "tcp_user_timeout",
    };
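For illustration (not part of this diff): once "sslnegotiation" is on the allow-list, a PostgreSQL 17 deployment could carry it in citus.node_conninfo; the values below are assumptions.

    -- hypothetical setting for direct SSL negotiation between nodes (PG 17+)
    ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require sslnegotiation=direct';
    SELECT pg_reload_conf();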


@@ -2,3 +2,4 @@
-- bump version to 13.0-1
#include "udfs/citus_prepare_pg_upgrade/13.0-1.sql"
+#include "udfs/create_time_partitions/13.0-1.sql"


@@ -1,2 +1,4 @@
-- citus--13.0-1--12.1-1
-- this is an empty downgrade path since citus--12.1-1--13.0-1.sql is empty
+
+#include "../udfs/create_time_partitions/10.2-1.sql"


@@ -0,0 +1,58 @@
+CREATE OR REPLACE FUNCTION pg_catalog.create_time_partitions(
+    table_name regclass,
+    partition_interval INTERVAL,
+    end_at timestamptz,
+    start_from timestamptz DEFAULT now())
+returns boolean
+LANGUAGE plpgsql
+AS $$
+DECLARE
+    -- partitioned table name
+    schema_name_text name;
+    table_name_text name;
+
+    -- record for to-be-created partition
+    missing_partition_record record;
+
+    -- result indicates whether any partitions were created
+    partition_created bool := false;
+BEGIN
+    IF start_from >= end_at THEN
+        RAISE 'start_from (%) must be older than end_at (%)', start_from, end_at;
+    END IF;
+
+    IF NOT isfinite(partition_interval) THEN
+        RAISE 'Partition interval must be a finite value';
+    END IF;
+
+    SELECT nspname, relname
+    INTO schema_name_text, table_name_text
+    FROM pg_class JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
+    WHERE pg_class.oid = table_name::oid;
+
+    -- Get missing partition range info using get_missing_time_partition_ranges
+    -- and create partitions using that info.
+    FOR missing_partition_record IN
+        SELECT *
+        FROM get_missing_time_partition_ranges(table_name, partition_interval, end_at, start_from)
+    LOOP
+        EXECUTE format('CREATE TABLE %I.%I PARTITION OF %I.%I FOR VALUES FROM (%L) TO (%L)',
+                       schema_name_text,
+                       missing_partition_record.partition_name,
+                       schema_name_text,
+                       table_name_text,
+                       missing_partition_record.range_from_value,
+                       missing_partition_record.range_to_value);
+
+        partition_created := true;
+    END LOOP;
+
+    RETURN partition_created;
+END;
+$$;
+
+COMMENT ON FUNCTION pg_catalog.create_time_partitions(
+    table_name regclass,
+    partition_interval INTERVAL,
+    end_at timestamptz,
+    start_from timestamptz)
+IS 'create time partitions for the given range';
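A brief usage sketch (not part of this diff; the events table is hypothetical). On PostgreSQL 17, where interval values may be infinite, the new isfinite() guard rejects a non-finite interval up front:

    -- create daily partitions up to one week ahead
    SELECT create_time_partitions('events'::regclass, INTERVAL '1 day',
                                  now() + INTERVAL '7 days');

    -- rejected by the isfinite() guard
    SELECT create_time_partitions('events'::regclass, INTERVAL 'infinity',
                                  now() + INTERVAL '7 days');
    -- ERROR:  Partition interval must be a finite value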


@@ -21,6 +21,10 @@ BEGIN
        RAISE 'start_from (%) must be older than end_at (%)', start_from, end_at;
    END IF;

+   IF NOT isfinite(partition_interval) THEN
+       RAISE 'Partition interval must be a finite value';
+   END IF;
+
    SELECT nspname, relname
    INTO schema_name_text, table_name_text
    FROM pg_class JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid


@@ -310,7 +310,7 @@ fake_relation_set_new_filenode(Relation rel,
     */
    *minmulti = GetOldestMultiXactId();

-   SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true);
+   SMgrRelation srel = RelationCreateStorage(*newrnode, persistence, true);

    /*
     * If required, set up an init fork for an unlogged table so that it can


@@ -50,6 +50,13 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
     * so we are using first primary worker node just for test purposes.
     */
    WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode();
+   if (dummyWorkerNode == NULL)
+   {
+       ereport(ERROR, (errmsg("no worker nodes found"),
+                       errdetail("Function activate_node_snapshot is meant to be "
+                                 "used when running tests on a multi-node cluster "
+                                 "with workers.")));
+   }

    /*
     * Create MetadataSyncContext which is used throughout nodes' activation.


@@ -190,6 +190,9 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)

/*
 * override_backend_data_gpid is a wrapper around SetBackendDataGpid().
+ * It also sets distributedCommandOriginator to true, since the only caller
+ * of this function actually wants this backend to be treated as a
+ * distributed command originator with the given global pid.
 */
Datum
override_backend_data_gpid(PG_FUNCTION_ARGS)
@@ -199,6 +202,7 @@ override_backend_data_gpid(PG_FUNCTION_ARGS)
    uint64 gpid = PG_GETARG_INT64(0);

    SetBackendDataGlobalPID(gpid);
+   SetBackendDataDistributedCommandOriginator(true);

    PG_RETURN_VOID();
}
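For illustration (not part of this diff): this test-only UDF is called from regression tests roughly as below; the literal gpid is an assumption (Citus global pids encode a node id and a process id).

    -- hypothetical test call: impersonate global pid 10000000123
    SELECT override_backend_data_gpid(10000000123::bigint);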


@@ -49,13 +49,8 @@ makeIntConst(int val, int location)
{
    A_Const *n = makeNode(A_Const);

-#if PG_VERSION_NUM >= PG_VERSION_15
    n->val.ival.type = T_Integer;
    n->val.ival.ival = val;
-#else
-   n->val.type = T_Integer;
-   n->val.val.ival = val;
-#endif
    n->location = location;

    return (Node *) n;


@@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
        bool showCurrentBackendDetails = showAllBackends;
        BackendData *currentBackend =
            &backendManagementShmemData->backends[backendIndex];
-       PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+       PGPROC *currentProc = GetPGProcByNumber(backendIndex);

        /* to work on data after releasing g spinlock to protect against errors */
        uint64 transactionNumber = 0;
@@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
        }

        Oid databaseId = currentBackend->databaseId;
-       int backendPid = ProcGlobal->allProcs[backendIndex].pid;
+       int backendPid = GetPGProcByNumber(backendIndex)->pid;

        /*
         * We prefer to use worker_query instead of distributedCommandOriginator in
@@ -519,15 +519,6 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
void
InitializeBackendManagement(void)
{
-   /* on PG 15, we use shmem_request_hook_type */
-#if PG_VERSION_NUM < PG_VERSION_15
-
-   /* allocate shared memory */
-   if (!IsUnderPostmaster)
-   {
-       RequestAddinShmemSpace(BackendManagementShmemSize());
-   }
-#endif
    prev_shmem_startup_hook = shmem_startup_hook;
    shmem_startup_hook = BackendManagementShmemInit;
}
@@ -855,6 +846,16 @@ GetCurrentDistributedTransactionId(void)
void
AssignDistributedTransactionId(void)
{
+   /*
+    * MyBackendData should always be available. However, we observed some
+    * crashes where certain hooks were not executed.
+    * Bug 3697586: Server crashes when assigning distributed transaction
+    */
+   if (!MyBackendData)
+   {
+       ereport(ERROR, (errmsg("backend is not ready for distributed transactions")));
+   }
+
    pg_atomic_uint64 *transactionNumberSequence =
        &backendManagementShmemData->nextTransactionNumber;
@@ -964,6 +965,23 @@ SetBackendDataGlobalPID(uint64 gpid)
}

+/*
+ * SetBackendDataDistributedCommandOriginator sets the distributedCommandOriginator
+ * field on MyBackendData.
+ */
+void
+SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator)
+{
+   if (!MyBackendData)
+   {
+       return;
+   }
+   SpinLockAcquire(&MyBackendData->mutex);
+   MyBackendData->distributedCommandOriginator = distributedCommandOriginator;
+   SpinLockRelease(&MyBackendData->mutex);
+}
+
/*
 * GetGlobalPID returns the global process id of the current backend.
 */
@@ -1280,7 +1298,7 @@ ActiveDistributedTransactionNumbers(void)
    /* build list of starting procs */
    for (int curBackend = 0; curBackend < MaxBackends; curBackend++)
    {
-       PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+       PGPROC *currentProc = GetPGProcByNumber(curBackend);
        BackendData currentBackendData;

        if (currentProc->pid == 0)


@@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
    for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
    {
-       PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
+       PGPROC *currentProc = GetPGProcByNumber(backendIndex);
        BackendData currentBackendData;

        /* we're not interested in processes that are not active or waiting on a lock */


@@ -561,7 +561,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx)
    /* build list of starting procs */
    for (int curBackend = 0; curBackend < totalProcs; curBackend++)
    {
-       PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
+       PGPROC *currentProc = GetPGProcByNumber(curBackend);
        BackendData currentBackendData;

        if (currentProc->pid == 0)


@@ -53,7 +53,8 @@ PG_FUNCTION_INFO_V1(recover_prepared_transactions);

/* Local functions forward declarations */
-static int RecoverWorkerTransactions(WorkerNode *workerNode);
+static int RecoverWorkerTransactions(WorkerNode *workerNode,
+                                     MultiConnection *connection);
static List * PendingWorkerTransactionList(MultiConnection *connection);
static bool IsTransactionInProgress(HTAB *activeTransactionNumberSet,
                                    char *preparedTransactionName);
@@ -123,10 +124,51 @@ RecoverTwoPhaseCommits(void)
    LockTransactionRecovery(ShareUpdateExclusiveLock);

    List *workerList = ActivePrimaryNodeList(NoLock);
+   List *workerConnections = NIL;
    WorkerNode *workerNode = NULL;
+   MultiConnection *connection = NULL;
+
+   /*
+    * Pre-establish all connections to worker nodes.
+    *
+    * We do this to enforce a consistent lock acquisition order and prevent deadlocks.
+    * Currently, during extension updates, we take strong locks on the Citus
+    * catalog tables in a specific order: first on pg_dist_authinfo, then on
+    * pg_dist_transaction. It's critical that any operation locking these two
+    * tables adheres to this order, or a deadlock could occur.
+    *
+    * Note that RecoverWorkerTransactions() retains its lock until the end
+    * of the transaction, while GetNodeConnection() releases its lock after
+    * the catalog lookup. So when there are multiple workers in the active primary
+    * node list, the lock acquisition order may reverse in subsequent iterations
+    * of the loop calling RecoverWorkerTransactions(), increasing the risk
+    * of deadlock.
+    *
+    * By establishing all worker connections upfront, we ensure that
+    * RecoverWorkerTransactions() deals with a single distributed catalog table,
+    * thereby preventing deadlocks regardless of the lock acquisition sequence
+    * used in the upgrade extension script.
+    */
    foreach_declared_ptr(workerNode, workerList)
    {
-       recoveredTransactionCount += RecoverWorkerTransactions(workerNode);
+       int connectionFlags = 0;
+       char *nodeName = workerNode->workerName;
+       int nodePort = workerNode->workerPort;
+
+       connection = GetNodeConnection(connectionFlags, nodeName, nodePort);
+       Assert(connection != NULL);
+
+       /*
+        * We don't verify connection validity here.
+        * Instead, RecoverWorkerTransactions() performs the necessary
+        * sanity checks on the connection state.
+        */
+       workerConnections = lappend(workerConnections, connection);
+   }
+
+   forboth_ptr(workerNode, workerList, connection, workerConnections)
+   {
+       recoveredTransactionCount += RecoverWorkerTransactions(workerNode, connection);
    }

    return recoveredTransactionCount;
@@ -138,7 +180,7 @@ RecoverTwoPhaseCommits(void)
 * started by this node on the specified worker.
 */
static int
-RecoverWorkerTransactions(WorkerNode *workerNode)
+RecoverWorkerTransactions(WorkerNode *workerNode, MultiConnection *connection)
{
    int recoveredTransactionCount = 0;
@@ -156,8 +198,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
    bool recoveryFailed = false;

-   int connectionFlags = 0;
-   MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort);
+   Assert(connection != NULL);
    if (connection->pgConn == NULL || PQstatus(connection->pgConn) != CONNECTION_OK)
    {
        ereport(WARNING, (errmsg("transaction recovery cannot connect to %s:%d",


@@ -1393,87 +1393,6 @@ CalculateBackoffDelay(int retryCount)
}

-#if PG_VERSION_NUM < PG_VERSION_15
-static const char *
-error_severity(int elevel)
-{
-   const char *prefix;
-
-   switch (elevel)
-   {
-       case DEBUG1:
-       case DEBUG2:
-       case DEBUG3:
-       case DEBUG4:
-       case DEBUG5:
-       {
-           prefix = gettext_noop("DEBUG");
-           break;
-       }
-
-       case LOG:
-       case LOG_SERVER_ONLY:
-       {
-           prefix = gettext_noop("LOG");
-           break;
-       }
-
-       case INFO:
-       {
-           prefix = gettext_noop("INFO");
-           break;
-       }
-
-       case NOTICE:
-       {
-           prefix = gettext_noop("NOTICE");
-           break;
-       }
-
-       case WARNING:
-       {
-           prefix = gettext_noop("WARNING");
-           break;
-       }
-
-       case WARNING_CLIENT_ONLY:
-       {
-           prefix = gettext_noop("WARNING");
-           break;
-       }
-
-       case ERROR:
-       {
-           prefix = gettext_noop("ERROR");
-           break;
-       }
-
-       case FATAL:
-       {
-           prefix = gettext_noop("FATAL");
-           break;
-       }
-
-       case PANIC:
-       {
-           prefix = gettext_noop("PANIC");
-           break;
-       }
-
-       default:
-       {
-           prefix = "???";
-           break;
-       }
-   }
-
-   return prefix;
-}
-#endif

/*
 * bgw_generate_returned_message -
 *     generates the message to be inserted into the job_run_details table


@@ -15,6 +15,7 @@
#include "unistd.h"

#include "access/hash.h"
+#include "common/pg_prng.h"
#include "executor/execdesc.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
@@ -38,10 +39,6 @@
#include "distributed/tuplestore.h"
#include "distributed/utils/citus_stat_tenants.h"

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-#include "common/pg_prng.h"
-#endif
-
static void AttributeMetricsIfApplicable(void);

ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
@@ -298,13 +295,7 @@ AttributeTask(char *tenantId, int colocationId, CmdType commandType)
    /* If the tenant is not found in the hash table, we will track the query with a probability of StatTenantsSampleRateForNewTenants. */
    if (!found)
    {
-#if (PG_VERSION_NUM >= PG_VERSION_15)
        double randomValue = pg_prng_double(&pg_global_prng_state);
-#else
-
-       /* Generate a random double between 0 and 1 */
-       double randomValue = (double) random() / MAX_RANDOM_VALUE;
-#endif
        bool shouldTrackQuery = randomValue <= StatTenantsSampleRateForNewTenants;
        if (!shouldTrackQuery)
        {


@@ -362,10 +362,8 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId)
                               leftRelationName, rightRelationName)));
        }

-       List *leftPlacementList = ShardPlacementListSortedByWorker(
-           leftShardId);
-       List *rightPlacementList = ShardPlacementListSortedByWorker(
-           rightShardId);
+       List *leftPlacementList = ShardPlacementList(leftShardId);
+       List *rightPlacementList = ShardPlacementList(rightShardId);

        if (list_length(leftPlacementList) != list_length(rightPlacementList))
        {


@@ -470,12 +470,11 @@ SingleReplicatedTable(Oid relationId)
        return false;
    }

-   List *shardIntervalList = LoadShardList(relationId);
    uint64 *shardIdPointer = NULL;
-   foreach_declared_ptr(shardIdPointer, shardIntervalList)
+   foreach_declared_ptr(shardIdPointer, shardList)
    {
        uint64 shardId = *shardIdPointer;
-       shardPlacementList = ShardPlacementListSortedByWorker(shardId);
+       shardPlacementList = ShardPlacementList(shardId);

        if (list_length(shardPlacementList) != 1)
        {


@@ -170,14 +170,10 @@ WorkerDropDistributedTable(Oid relationId)
     */
    if (!IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL))
    {
-       char *relName = get_rel_name(relationId);
-       Oid schemaId = get_rel_namespace(relationId);
-       char *schemaName = get_namespace_name(schemaId);
-
        StringInfo dropCommand = makeStringInfo();
        appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE",
                         IsForeignTable(relationId) ? " FOREIGN " : " ",
-                        quote_qualified_identifier(schemaName, relName));
+                        generate_qualified_relation_name(relationId));

        Node *dropCommandNode = ParseTreeNode(dropCommand->data);


@@ -441,7 +441,7 @@ FilterShardsFromPgclass(Node *node, void *context)
    /*
     * We process the whole rtable rather than visiting individual RangeTblEntry's
     * in the walker, since we need to know the varno to generate the right
-    * fiter.
+    * filter.
     */
    int varno = 0;
    RangeTblEntry *rangeTableEntry = NULL;
@@ -471,9 +471,27 @@ FilterShardsFromPgclass(Node *node, void *context)
        /* make sure the expression is in the right memory context */
        MemoryContext originalContext = MemoryContextSwitchTo(queryContext);

        /* add relation_is_a_known_shard(oid) IS NOT TRUE to the quals of the query */
        Node *newQual = CreateRelationIsAKnownShardFilter(varno);

+#if PG_VERSION_NUM >= PG_VERSION_17
+
+       /*
+        * In PG17, MERGE queries introduce a new struct `mergeJoinCondition`.
+        * We need to handle this condition safely.
+        */
+       if (query->mergeJoinCondition != NULL)
+       {
+           /* Add the filter to mergeJoinCondition */
+           query->mergeJoinCondition = (Node *) makeBoolExpr(
+               AND_EXPR,
+               list_make2(query->mergeJoinCondition, newQual),
+               -1);
+       }
+       else
+#endif
+       {
+           /* Handle older versions or queries without mergeJoinCondition */
            Node *oldQuals = query->jointree->quals;
            if (oldQuals)
            {
@@ -486,6 +504,7 @@ FilterShardsFromPgclass(Node *node, void *context)
            {
                query->jointree->quals = newQual;
            }
+       }

        MemoryContextSwitchTo(originalContext);
    }


@@ -14,14 +14,6 @@

#include "pg_version_constants.h"

-#if PG_VERSION_NUM >= PG_VERSION_15
-#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
-   ExecARDeleteTriggers(a, b, c, d, e, f)
-#else
-#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
-   ExecARDeleteTriggers(a, b, c, d, e)
-#endif
-
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE

#define ExplainPropertyLong(qlabel, value, es) \


@@ -61,6 +61,7 @@ extern void AssignGlobalPID(const char *applicationName);
extern uint64 GetGlobalPID(void);
extern void SetBackendDataDatabaseId(void);
extern void SetBackendDataGlobalPID(uint64 gpid);
+extern void SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator);
extern uint64 ExtractGlobalPID(const char *applicationName);
extern int ExtractNodeIdFromGlobalPID(uint64 globalPID, bool missingOk);
extern int ExtractProcessIdFromGlobalPID(uint64 globalPID);


@@ -510,6 +510,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok,
extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool
                                                 isPostprocess);

+/* seclabel.c - forward declarations */
+extern List * PostprocessSecLabelStmt(Node *node, const char *queryString);
+extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
+extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel);
+
/* sequence.c - forward declarations */
extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
                                          ProcessUtilityContext processUtilityContext);
@@ -520,13 +525,11 @@ extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *querySt
extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
                                                     ProcessUtilityContext
                                                     processUtilityContext);
extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
-#endif
extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
                                         ProcessUtilityContext processUtilityContext);
extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool
@@ -542,10 +545,8 @@ extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok,
                                                   isPostprocess);
extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool
                                                  isPostprocess);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool
                                                        isPostprocess);
-#endif
extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok, bool
                                              isPostprocess);
extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt);
@@ -749,8 +750,6 @@ extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk, bool
                                             isPostprocess);
extern void CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt,
                                          char *schemaName, uint64 shardId);
-extern List * PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
-                                               ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString);
extern void AlterTriggerRenameEventExtendNames(RenameStmt *renameTriggerStmt,
                                               char *schemaName, uint64 shardId);


@@ -259,14 +259,15 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyTextSearchDictionaryCommentStmt(Node *node);

+/* forward declarations for deparse_seclabel_stmts.c */
+extern char * DeparseSecLabelStmt(Node *node);
+
/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);
extern char * DeparseAlterSequenceSchemaStmt(Node *node);
extern char * DeparseAlterSequenceOwnerStmt(Node *node);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
extern char * DeparseAlterSequencePersistenceStmt(Node *node);
-#endif
extern char * DeparseGrantOnSequenceStmt(Node *node);

/* forward declarations for qualify_sequence_stmt.c */
@@ -274,9 +275,7 @@ extern void QualifyRenameSequenceStmt(Node *node);
extern void QualifyDropSequenceStmt(Node *node);
extern void QualifyAlterSequenceSchemaStmt(Node *node);
extern void QualifyAlterSequenceOwnerStmt(Node *node);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
extern void QualifyAlterSequencePersistenceStmt(Node *node);
-#endif
extern void QualifyGrantOnSequenceStmt(Node *node);

#endif /* CITUS_DEPARSER_H */


@@ -28,11 +28,6 @@

#define CURSOR_OPT_FORCE_DISTRIBUTED 0x080000

-/* Hack to compile Citus on pre-MERGE Postgres versions */
-#if PG_VERSION_NUM < PG_VERSION_15
-#define CMD_MERGE CMD_UNKNOWN
-#endif
-
/* level of planner calls */
extern int PlannerLevel;


@@ -128,9 +128,6 @@ extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId);
extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName);
extern List * GetSequencesFromAttrDef(Oid attrdefOid);
-#if PG_VERSION_NUM < PG_VERSION_15
-ObjectAddress GetAttrDefaultColumnAddress(Oid attrdefoid);
-#endif
extern List * GetAttrDefsFromSequence(Oid seqOid);
extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
                                              AttrNumber attnum, char depType);


@@ -118,7 +118,7 @@ extern bool HasDangerousJoinUsing(List *rtableList, Node *jtnode);
extern Job * RouterJob(Query *originalQuery,
                       PlannerRestrictionContext *plannerRestrictionContext,
                       DeferredErrorMessage **planningError);
-extern bool ContainsOnlyLocalTables(RTEListProperties *rteProperties);
+extern bool ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties);
extern RangeTblEntry * ExtractSourceResultRangeTableEntry(Query *query);

#endif /* MULTI_ROUTER_PLANNER_H */


@@ -39,5 +39,7 @@ extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
                                           List *requiredAttributes,
                                           RTEPermissionInfo *perminfo);
extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes);
+extern List * CreateFilteredTargetListForRelation(Oid relationId,
+                                                  List *requiredAttributes);

#endif /* QUERY_COLOCATION_CHECKER_H */


@@ -46,6 +46,7 @@ extern DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subquery
                                                                 bool
                                                                 outerMostQueryHasLimit);
extern DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *queryTree);
+extern bool IsJsonTableRTE(RangeTblEntry *rte);

#endif /* QUERY_PUSHDOWN_PLANNING_H */


@@ -40,6 +40,7 @@ extern Query * BuildReadIntermediateResultsArrayQuery(List *targetEntryList,
                                                      List *columnAliasList,
                                                      List *resultIdList,
                                                      bool useBinaryCopyFormat);
+extern Query * BuildEmptyResultQuery(List *targetEntryList, char *resultId);
extern bool GeneratingSubplans(void);
extern bool ContainsLocalTableDistributedTableJoin(List *rangeTableList);
extern void ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,


@@ -28,5 +28,10 @@ extern List * GenerateTaskListWithRedistributedResults(
                                                       bool useBinaryFormat);
extern bool IsSupportedRedistributionTarget(Oid targetRelationId);
extern bool IsRedistributablePlan(Plan *selectPlan);
+extern bool HasMergeNotMatchedBySource(Query *query);
+extern void AdjustTaskQueryForEmptySource(Oid targetRelationId,
+                                          Query *mergeQuery,
+                                          List *emptySourceTaskList,
+                                          char *resultIdPrefix);

#endif /* REPARTITION_EXECUTOR_H */
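For context (not part of this diff): judging by its name, HasMergeNotMatchedBySource() targets PostgreSQL 17's MERGE extension sketched below; the table and column names are made up.

    -- hypothetical PG17 MERGE with a NOT MATCHED BY SOURCE action
    MERGE INTO target t
    USING source s ON t.id = s.id
    WHEN MATCHED THEN UPDATE SET val = s.val
    WHEN NOT MATCHED BY SOURCE THEN DELETE;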

Some files were not shown because too many files have changed in this diff.