mirror of https://github.com/citusdata/citus.git
Merge branch 'release-13.0' into fix-issue-7676
commit b201eee280
@@ -68,7 +68,7 @@ USER citus
 
 # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
 FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.14
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.9
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.5
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -104,7 +104,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg17
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.1
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.2
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -223,7 +223,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~
 
 # sets default pg version
-RUN pgenv switch 17.1
+RUN pgenv switch 17.2
 
 # make connecting to the coordinator easy
 ENV PGPORT=9700

@@ -25,8 +25,6 @@ configure -whitespace
 
 # except these exceptions...
 src/backend/distributed/utils/citus_outfuncs.c -citus-style
-src/backend/distributed/deparser/ruleutils_13.c -citus-style
-src/backend/distributed/deparser/ruleutils_14.c -citus-style
 src/backend/distributed/deparser/ruleutils_15.c -citus-style
 src/backend/distributed/deparser/ruleutils_16.c -citus-style
 src/backend/distributed/deparser/ruleutils_17.c -citus-style

@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       name: Upload logs
       with:
        name: ${{ inputs.folder }}

@@ -17,7 +17,7 @@ runs:
          echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
        fi
      shell: bash
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
      with:
        name: build-${{ env.PG_MAJOR }}
    - name: Install Extension

@@ -21,7 +21,7 @@ runs:
        mkdir -p /tmp/codeclimate
        cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
      shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
      with:
        path: "/tmp/codeclimate/*.json"
-       name: codeclimate
+       name: codeclimate-${{ inputs.flags }}

@@ -26,13 +26,13 @@ jobs:
       pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
       style_checker_image_name: "ghcr.io/citusdata/stylechecker"
       style_checker_tools_version: "0.8.18"
-      sql_snapshot_pg_version: "17.1"
-      image_suffix: "-v84c0cf8"
-      pg14_version: '{ "major": "14", "full": "14.14" }'
-      pg15_version: '{ "major": "15", "full": "15.9" }'
-      pg16_version: '{ "major": "16", "full": "16.5" }'
-      pg17_version: '{ "major": "17", "full": "17.1" }'
-      upgrade_pg_versions: "14.14-15.9-16.5-17.1"
+      sql_snapshot_pg_version: "17.2"
+      image_suffix: "-v889e4c1"
+      image_suffix_citus_upgrade: "-dev-2ad1f90"
+      pg15_version: '{ "major": "15", "full": "15.10" }'
+      pg16_version: '{ "major": "16", "full": "16.6" }'
+      pg17_version: '{ "major": "17", "full": "17.2" }'
+      upgrade_pg_versions: "14.15-15.10-16.6-17.2"
     steps:
       # Since GHA jobs need at least one step we use a noop step here.
       - name: Set up parameters
@@ -44,7 +44,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Check Snapshots
        run: |
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -58,7 +58,7 @@ jobs:
       - name: Check Snapshots
        run: |
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Check C Style
@@ -106,7 +106,6 @@ jobs:
         image_suffix:
           - ${{ needs.params.outputs.image_suffix}}
         pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
           - ${{ needs.params.outputs.pg15_version }}
           - ${{ needs.params.outputs.pg16_version }}
           - ${{ needs.params.outputs.pg17_version }}
@@ -115,14 +114,14 @@ jobs:
       image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Expose $PG_MAJOR to Github Env
        run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
        shell: bash
      - name: Build
        run: "./ci/build-citus.sh"
        shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -138,7 +137,6 @@ jobs:
         image_name:
           - ${{ needs.params.outputs.test_image_name }}
         pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
           - ${{ needs.params.outputs.pg15_version }}
           - ${{ needs.params.outputs.pg16_version }}
           - ${{ needs.params.outputs.pg17_version }}
@@ -159,10 +157,6 @@ jobs:
           - check-enterprise-isolation-logicalrep-2
           - check-enterprise-isolation-logicalrep-3
         include:
-          - make: check-failure
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
           - make: check-failure
             pg_version: ${{ needs.params.outputs.pg15_version }}
             suite: regress
@@ -175,10 +169,6 @@ jobs:
             pg_version: ${{ needs.params.outputs.pg17_version }}
             suite: regress
             image_name: ${{ needs.params.outputs.fail_test_image_name }}
-          - make: check-enterprise-failure
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
           - make: check-enterprise-failure
             pg_version: ${{ needs.params.outputs.pg15_version }}
             suite: regress
@@ -191,10 +181,6 @@ jobs:
             pg_version: ${{ needs.params.outputs.pg17_version }}
             suite: regress
             image_name: ${{ needs.params.outputs.fail_test_image_name }}
-          - make: check-pytest
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
           - make: check-pytest
             pg_version: ${{ needs.params.outputs.pg15_version }}
             suite: regress
@@ -219,10 +205,6 @@ jobs:
             suite: cdc
             image_name: ${{ needs.params.outputs.test_image_name }}
             pg_version: ${{ needs.params.outputs.pg17_version }}
-          - make: check-query-generator
-            pg_version: ${{ needs.params.outputs.pg14_version }}
-            suite: regress
-            image_name: ${{ needs.params.outputs.fail_test_image_name }}
           - make: check-query-generator
             pg_version: ${{ needs.params.outputs.pg15_version }}
             suite: regress
@@ -246,7 +228,7 @@ jobs:
       - params
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Run Test
        run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
@@ -275,13 +257,12 @@ jobs:
         image_name:
           - ${{ needs.params.outputs.fail_test_image_name }}
         pg_version:
-          - ${{ needs.params.outputs.pg14_version }}
           - ${{ needs.params.outputs.pg15_version }}
           - ${{ needs.params.outputs.pg16_version }}
           - ${{ needs.params.outputs.pg17_version }}
         parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Test arbitrary configs
        run: |-
@@ -303,10 +284,12 @@ jobs:
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
         with:
           folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
       - uses: "./.github/actions/upload_coverage"
+        if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
+          codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -321,23 +304,17 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - old_pg_major: 14
-            new_pg_major: 15
           - old_pg_major: 15
             new_pg_major: 16
-          - old_pg_major: 14
-            new_pg_major: 16
           - old_pg_major: 16
             new_pg_major: 17
           - old_pg_major: 15
             new_pg_major: 17
-          - old_pg_major: 14
-            new_pg_major: 17
     env:
       old_pg_major: ${{ matrix.old_pg_major }}
       new_pg_major: ${{ matrix.new_pg_major }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
        with:
          pg_major: "${{ env.old_pg_major }}"
@@ -360,22 +337,24 @@ jobs:
         if: failure()
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
         with:
           folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
       - uses: "./.github/actions/upload_coverage"
+        if: always()
         with:
           flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
+          codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-citus-upgrade:
-    name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
+    name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
     runs-on: ubuntu-20.04
     container:
-      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
+      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix_citus_upgrade }}"
       options: --user root
     needs:
       - params
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
        with:
          skip_installation: true
@@ -405,10 +384,12 @@ jobs:
           done;
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
         with:
           folder: ${{ env.PG_MAJOR }}_citus_upgrade
       - uses: "./.github/actions/upload_coverage"
+        if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_citus_upgrade
+          codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -424,10 +405,11 @@ jobs:
       - test-citus-upgrade
       - test-pg-upgrade
     steps:
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
         with:
-          name: "codeclimate"
-          path: "codeclimate"
+          pattern: codeclimate*
+          path: codeclimate
+          merge-multiple: true
       - name: Upload coverage results to Code Climate
        run: |-
          cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -439,7 +421,7 @@ jobs:
     needs:
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -457,7 +439,7 @@ jobs:
     needs:
       - build
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -476,7 +458,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/parallelization"
        id: parallelization
        with:
@@ -489,7 +471,7 @@ jobs:
     outputs:
       tests: ${{ steps.detect-regression-tests.outputs.tests }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Detect regression tests need to be ran
@@ -524,8 +506,8 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
-      - uses: actions/checkout@v3.5.0
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4.1.8
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
        run: |-

@@ -21,7 +21,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Initialize CodeQL
         uses: github/codeql-action/init@v2

@@ -28,13 +28,13 @@ jobs:
       image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Configure, Build, and Install
        run: |
          echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
          ./ci/build-citus.sh
        shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -46,7 +46,7 @@ jobs:
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/parallelization"
        id: parallelization
        with:
@@ -67,7 +67,7 @@ jobs:
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
        run: |-

@@ -115,7 +115,6 @@ jobs:
         # for each deb based image and we use POSTGRES_VERSION to set
         # PG_CONFIG variable in each of those runs.
         packaging_docker_image:
-          - debian-buster-all
           - debian-bookworm-all
           - debian-bullseye-all
           - ubuntu-focal-all
@@ -129,7 +128,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set pg_config path and python parameters for deb based distros
        run: |

CHANGELOG.md
@@ -1,3 +1,45 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
+### citus v13.0.0 (January 22, 2025) ###
+
+* Adds support for PostgreSQL 17 (#7699, #7661)
+
+* Adds `JSON_TABLE()` support in distributed queries (#7816)
+
+* Propagates `MERGE ... WHEN NOT MATCHED BY SOURCE` (#7807)
+
+* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802)
+
+* Adds support for identity columns in distributed partitioned tables (#7785)
+
+* Allows specifying an access method for distributed partitioned tables (#7818)
+
+* Allows exclusion constraints on distributed partitioned tables (#7733)
+
+* Allows configuring sslnegotiation using `citus.node_conn_info` (#7821)
+
+* Avoids wal receiver timeouts during large shard splits (#7229)
+
+* Fixes a bug causing incorrect writing of data to target `MERGE` repartition
+  command (#7659)
+
+* Fixes a crash that happens because of unsafe catalog access when re-assigning
+  the global pid after `application_name` changes (#7791)
+
+* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing
+  them to new nodes (#7534)
+
+* Fixes segfault when calling distributed procedure with a parameterized
+  distribution argument (#7242)
+
+* Fixes server crash when trying to execute `activate_node_snapshot()` on a
+  single-node cluster (#7552)
+
+* Improves `citus_move_shard_placement()` to fail early if there is a new node
+  without reference tables yet (#7467)
+
 ### citus v12.1.5 (July 17, 2024) ###
 
 * Adds support for MERGE commands with single shard distributed target tables

@@ -5,6 +5,6 @@ set -euo pipefail
 source ci/ci_helpers.sh
 
 # extract citus gucs in the form of "citus.X"
-grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
+grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
 sort -c gucs.out
 rm gucs.out

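A note on the one-character fix above: in an extended regex an unescaped `.` matches any character, so the old pattern "citus.\w+" was looser than intended; escaping it as `citus\.` pins the match to the literal "citus." prefix. The check then relies on `sort -c`, which fails if the extracted names are not already alphabetically ordered. The grep targets GUC registrations of roughly this shape — a minimal sketch in the style of shared_library_init.c, where DefineCustomBoolVariable() and gettext_noop() are the real PostgreSQL APIs but the surrounding function is illustrative:

    #include "postgres.h"
    #include "utils/guc.h"

    static bool EnableDDLPropagation = true;

    /* sketch: each registration carries a quoted "citus.*" name, which is
     * what the CI grep extracts and what must stay alphabetically sorted */
    void
    RegisterCitusConfigVariablesSketch(void)
    {
        DefineCustomBoolVariable(
            "citus.enable_ddl_propagation",
            gettext_noop("Propagates DDL statements to worker nodes."),
            NULL,
            &EnableDDLPropagation,
            true,
            PGC_USERSET,
            0,
            NULL, NULL, NULL);
    }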
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 13.0.0.
+# Generated by GNU Autoconf 2.69 for Citus 13.0.1.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='13.0.0'
-PACKAGE_STRING='Citus 13.0.0'
+PACKAGE_VERSION='13.0.1'
+PACKAGE_STRING='Citus 13.0.1'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
 
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 13.0.0 to adapt to many kinds of systems.
+\`configure' configures Citus 13.0.1 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1324,7 +1324,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 13.0.0:";;
+     short | recursive ) echo "Configuration of Citus 13.0.1:";;
   esac
   cat <<\_ACEOF
 
@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 13.0.0
+Citus configure 13.0.1
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Citus $as_me 13.0.0, which was
+It was created by Citus $as_me 13.0.1, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   $ $0 $@
@@ -2588,7 +2588,7 @@ fi
 if test "$with_pg_version_check" = no; then
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
 $as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
-elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
+elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 13.0.0, which was
+This file was extended by Citus $as_me 13.0.1, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   CONFIG_FILES = $CONFIG_FILES
@@ -5455,7 +5455,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 13.0.0
+Citus config.status 13.0.1
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 

@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
 
-AC_INIT([Citus], [13.0.0])
+AC_INIT([Citus], [13.0.1])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 
 # we'll need sed and awk for some of the version commands
@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
 
 if test "$with_pg_version_check" = no; then
   AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
-elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
+elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
   AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
   AC_MSG_NOTICE([building against PostgreSQL $version_num])

@@ -0,0 +1,133 @@
+"citus.all_modifications_commutative",
+"citus.allow_modifications_from_workers_to_replicated_tables",
+"citus.allow_nested_distributed_execution",
+"citus.allow_unsafe_constraints",
+"citus.allow_unsafe_locks_from_workers",
+"citus.background_task_queue_interval",
+"citus.check_available_space_before_move",
+"citus.cluster_name",
+"citus.coordinator_aggregation_strategy",
+"citus.copy_switchover_threshold",
+"citus.count_distinct_error_rate",
+"citus.cpu_priority",
+"citus.cpu_priority_for_logical_replication_senders",
+"citus.create_object_propagation",
+"citus.defer_drop_after_shard_move",
+"citus.defer_drop_after_shard_split",
+"citus.defer_shard_delete_interval",
+"citus.desired_percent_disk_available_after_move",
+"citus.distributed_deadlock_detection_factor",
+"citus.enable_alter_database_owner",
+"citus.enable_alter_role_propagation",
+"citus.enable_alter_role_set_propagation",
+"citus.enable_binary_protocol",
+"citus.enable_change_data_capture",
+"citus.enable_cluster_clock",
+"citus.enable_cost_based_connection_establishment",
+"citus.enable_create_role_propagation",
+"citus.enable_create_type_propagation",
+"citus.enable_ddl_propagation",
+"citus.enable_deadlock_prevention",
+"citus.enable_fast_path_router_planner",
+"citus.enable_local_execution",
+"citus.enable_local_reference_table_foreign_keys",
+"citus.enable_manual_changes_to_shards",
+"citus.enable_manual_metadata_changes_for_user",
+"citus.enable_metadata_sync",
+"citus.enable_non_colocated_router_query_pushdown",
+"citus.enable_repartition_joins",
+"citus.enable_repartitioned_insert_select",
+"citus.enable_router_execution",
+"citus.enable_schema_based_sharding",
+"citus.enable_single_hash_repartition_joins",
+"citus.enable_statistics_collection",
+"citus.enable_unique_job_ids",
+"citus.enable_unsafe_triggers",
+"citus.enable_unsupported_feature_messages",
+"citus.enable_version_checks",
+"citus.enforce_foreign_key_restrictions",
+"citus.enforce_object_restrictions_for_local_objects",
+"citus.executor_slow_start_interval",
+"citus.explain_all_tasks",
+"citus.explain_analyze_sort_method",
+"citus.explain_distributed_queries",
+"citus.force_max_query_parallelization",
+"citus.function_opens_transaction_block",
+"citus.grep_remote_commands",
+"citus.hide_citus_dependent_objects",
+"citus.hide_shards_from_app_name_prefixes",
+"citus.isolation_test_session_process_id",
+"citus.isolation_test_session_remote_process_id",
+"citus.limit_clause_row_fetch_count",
+"citus.local_copy_flush_threshold",
+"citus.local_hostname",
+"citus.local_shared_pool_size",
+"citus.local_table_join_policy",
+"citus.log_distributed_deadlock_detection",
+"citus.log_intermediate_results",
+"citus.log_local_commands",
+"citus.log_multi_join_order",
+"citus.log_remote_commands",
+"citus.logical_replication_timeout",
+"citus.main_db",
+"citus.max_adaptive_executor_pool_size",
+"citus.max_background_task_executors",
+"citus.max_background_task_executors_per_node",
+"citus.max_cached_connection_lifetime",
+"citus.max_cached_conns_per_worker",
+"citus.max_client_connections",
+"citus.max_high_priority_background_processes",
+"citus.max_intermediate_result_size",
+"citus.max_matview_size_to_auto_recreate",
+"citus.max_rebalancer_logged_ignored_moves",
+"citus.max_shared_pool_size",
+"citus.max_worker_nodes_tracked",
+"citus.metadata_sync_interval",
+"citus.metadata_sync_mode",
+"citus.metadata_sync_retry_interval",
+"citus.mitmfifo",
+"citus.multi_shard_modify_mode",
+"citus.multi_task_query_log_level",
+"citus.next_cleanup_record_id",
+"citus.next_operation_id",
+"citus.next_placement_id",
+"citus.next_shard_id",
+"citus.node_connection_timeout",
+"citus.node_conninfo",
+"citus.override_table_visibility",
+"citus.prevent_incomplete_connection_establishment",
+"citus.propagate_session_settings_for_loopback_connection",
+"citus.propagate_set_commands",
+"citus.rebalancer_by_disk_size_base_cost",
+"citus.recover_2pc_interval",
+"citus.remote_copy_flush_threshold",
+"citus.remote_task_check_interval",
+"citus.repartition_join_bucket_count_per_node",
+"citus.replicate_reference_tables_on_activate",
+"citus.replication_model",
+"citus.running_under_citus_test_suite",
+"citus.select_opens_transaction_block",
+"citus.shard_count",
+"citus.shard_replication_factor",
+"citus.show_shards_for_app_name_prefixes",
+"citus.skip_advisory_lock_permission_checks",
+"citus.skip_constraint_validation",
+"citus.skip_jsonb_validation_in_copy",
+"citus.sort_returning",
+"citus.stat_statements_max",
+"citus.stat_statements_purge_interval",
+"citus.stat_statements_track",
+"citus.stat_tenants_limit",
+"citus.stat_tenants_log_level",
+"citus.stat_tenants_period",
+"citus.stat_tenants_track",
+"citus.stat_tenants_untracked_sample_rate",
+"citus.subquery_pushdown",
+"citus.task_assignment_policy",
+"citus.task_executor_type",
+"citus.use_citus_managed_tables",
+"citus.use_secondary_nodes",
+"citus.values_materialization_threshold",
+"citus.version",
+"citus.worker_min_messages",
+"citus.writable_standby_coordinator",

@@ -1051,6 +1051,15 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses)
 
     candidateRelids = bms_del_members(candidateRelids, rel->relids);
     candidateRelids = bms_del_members(candidateRelids, rel->lateral_relids);
+
+    /*
+     * For the relevant PG16 commit requiring this addition:
+     * postgres/postgres@2489d76
+     */
+#if PG_VERSION_NUM >= PG_VERSION_16
+    candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels);
+#endif
+
     return candidateRelids;
 }
 
@@ -1312,11 +1321,8 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
 
     cpath->methods = &ColumnarScanPathMethods;
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
     /* necessary to avoid extra Result node in PG15 */
     cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION;
-#endif
 
     /*
      * populate generic path information

@@ -1686,7 +1686,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
     simple_heap_delete(state->rel, tid);
 
     /* execute AFTER ROW DELETE Triggers to enforce constraints */
-    ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false);
+    ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL, false);
 }
 
 

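The `_compat` suffix above comes from Citus's version shims: PostgreSQL 15 added an is_crosspart_update argument to ExecARDeleteTriggers(), so while PG14 was still supported the call had to go through a macro that dropped the extra argument on older servers. With PG14 support removed, the six-argument call works on every supported version. A sketch of the kind of shim being retired — the macro body here is assumed, not quoted from the Citus tree:

    #if PG_VERSION_NUM >= PG_VERSION_15
    #define ExecARDeleteTriggers_compat(estate, relinfo, tid, fdwtup, transition, crosspart) \
        ExecARDeleteTriggers(estate, relinfo, tid, fdwtup, transition, crosspart)
    #else
    /* PG14 has no is_crosspart_update parameter; drop the last argument */
    #define ExecARDeleteTriggers_compat(estate, relinfo, tid, fdwtup, transition, crosspart) \
        ExecARDeleteTriggers(estate, relinfo, tid, fdwtup, transition)
    #endif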
@@ -877,7 +877,7 @@ columnar_relation_set_new_filelocator(Relation rel,
 
     *freezeXid = RecentXmin;
     *minmulti = GetOldestMultiXactId();
-    SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true);
+    SMgrRelation srel = RelationCreateStorage(*newrlocator, persistence, true);
 
     ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
     InitColumnarOptions(rel->rd_id);
 
@@ -2245,7 +2245,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
             columnarRangeVar = alterTableStmt->relation;
         }
     }
-#if PG_VERSION_NUM >= PG_VERSION_15
     else if (alterTableCmd->subtype == AT_SetAccessMethod)
     {
         if (columnarRangeVar || *columnarOptions)
@@ -2265,7 +2264,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
             DeleteColumnarTableOptions(RelationGetRelid(rel), true);
         }
     }
-#endif /* PG_VERSION_15 */
     }
 
     relation_close(rel, NoLock);
@@ -2649,21 +2647,12 @@ ColumnarCheckLogicalReplication(Relation rel)
         return;
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_15
     {
         PublicationDesc pubdesc;
 
         RelationBuildPublicationDesc(rel, &pubdesc);
         pubActionInsert = pubdesc.pubactions.pubinsert;
     }
-#else
-    if (rel->rd_pubactions == NULL)
-    {
-        GetRelationPublicationActions(rel);
-        Assert(rel->rd_pubactions != NULL);
-    }
-    pubActionInsert = rel->rd_pubactions->pubinsert;
-#endif
 
     if (pubActionInsert)
     {
@@ -3040,6 +3029,8 @@ AvailableExtensionVersionColumnar(void)
 
     ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                     errmsg("citus extension is not found")));
+
+    return NULL; /* keep compiler happy */
 }
 
 

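Same PG14-retirement pattern here: RelationCreateStorage() gained its third argument in PG15, and the logical-replication check keeps only the PG15+ branch, where RelationBuildPublicationDesc() fills a caller-provided PublicationDesc instead of poking at the older rd_pubactions relcache field. A minimal sketch of the surviving shape — the helper name is hypothetical, while the PublicationDesc API is the real PG15+ one:

    #include "postgres.h"
    #include "utils/rel.h"
    #include "utils/relcache.h"

    /* hypothetical helper: is this relation in a publication that
     * publishes INSERTs? (PG15+ API) */
    static bool
    RelationPublishesInserts(Relation rel)
    {
        PublicationDesc pubdesc;

        RelationBuildPublicationDesc(rel, &pubdesc);
        return pubdesc.pubactions.pubinsert;
    }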
@@ -145,17 +145,6 @@ LogicalClockShmemSize(void)
 void
 InitializeClusterClockMem(void)
 {
-    /* On PG 15 and above, we use shmem_request_hook_type */
-#if PG_VERSION_NUM < PG_VERSION_15
-
-    /* allocate shared memory for pre PG-15 versions */
-    if (!IsUnderPostmaster)
-    {
-        RequestAddinShmemSpace(LogicalClockShmemSize());
-    }
-
-#endif
-
     prev_shmem_startup_hook = shmem_startup_hook;
     shmem_startup_hook = LogicalClockShmemInit;
 }

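Context for the deletion above: before PG15, extensions reserved shared memory by calling RequestAddinShmemSpace() during library load, guarded by !IsUnderPostmaster; PG15 introduced shmem_request_hook, which now covers every supported version once PG14 is gone, so the fallback block is dead code. A sketch of the PG15+ pattern, assuming registration happens in _PG_init() — the hook variables and request function are the real PostgreSQL APIs, the function name is illustrative:

    #include "postgres.h"
    #include "storage/ipc.h"
    #include "storage/shmem.h"

    static shmem_request_hook_type prev_shmem_request_hook = NULL;

    static void
    ClockShmemRequest(void)
    {
        if (prev_shmem_request_hook != NULL)
        {
            prev_shmem_request_hook();
        }

        /* reserve space here; allocation happens in shmem_startup_hook */
        RequestAddinShmemSpace(LogicalClockShmemSize());
    }

    void
    _PG_init(void)
    {
        prev_shmem_request_hook = shmem_request_hook;
        shmem_request_hook = ClockShmemRequest;
    }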
@@ -209,12 +209,9 @@ static void ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommand
 static bool HasAnyGeneratedStoredColumns(Oid relationId);
 static List * GetNonGeneratedStoredColumnNameList(Oid relationId);
 static void CheckAlterDistributedTableConversionParameters(TableConversionState *con);
-static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName,
-                                                          char *sequenceName,
-                                                          char *sourceSchemaName,
-                                                          char *sourceName,
-                                                          char *targetSchemaName,
-                                                          char *targetName);
+static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName,
+                                                          char *qualifiedSourceName,
+                                                          char *qualifiedTargetName);
 static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid);
 static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
 static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
@@ -791,13 +788,15 @@ ConvertTableInternal(TableConversionState *con)
         justBeforeDropCommands = lappend(justBeforeDropCommands, detachFromParentCommand);
     }
 
+    char *qualifiedRelationName = quote_qualified_identifier(con->schemaName,
+                                                             con->relationName);
+
     if (PartitionedTable(con->relationId))
     {
         if (!con->suppressNoticeMessages)
         {
             ereport(NOTICE, (errmsg("converting the partitions of %s",
-                                    quote_qualified_identifier(con->schemaName,
-                                                               con->relationName))));
+                                    qualifiedRelationName)));
         }
 
         List *partitionList = PartitionList(con->relationId);
@@ -870,9 +869,7 @@ ConvertTableInternal(TableConversionState *con)
 
     if (!con->suppressNoticeMessages)
     {
-        ereport(NOTICE, (errmsg("creating a new table for %s",
-                                quote_qualified_identifier(con->schemaName,
-                                                           con->relationName))));
+        ereport(NOTICE, (errmsg("creating a new table for %s", qualifiedRelationName)));
     }
 
     TableDDLCommand *tableCreationCommand = NULL;
@@ -999,8 +996,6 @@ ConvertTableInternal(TableConversionState *con)
         {
             continue;
         }
-        char *qualifiedRelationName = quote_qualified_identifier(con->schemaName,
-                                                                 con->relationName);
 
         TableConversionParameters cascadeParam = {
             .relationId = colocatedTableId,
@@ -1750,9 +1745,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid)
 {
     StringInfo query = makeStringInfo();
 
-    char *viewName = get_rel_name(matViewOid);
-    char *schemaName = get_namespace_name(get_rel_namespace(matViewOid));
-    char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+    char *qualifiedViewName = generate_qualified_relation_name(matViewOid);
 
     /* here we need to get the access method of the view to recreate it */
     char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid);
@@ -1801,9 +1794,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
              bool suppressNoticeMessages)
 {
     char *sourceName = get_rel_name(sourceId);
-    char *targetName = get_rel_name(targetId);
-    Oid schemaId = get_rel_namespace(sourceId);
-    char *schemaName = get_namespace_name(schemaId);
+    char *qualifiedSourceName = generate_qualified_relation_name(sourceId);
+    char *qualifiedTargetName = generate_qualified_relation_name(targetId);
 
     StringInfo query = makeStringInfo();
 
@@ -1811,8 +1803,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
     {
         if (!suppressNoticeMessages)
         {
-            ereport(NOTICE, (errmsg("moving the data of %s",
-                                    quote_qualified_identifier(schemaName, sourceName))));
+            ereport(NOTICE, (errmsg("moving the data of %s", qualifiedSourceName)));
         }
 
         if (!HasAnyGeneratedStoredColumns(sourceId))
@@ -1822,8 +1813,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
              * "INSERT INTO .. SELECT *"".
              */
             appendStringInfo(query, "INSERT INTO %s SELECT * FROM %s",
-                             quote_qualified_identifier(schemaName, targetName),
-                             quote_qualified_identifier(schemaName, sourceName));
+                             qualifiedTargetName, qualifiedSourceName);
         }
         else
         {
@@ -1838,9 +1828,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
             char *insertColumnString = StringJoin(nonStoredColumnNameList, ',');
             appendStringInfo(query,
                              "INSERT INTO %s (%s) OVERRIDING SYSTEM VALUE SELECT %s FROM %s",
-                             quote_qualified_identifier(schemaName, targetName),
-                             insertColumnString, insertColumnString,
-                             quote_qualified_identifier(schemaName, sourceName));
+                             qualifiedTargetName, insertColumnString,
+                             insertColumnString, qualifiedSourceName);
         }
 
         ExecuteQueryViaSPI(query->data, SPI_OK_INSERT);
@@ -1864,14 +1853,11 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
      */
     if (ShouldSyncTableMetadata(targetId))
     {
-        Oid sequenceSchemaOid = get_rel_namespace(sequenceOid);
-        char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid);
-        char *sequenceName = get_rel_name(sequenceOid);
+        char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid);
         char *workerChangeSequenceDependencyCommand =
-            CreateWorkerChangeSequenceDependencyCommand(sequenceSchemaName,
-                                                        sequenceName,
-                                                        schemaName, sourceName,
-                                                        schemaName, targetName);
+            CreateWorkerChangeSequenceDependencyCommand(qualifiedSequenceName,
                                                        qualifiedSourceName,
+                                                        qualifiedTargetName);
         SendCommandToWorkersWithMetadata(workerChangeSequenceDependencyCommand);
     }
     else if (ShouldSyncTableMetadata(sourceId))
@@ -1894,25 +1880,23 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 
     if (!suppressNoticeMessages)
     {
-        ereport(NOTICE, (errmsg("dropping the old %s",
-                                quote_qualified_identifier(schemaName, sourceName))));
+        ereport(NOTICE, (errmsg("dropping the old %s", qualifiedSourceName)));
     }
 
     resetStringInfo(query);
     appendStringInfo(query, "DROP %sTABLE %s CASCADE",
                      IsForeignTable(sourceId) ? "FOREIGN " : "",
-                     quote_qualified_identifier(schemaName, sourceName));
+                     qualifiedSourceName);
     ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY);
 
     if (!suppressNoticeMessages)
     {
-        ereport(NOTICE, (errmsg("renaming the new table to %s",
-                                quote_qualified_identifier(schemaName, sourceName))));
+        ereport(NOTICE, (errmsg("renaming the new table to %s", qualifiedSourceName)));
     }
 
     resetStringInfo(query);
     appendStringInfo(query, "ALTER TABLE %s RENAME TO %s",
-                     quote_qualified_identifier(schemaName, targetName),
+                     qualifiedTargetName,
                      quote_identifier(sourceName));
     ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY);
 }
@@ -2172,18 +2156,13 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con)
  * worker_change_sequence_dependency query with the parameters.
  */
 static char *
-CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, char *sequenceName,
-                                            char *sourceSchemaName, char *sourceName,
-                                            char *targetSchemaName, char *targetName)
+CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName,
+                                            char *qualifiedSourceName,
+                                            char *qualifiedTargetName)
 {
-    char *qualifiedSchemaName = quote_qualified_identifier(sequenceSchemaName,
-                                                           sequenceName);
-    char *qualifiedSourceName = quote_qualified_identifier(sourceSchemaName, sourceName);
-    char *qualifiedTargetName = quote_qualified_identifier(targetSchemaName, targetName);
-
     StringInfo query = makeStringInfo();
     appendStringInfo(query, "SELECT worker_change_sequence_dependency(%s, %s, %s)",
-                     quote_literal_cstr(qualifiedSchemaName),
+                     quote_literal_cstr(qualifiedSequeceName),
                      quote_literal_cstr(qualifiedSourceName),
                      quote_literal_cstr(qualifiedTargetName));
 

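The bulk of the alter_table.c churn above is one refactor: every hand-rolled name-assembly sequence collapses into generate_qualified_relation_name(). Both spellings produce the same schema-qualified string (quoted only where identifiers require it), so this is a behavior-preserving cleanup. A minimal sketch of the equivalence, using the same lookup and quoting calls that appear in the diff:

    /* before: three catalog lookups plus manual quoting */
    char *relationName = get_rel_name(relationId);
    char *schemaName = get_namespace_name(get_rel_namespace(relationId));
    char *qualifiedBefore = quote_qualified_identifier(schemaName, relationName);

    /* after: one helper call, e.g. myschema."MyTable" for a mixed-case name */
    char *qualifiedAfter = generate_qualified_relation_name(relationId);

    Assert(strcmp(qualifiedBefore, qualifiedAfter) == 0);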
@@ -1160,9 +1160,7 @@ DropIdentitiesOnTable(Oid relationId)
 
         if (attributeForm->attidentity)
         {
-            char *tableName = get_rel_name(relationId);
-            char *schemaName = get_namespace_name(get_rel_namespace(relationId));
-            char *qualifiedTableName = quote_qualified_identifier(schemaName, tableName);
+            char *qualifiedTableName = generate_qualified_relation_name(relationId);
 
             StringInfo dropCommand = makeStringInfo();
 
@@ -1222,9 +1220,7 @@ DropViewsOnTable(Oid relationId)
     Oid viewId = InvalidOid;
     foreach_declared_oid(viewId, reverseOrderedViews)
     {
-        char *viewName = get_rel_name(viewId);
-        char *schemaName = get_namespace_name(get_rel_namespace(viewId));
-        char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+        char *qualifiedViewName = generate_qualified_relation_name(viewId);
 
         StringInfo dropCommand = makeStringInfo();
         appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s",

@@ -68,8 +68,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
     char *collcollate;
     char *collctype;
 
-#if PG_VERSION_NUM >= PG_VERSION_15
-
     /*
      * In PG15, there is an added option to use ICU as global locale provider.
      * pg_collation has three locale-related fields: collcollate and collctype,
@@ -112,16 +110,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
     }
 
     Assert((collcollate && collctype) || colllocale);
-#else
-
-    /*
-     * In versions before 15, collcollate and collctype were type "name". Use
-     * pstrdup() to match the interface of 15 so that we consistently free the
-     * result later.
-     */
-    collcollate = pstrdup(NameStr(collationForm->collcollate));
-    collctype = pstrdup(NameStr(collationForm->collctype));
-#endif
 
     if (collowner != NULL)
     {
@@ -147,7 +135,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
                      "CREATE COLLATION %s (provider = '%s'",
                      *quotedCollationName, providerString);
 
-#if PG_VERSION_NUM >= PG_VERSION_15
     if (colllocale)
     {
         appendStringInfo(&collationNameDef,
@@ -173,24 +160,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
         pfree(collcollate);
         pfree(collctype);
     }
-#else
-    if (strcmp(collcollate, collctype) == 0)
-    {
-        appendStringInfo(&collationNameDef,
-                         ", locale = %s",
-                         quote_literal_cstr(collcollate));
-    }
-    else
-    {
-        appendStringInfo(&collationNameDef,
-                         ", lc_collate = %s, lc_ctype = %s",
-                         quote_literal_cstr(collcollate),
-                         quote_literal_cstr(collctype));
-    }
 
-    pfree(collcollate);
-    pfree(collctype);
-#endif
 #if PG_VERSION_NUM >= PG_VERSION_16
     char *collicurules = NULL;
     datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);

@@ -170,12 +170,10 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId,
 static void CopyLocalDataIntoShards(Oid distributedTableId);
 static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
 static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
                                                              Var *distributionColumn);
 static int numeric_typmod_scale(int32 typmod);
 static bool is_valid_numeric_typmod(int32 typmod);
-#endif
 
 static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
                                                         Var *distributionColumn);
@@ -1325,10 +1323,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
     {
         List *partitionList = PartitionList(relationId);
         Oid partitionRelationId = InvalidOid;
-        Oid namespaceId = get_rel_namespace(relationId);
-        char *schemaName = get_namespace_name(namespaceId);
-        char *relationName = get_rel_name(relationId);
-        char *parentRelationName = quote_qualified_identifier(schemaName, relationName);
+        char *parentRelationName = generate_qualified_relation_name(relationId);
 
         /*
          * when there are many partitions, each call to CreateDistributedTable
@@ -2117,8 +2112,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
                                "AS (...) STORED.")));
     }
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
     /* verify target relation is not distributed by a column of type numeric with negative scale */
     if (distributionMethod != DISTRIBUTE_BY_NONE &&
         DistributionColumnUsesNumericColumnNegativeScale(relationDesc,
@@ -2129,7 +2122,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
                         errdetail("Distribution column must not use numeric type "
                                   "with negative scale")));
     }
-#endif
 
     /* check for support function needed by specified partition method */
     if (distributionMethod == DISTRIBUTE_BY_HASH)
@@ -2847,8 +2839,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 }
 
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
 /*
  * is_valid_numeric_typmod checks if the typmod value is valid
  *
@@ -2898,8 +2888,6 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
 }
 
 
-#endif
-
 /*
  * DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
  * GENERATED ALWAYS AS (...) STORED on distribution column

@@ -185,8 +185,6 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
 }
 
 
-#if PG_VERSION_NUM >= PG_VERSION_15
-
 /*
  * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local
  * postgres instance.
@@ -217,9 +215,6 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
 }
 
 
-#endif
-
-
 /*
  * GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress
  * of the database.

@@ -364,6 +364,15 @@ static DistributeObjectOps Any_Rename = {
     .address = NULL,
     .markDistributed = false,
 };
+static DistributeObjectOps Any_SecLabel = {
+    .deparse = DeparseSecLabelStmt,
+    .qualify = NULL,
+    .preprocess = NULL,
+    .postprocess = PostprocessSecLabelStmt,
+    .operationType = DIST_OPS_ALTER,
+    .address = SecLabelStmtObjectAddress,
+    .markDistributed = false,
+};
 static DistributeObjectOps Attribute_Rename = {
     .deparse = DeparseRenameAttributeStmt,
     .qualify = QualifyRenameAttributeStmt,
@@ -456,7 +465,6 @@ static DistributeObjectOps Database_Alter = {
     .markDistributed = false,
 };
 
-#if PG_VERSION_NUM >= PG_VERSION_15
 static DistributeObjectOps Database_RefreshColl = {
     .deparse = DeparseAlterDatabaseRefreshCollStmt,
     .qualify = NULL,
@@ -467,7 +475,6 @@ static DistributeObjectOps Database_RefreshColl = {
     .address = NULL,
     .markDistributed = false,
 };
-#endif
 
 static DistributeObjectOps Domain_Alter = {
     .deparse = DeparseAlterDomainStmt,
@@ -828,7 +835,6 @@ static DistributeObjectOps Sequence_AlterOwner = {
     .address = AlterSequenceOwnerStmtObjectAddress,
     .markDistributed = false,
 };
-#if (PG_VERSION_NUM >= PG_VERSION_15)
 static DistributeObjectOps Sequence_AlterPersistence = {
     .deparse = DeparseAlterSequencePersistenceStmt,
     .qualify = QualifyAlterSequencePersistenceStmt,
@@ -838,7 +844,6 @@ static DistributeObjectOps Sequence_AlterPersistence = {
     .address = AlterSequencePersistenceStmtObjectAddress,
     .markDistributed = false,
 };
-#endif
 static DistributeObjectOps Sequence_Drop = {
     .deparse = DeparseDropSequenceStmt,
     .qualify = QualifyDropSequenceStmt,
@@ -1290,7 +1295,7 @@ static DistributeObjectOps View_Rename = {
 static DistributeObjectOps Trigger_Rename = {
     .deparse = NULL,
     .qualify = NULL,
-    .preprocess = PreprocessAlterTriggerRenameStmt,
+    .preprocess = NULL,
     .operationType = DIST_OPS_ALTER,
     .postprocess = PostprocessAlterTriggerRenameStmt,
     .address = NULL,
@@ -1312,13 +1317,11 @@ GetDistributeObjectOps(Node *node)
             return &Database_Alter;
         }
 
-#if PG_VERSION_NUM >= PG_VERSION_15
         case T_AlterDatabaseRefreshCollStmt:
         {
             return &Database_RefreshColl;
         }
 
-#endif
         case T_AlterDomainStmt:
         {
             return &Domain_Alter;
@@ -1603,7 +1606,6 @@ GetDistributeObjectOps(Node *node)
 
             case OBJECT_SEQUENCE:
             {
-#if (PG_VERSION_NUM >= PG_VERSION_15)
                 ListCell *cmdCell = NULL;
                 foreach(cmdCell, stmt->cmds)
                 {
@@ -1631,7 +1633,6 @@ GetDistributeObjectOps(Node *node)
                     }
                 }
             }
-#endif
 
                 /*
                  * Prior to PG15, the only Alter Table statement
@@ -1991,6 +1992,11 @@ GetDistributeObjectOps(Node *node)
             return &Vacuum_Analyze;
         }
 
+        case T_SecLabelStmt:
+        {
+            return &Any_SecLabel;
+        }
+
         case T_RenameStmt:
         {
             RenameStmt *stmt = castNode(RenameStmt, node);

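The new Any_SecLabel entry above is how SECURITY LABEL statements get wired into Citus's generic DDL-propagation machinery: GetDistributeObjectOps() switches on the parse-node tag (note the new T_SecLabelStmt case) and returns a table entry whose callbacks drive name qualification, deparse, and pre/postprocessing. A rough sketch of how a caller might consume such an entry — the helper is illustrative, not the actual utility-hook code:

    /* illustrative consumer of a DistributeObjectOps entry */
    static char *
    DeparseForWorkersIfSupported(Node *parsetree)
    {
        const DistributeObjectOps *ops = GetDistributeObjectOps(parsetree);

        if (ops->deparse == NULL)
        {
            return NULL;             /* this statement is not propagated */
        }

        if (ops->qualify != NULL)
        {
            ops->qualify(parsetree); /* schema-qualify names first */
        }

        return ops->deparse(parsetree);
    }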
@@ -467,7 +467,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
     }
 
     List *onDeleteSetDefColumnList = NIL;
-#if PG_VERSION_NUM >= PG_VERSION_15
     Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
                                                        Anum_pg_constraint_confdelsetcols,
                                                        &isNull);
@@ -482,7 +481,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
         onDeleteSetDefColumnList =
             IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
     }
-#endif
 
     if (list_length(onDeleteSetDefColumnList) == 0)
     {

@@ -2549,12 +2549,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
 
         if (columnNulls[partitionColumnIndex])
         {
-            Oid relationId = copyDest->distributedRelationId;
-            char *relationName = get_rel_name(relationId);
-            Oid schemaOid = get_rel_namespace(relationId);
-            char *schemaName = get_namespace_name(schemaOid);
-            char *qualifiedTableName = quote_qualified_identifier(schemaName,
-                                                                  relationName);
+            char *qualifiedTableName = generate_qualified_relation_name(
+                copyDest->distributedRelationId);
 
             ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                             errmsg("the partition column of table %s cannot be NULL",

@@ -33,11 +33,9 @@
 
 
 static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
 static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId,
                                                             Oid publicationId,
                                                             bool tableOnly);
-#endif
 static void AppendPublishOptionList(StringInfo str, List *strings);
 static char * AlterPublicationOwnerCommand(Oid publicationId);
 static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt);
@@ -154,7 +152,6 @@ BuildCreatePublicationStmt(Oid publicationId)
 
     ReleaseSysCache(publicationTuple);
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
     List *schemaIds = GetPublicationSchemas(publicationId);
     Oid schemaId = InvalidOid;
 
@@ -170,7 +167,6 @@ BuildCreatePublicationStmt(Oid publicationId)
 
         createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
     }
-#endif
 
     List *relationIds = GetPublicationRelations(publicationId,
                                                 publicationForm->pubviaroot ?
@@ -184,7 +180,6 @@ BuildCreatePublicationStmt(Oid publicationId)
 
     foreach_declared_oid(relationId, relationIds)
     {
-#if (PG_VERSION_NUM >= PG_VERSION_15)
         bool tableOnly = false;
 
         /* since postgres 15, tables can have a column list and filter */
@@ -192,15 +187,6 @@ BuildCreatePublicationStmt(Oid publicationId)
             BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly);
 
         createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
-#else
-
-        /* before postgres 15, only full tables are supported */
-        char *schemaName = get_namespace_name(get_rel_namespace(relationId));
-        char *tableName = get_rel_name(relationId);
-        RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
-
-        createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
-#endif
 
         if (IsCitusTable(relationId))
         {
@@ -276,8 +262,6 @@ AppendPublishOptionList(StringInfo str, List *options)
 }
 
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
 /*
  * BuildPublicationRelationObjSpec returns a PublicationObjSpec that
  * can be included in a CREATE or ALTER PUBLICATION statement.
@@ -357,9 +341,6 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId,
 }
 
 
-#endif
-
-
 /*
  * PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements
  * in a way that is mostly similar to PreprocessAlterDistributedObjectStmt,
@@ -458,7 +439,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
 
     ReleaseSysCache(pubTuple);
 
-#if (PG_VERSION_NUM >= PG_VERSION_15)
     bool tableOnly = !isAdd;
 
     /* since postgres 15, tables can have a column list and filter */
@@ -467,16 +447,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
 
     alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject);
     alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects;
-#else
-
-    /* before postgres 15, only full tables are supported */
-    char *schemaName = get_namespace_name(get_rel_namespace(relationId));
-    char *tableName = get_rel_name(relationId);
-    RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
-
-    alterPubStmt->tables = lappend(alterPubStmt->tables, rangeVar);
-    alterPubStmt->tableAction = isAdd ? DEFELEM_ADD : DEFELEM_DROP;
-#endif
 
     /* we take the WHERE clause from the catalog where it is already transformed */
     bool whereClauseNeedsTransform = false;

@@ -22,6 +22,7 @@
 #include "catalog/pg_auth_members.h"
 #include "catalog/pg_authid.h"
 #include "catalog/pg_db_role_setting.h"
+#include "catalog/pg_shseclabel.h"
 #include "catalog/pg_type.h"
 #include "commands/dbcommands.h"
 #include "nodes/makefuncs.h"

@@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
+static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename);
static void EnsureSequentialModeForRoleDDL(void);

static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,

@@ -491,18 +493,17 @@ GenerateRoleOptionsList(HeapTuple tuple)
		options = lappend(options, makeDefElem("password", NULL, -1));
	}

-	/* load valid unitl data from the heap tuple, use default of infinity if not set */
+	/* load valid until data from the heap tuple */
	Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple,
											   Anum_pg_authid_rolvaliduntil, &isNull);
-	char *rolValidUntil = "infinity";
	if (!isNull)
	{
-		rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));
-	}
+		char *rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));

-	Node *validUntilStringNode = (Node *) makeString(rolValidUntil);
-	DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1);
-	options = lappend(options, validUntilOption);
+		Node *validUntilStringNode = (Node *) makeString(rolValidUntil);
+		DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1);
+		options = lappend(options, validUntilOption);
+	}

	return options;
}

@@ -517,13 +518,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
	HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid));
	Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));
+	char *rolename = pstrdup(NameStr(role->rolname));

	CreateRoleStmt *createRoleStmt = NULL;
	if (EnableCreateRolePropagation)
	{
		createRoleStmt = makeNode(CreateRoleStmt);
		createRoleStmt->stmt_type = ROLESTMT_ROLE;
-		createRoleStmt->role = pstrdup(NameStr(role->rolname));
+		createRoleStmt->role = rolename;
		createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
	}

@@ -534,7 +536,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
		alterRoleStmt->role = makeNode(RoleSpec);
		alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
		alterRoleStmt->role->location = -1;
-		alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname));
+		alterRoleStmt->role->rolename = rolename;
		alterRoleStmt->action = 1;
		alterRoleStmt->options = GenerateRoleOptionsList(roleTuple);
	}

@@ -546,7 +548,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
	{
		/* add a worker_create_or_alter_role command if any of them are set */
		char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(
-			pstrdup(NameStr(role->rolname)),
+			rolename,
			createRoleStmt,
			alterRoleStmt);

@@ -568,6 +570,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
	{
		completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
	}

+	/*
+	 * append SECURITY LABEL ON ROLE commands for this specific user
+	 * When we propagate user creation, we also want to make sure that we propagate
+	 * all the security labels it has been given. For this, we check pg_shseclabel
+	 * for the ROLE entry corresponding to roleOid, and generate the relevant
+	 * SecLabel stmts to be run in the new node.
+	 */
+	List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
+	stmt = NULL;
+	foreach_declared_ptr(stmt, secLabelOnRoleStmts)
+	{
+		completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
+	}
	}

	return completeRoleList;

@@ -897,6 +913,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
}


+/*
+ * GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role
+ * whose oid is roleid.
+ */
+static List *
+GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename)
+{
+	List *secLabelStmts = NIL;
+
+	/*
+	 * Note that roles are shared database objects, therefore their
+	 * security labels are stored in pg_shseclabel instead of pg_seclabel.
+	 */
+	Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock);
+	ScanKeyData skey[1];
+	ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(roleid));
+	SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
+										  true, NULL, 1, &skey[0]);
+
+	HeapTuple tuple = NULL;
+	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
+	{
+		SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt);
+		secLabelStmt->objtype = OBJECT_ROLE;
+		secLabelStmt->object = (Node *) makeString(pstrdup(rolename));
+
+		Datum datumArray[Natts_pg_shseclabel];
+		bool isNullArray[Natts_pg_shseclabel];
+
+		heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray,
+						  isNullArray);
+
+		secLabelStmt->provider = TextDatumGetCString(
+			datumArray[Anum_pg_shseclabel_provider - 1]);
+		secLabelStmt->label = TextDatumGetCString(
+			datumArray[Anum_pg_shseclabel_label - 1]);
+
+		secLabelStmts = lappend(secLabelStmts, secLabelStmt);
+	}
+
+	systable_endscan(scan);
+	table_close(pg_shseclabel, AccessShareLock);
+
+	return secLabelStmts;
+}
+
+
/*
 * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
 * role that is being created. With that query we can create the role in the

@@ -963,13 +1027,8 @@ makeStringConst(char *str, int location)
{
	A_Const *n = makeNode(A_Const);

-#if PG_VERSION_NUM >= PG_VERSION_15
	n->val.sval.type = T_String;
	n->val.sval.sval = str;
-#else
-	n->val.type = T_String;
-	n->val.val.str = str;
-#endif
	n->location = location;

	return (Node *) n;

@@ -989,13 +1048,8 @@ makeIntConst(int val, int location)
{
	A_Const *n = makeNode(A_Const);

-#if PG_VERSION_NUM >= PG_VERSION_15
	n->val.ival.type = T_Integer;
	n->val.ival.ival = val;
-#else
-	n->val.type = T_Integer;
-	n->val.val.ival = val;
-#endif
	n->location = location;

	return (Node *) n;

@@ -1012,13 +1066,8 @@ makeFloatConst(char *str, int location)
{
	A_Const *n = makeNode(A_Const);

-#if PG_VERSION_NUM >= PG_VERSION_15
	n->val.fval.type = T_Float;
	n->val.fval.fval = str;
-#else
-	n->val.type = T_Float;
-	n->val.val.str = str;
-#endif
	n->location = location;

	return (Node *) n;
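Not part of the diff: the three helpers above differ only in which arm of A_Const's val union they set, and the dropped #else branches used the pre-PG15 spelling. A minimal standalone sketch with mock types (not the real PostgreSQL definitions) of why the accessor spelling changed in PG15, where a single generic Value became a union of typed nodes:

#include <stdio.h>

/* mock node tags, not the real PostgreSQL enum */
typedef enum MockNodeTag { T_MOCK_STRING, T_MOCK_INTEGER } MockNodeTag;

/* pre-PG15 shape: one generic Value carrying an untyped payload union */
typedef struct MockValueOld
{
	MockNodeTag type;
	union { char *str; int ival; } val;
} MockValueOld;

/* PG15+ shape: a union of typed nodes, selected by the field you assign */
typedef struct MockString { MockNodeTag type; char *sval; } MockString;
typedef struct MockInteger { MockNodeTag type; int ival; } MockInteger;
typedef union MockValNew
{
	MockString sval;
	MockInteger ival;
} MockValNew;

int main(void)
{
	/* mirrors the removed branch: n->val.type = T_String; n->val.val.str = str */
	MockValueOld before = { .type = T_MOCK_STRING, .val.str = "infinity" };

	/* mirrors the kept branch: n->val.sval.type = T_String; n->val.sval.sval = str */
	MockValNew after;
	after.sval.type = T_MOCK_STRING;
	after.sval.sval = "infinity";

	printf("pre-15: %s, 15+: %s\n", before.val.str, after.sval.sval);
	return 0;
}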
@@ -0,0 +1,125 @@
/*-------------------------------------------------------------------------
 *
 * seclabel.c
 *
 *    This file contains the logic of SECURITY LABEL statement propagation.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/deparser.h"
#include "distributed/log_utils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"


/*
 * PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign
 * security labels on distributed objects, currently supporting just Role objects.
 * It also ensures that all object dependencies exist on all
 * nodes for the object in the SecLabelStmt.
 */
List *
PostprocessSecLabelStmt(Node *node, const char *queryString)
{
	if (!ShouldPropagate())
	{
		return NIL;
	}

	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);

	List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
	if (!IsAnyObjectDistributed(objectAddresses))
	{
		return NIL;
	}

	if (secLabelStmt->objtype != OBJECT_ROLE)
	{
		/*
		 * If we are not in the coordinator, we don't want to interrupt the security
		 * label command with notices, the user expects that from the worker node
		 * the command will not be propagated
		 */
		if (EnableUnsupportedFeatureMessages && IsCoordinator())
		{
			ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
									"object type is not role"),
							 errhint("Connect to worker nodes directly to manually "
									 "run the same SECURITY LABEL command.")));
		}
		return NIL;
	}

	if (!EnableCreateRolePropagation)
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);

	const char *sql = DeparseTreeNode((Node *) secLabelStmt);

	List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
								   (void *) sql,
								   ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
}


/*
 * SecLabelStmtObjectAddress returns the object address of the object on
 * which this statement operates (secLabelStmt->object). Note that it has no limitation
 * on the object type being OBJECT_ROLE. This is intentionally implemented like this
 * since it is fairly simple to implement and we might extend SECURITY LABEL propagation
 * in the future to include more object types.
 */
List *
SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
{
	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);

	Relation rel = NULL;
	ObjectAddress address = get_object_address(secLabelStmt->objtype,
											   secLabelStmt->object, &rel,
											   AccessShareLock, missing_ok);
	if (rel != NULL)
	{
		relation_close(rel, AccessShareLock);
	}

	ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress));
	*addressPtr = address;
	return list_make1(addressPtr);
}


/*
 * citus_test_object_relabel is a dummy function for check_object_relabel_type hook.
 * It is meant to be used in tests combined with citus_test_register_label_provider
 */
void
citus_test_object_relabel(const ObjectAddress *object, const char *seclabel)
{
	if (seclabel == NULL ||
		strcmp(seclabel, "citus_unclassified") == 0 ||
		strcmp(seclabel, "citus_classified") == 0 ||
		strcmp(seclabel, "citus '!unclassified") == 0)
	{
		return;
	}

	ereport(ERROR,
			(errcode(ERRCODE_INVALID_NAME),
			 errmsg("'%s' is not a valid security label for Citus tests.", seclabel)));
}
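Not part of the diff: citus_test_object_relabel above accepts exactly three labels (plus NULL). A standalone sketch of that acceptance check, with the ereport call replaced by a return code so it compiles outside the server:

#include <stdio.h>
#include <string.h>

/* the labels the test provider accepts, per citus_test_object_relabel above */
static const char *allowedLabels[] = {
	"citus_unclassified",
	"citus_classified",
	"citus '!unclassified",
};

static int
IsAllowedTestLabel(const char *seclabel)
{
	if (seclabel == NULL)
	{
		return 1; /* SECURITY LABEL ... IS NULL clears the label, always fine */
	}
	for (size_t i = 0; i < sizeof(allowedLabels) / sizeof(allowedLabels[0]); i++)
	{
		if (strcmp(seclabel, allowedLabels[i]) == 0)
		{
			return 1;
		}
	}
	return 0; /* the real hook raises ERRCODE_INVALID_NAME here */
}

int main(void)
{
	printf("%d %d\n", IsAllowedTestLabel("citus_classified"),
		   IsAllowedTestLabel("top_secret")); /* prints: 1 0 */
	return 0;
}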
@@ -735,8 +735,6 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * PreprocessAlterSequencePersistenceStmt is called for change of persistence
 * of sequences before the persistence is changed on the local instance.

@@ -847,9 +845,6 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
}

-
-#endif

/*
 * PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
 * postgres instance.
@@ -1153,7 +1153,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
	{
		AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
		stmtCopy->objtype = OBJECT_SEQUENCE;
-#if (PG_VERSION_NUM >= PG_VERSION_15)

		/*
		 * it must be ALTER TABLE .. OWNER TO ..

@@ -1163,16 +1162,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
		 */
		return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand,
												processUtilityContext);
-#else
-
-		/*
-		 * it must be ALTER TABLE .. OWNER TO .. command
-		 * since this is the only ALTER command of a sequence that
-		 * passes through an AlterTableStmt
-		 */
-		return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand,
-												processUtilityContext);
-#endif
	}
	else if (relKind == RELKIND_VIEW)
	{

@@ -3673,9 +3662,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
							"are currently unsupported.")));
			break;
		}

-#endif
-#if PG_VERSION_NUM >= PG_VERSION_15

		case AT_SetAccessMethod:
		{
			/*

@@ -3695,7 +3683,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
			break;
		}

-#endif
		case AT_SetNotNull:
		case AT_ReplicaIdentity:
		case AT_ChangeOwner:
@@ -57,9 +57,6 @@ static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt,
static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt);
static char * GetTriggerNameById(Oid triggerId);
static int16 GetTriggerTypeById(Oid triggerId);
-#if (PG_VERSION_NUM < PG_VERSION_15)
-static void ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname);
-#endif


/* GUC that overrides trigger checks for distributed tables and reference tables */

@@ -404,40 +401,6 @@ CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaNam
}

-
-/*
- * PreprocessAlterTriggerRenameStmt is called before a ALTER TRIGGER RENAME
- * command has been executed by standard process utility. This function errors
- * out if we are trying to rename a child trigger on a partition of a distributed
- * table. In PG15, this is not allowed anyway.
- */
-List *
-PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
-								 ProcessUtilityContext processUtilityContext)
-{
-#if (PG_VERSION_NUM < PG_VERSION_15)
-	RenameStmt *renameTriggerStmt = castNode(RenameStmt, node);
-	Assert(renameTriggerStmt->renameType == OBJECT_TRIGGER);
-
-	RangeVar *relation = renameTriggerStmt->relation;
-
-	bool missingOk = false;
-	Oid relationId = RangeVarGetRelid(relation, ALTER_TRIGGER_LOCK_MODE, missingOk);
-
-	if (!IsCitusTable(relationId))
-	{
-		return NIL;
-	}
-
-	EnsureCoordinator();
-	ErrorOutForTriggerIfNotSupported(relationId);
-
-	ErrorOutIfCloneTrigger(relationId, renameTriggerStmt->subname);
-#endif
-
-	return NIL;
-}

/*
 * PostprocessAlterTriggerRenameStmt is called after a ALTER TRIGGER RENAME
 * command has been executed by standard process utility. This function errors

@@ -759,64 +722,6 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
}

-
-#if (PG_VERSION_NUM < PG_VERSION_15)
-
-/*
- * ErrorOutIfCloneTrigger is a helper function to error
- * out if we are trying to rename a child trigger on a
- * partition of a distributed table.
- * A lot of this code is borrowed from PG15 because
- * renaming clone triggers isn't allowed in PG15 anymore.
- */
-static void
-ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname)
-{
-	HeapTuple tuple;
-	ScanKeyData key[2];
-
-	Relation tgrel = table_open(TriggerRelationId, RowExclusiveLock);
-
-	/*
-	 * Search for the trigger to modify.
-	 */
-	ScanKeyInit(&key[0],
-				Anum_pg_trigger_tgrelid,
-				BTEqualStrategyNumber, F_OIDEQ,
-				ObjectIdGetDatum(tgrelid));
-	ScanKeyInit(&key[1],
-				Anum_pg_trigger_tgname,
-				BTEqualStrategyNumber, F_NAMEEQ,
-				CStringGetDatum(tgname));
-	SysScanDesc tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
-											NULL, 2, key);
-
-	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
-	{
-		Form_pg_trigger trigform = (Form_pg_trigger) GETSTRUCT(tuple);
-
-		/*
-		 * If the trigger descends from a trigger on a parent partitioned
-		 * table, reject the rename.
-		 * Appended shard ids to find the trigger on the partition's shards
-		 * are not correct. Hence we would fail to find the trigger on the
-		 * partition's shard.
-		 */
-		if (OidIsValid(trigform->tgparentid))
-		{
-			ereport(ERROR, (
-						errmsg(
-							"cannot rename child triggers on distributed partitions")));
-		}
-	}
-
-	systable_endscan(tgscan);
-	table_close(tgrel, RowExclusiveLock);
-}
-
-
-#endif

/*
 * GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns
 * RangeVar for the relation that owns the trigger.
@@ -392,9 +392,7 @@ CreateViewDDLCommand(Oid viewOid)
static void
AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid)
{
-	char *viewName = get_rel_name(viewOid);
-	char *schemaName = get_namespace_name(get_rel_namespace(viewOid));
-	char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
+	char *qualifiedViewName = generate_qualified_relation_name(viewOid);

	appendStringInfo(buf, "%s ", qualifiedViewName);
}
@@ -614,16 +614,6 @@ WaitForSharedConnection(void)
void
InitializeSharedConnectionStats(void)
{
-	/* on PG 15, we use shmem_request_hook_type */
-#if PG_VERSION_NUM < PG_VERSION_15
-
-	/* allocate shared memory */
-	if (!IsUnderPostmaster)
-	{
-		RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
-	}
-#endif
-
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = SharedConnectionStatsShmemInit;
}
@@ -258,10 +258,8 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
	char *typeName = format_type_be(pgSequenceForm->seqtypid);

	char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND,
-#if (PG_VERSION_NUM >= PG_VERSION_15)
								 get_rel_persistence(sequenceRelationId) ==
								 RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "",
-#endif
								 qualifiedSequenceName,
								 typeName,
								 pgSequenceForm->seqincrement, pgSequenceForm->seqmin,

@@ -857,12 +855,10 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
		appendStringInfoString(buffer, ") ");
	}

-#if PG_VERSION_NUM >= PG_VERSION_15
	if (indexStmt->nulls_not_distinct)
	{
		appendStringInfoString(buffer, "NULLS NOT DISTINCT ");
	}
-#endif /* PG_VERSION_15 */

	if (indexStmt->options != NIL)
	{
@@ -159,7 +159,6 @@ DeparseAlterDatabaseStmt(Node *node)
}

-#if PG_VERSION_NUM >= PG_VERSION_15
char *
DeparseAlterDatabaseRefreshCollStmt(Node *node)
{

@@ -174,6 +173,3 @@ DeparseAlterDatabaseRefreshCollStmt(Node *node)

	return str.data;
}
-
-
-#endif
@@ -32,7 +32,6 @@
static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
										bool whereClauseNeedsTransform,
										bool includeLocalTables);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects,
									 bool whereClauseNeedsTransform,
									 bool includeLocalTables);

@@ -40,10 +39,6 @@ static void AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
										Node *whereClause,
										bool whereClauseNeedsTransform);
static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action);
-#else
-static bool AppendTables(StringInfo buf, List *tables, bool includeLocalTables);
-static void AppendDefElemAction(StringInfo buf, DefElemAction action);
-#endif
static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
									   bool whereClauseNeedsTransform,
									   bool includeLocalTables);

@@ -108,7 +103,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
	{
		appendStringInfoString(buf, " FOR ALL TABLES");
	}
-#if (PG_VERSION_NUM >= PG_VERSION_15)
	else if (stmt->pubobjects != NIL)
	{
		bool hasObjects = false;

@@ -146,32 +140,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
										 includeLocalTables);
		}
	}
-#else
-	else if (stmt->tables != NIL)
-	{
-		bool hasTables = false;
-		RangeVar *rangeVar = NULL;
-
-		/*
-		 * Check whether there are tables to propagate, mainly to know whether
-		 * we should include "FOR".
-		 */
-		foreach_declared_ptr(rangeVar, stmt->tables)
-		{
-			if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false))
-			{
-				hasTables = true;
-				break;
-			}
-		}
-
-		if (hasTables)
-		{
-			appendStringInfoString(buf, " FOR");
-			AppendTables(buf, stmt->tables, includeLocalTables);
-		}
-	}
-#endif

	if (stmt->options != NIL)
	{

@@ -182,8 +150,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * AppendPublicationObjects appends a string representing a list of publication
 * objects to a buffer.

@@ -320,57 +286,6 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
}

-
-#else
-
-/*
- * AppendPublicationObjects appends a string representing a list of publication
- * objects to a buffer.
- *
- * For instance: TABLE users, departments
- */
-static bool
-AppendTables(StringInfo buf, List *tables, bool includeLocalTables)
-{
-	RangeVar *rangeVar = NULL;
-	bool appendedObject = false;
-
-	foreach_declared_ptr(rangeVar, tables)
-	{
-		if (!includeLocalTables &&
-			!IsCitusTableRangeVar(rangeVar, NoLock, false))
-		{
-			/* do not propagate local tables */
-			continue;
-		}
-
-		char *schemaName = rangeVar->schemaname;
-		char *tableName = rangeVar->relname;
-
-		if (schemaName != NULL)
-		{
-			/* qualified table name */
-			appendStringInfo(buf, "%s %s",
-							 appendedObject ? "," : " TABLE",
-							 quote_qualified_identifier(schemaName, tableName));
-		}
-		else
-		{
-			/* unqualified table name */
-			appendStringInfo(buf, "%s %s",
-							 appendedObject ? "," : " TABLE",
-							 quote_identifier(tableName));
-		}
-
-		appendedObject = true;
-	}
-
-	return appendedObject;
-}
-
-
-#endif

/*
 * DeparseAlterPublicationSchemaStmt builds and returns a string representing
 * an AlterPublicationStmt.

@@ -439,19 +354,12 @@ AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
		return true;
	}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
	AppendAlterPublicationAction(buf, stmt->action);
	return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform,
									includeLocalTables);
-#else
-	AppendDefElemAction(buf, stmt->tableAction);
-	return AppendTables(buf, stmt->tables, includeLocalTables);
-#endif
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * AppendAlterPublicationAction appends a string representing an AlterPublicationAction
 * to a buffer.

@@ -487,46 +395,6 @@ AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action)
}

-
-#else
-
-/*
- * AppendDefElemAction appends a string representing a DefElemAction
- * to a buffer.
- */
-static void
-AppendDefElemAction(StringInfo buf, DefElemAction action)
-{
-	switch (action)
-	{
-		case DEFELEM_ADD:
-		{
-			appendStringInfoString(buf, " ADD");
-			break;
-		}
-
-		case DEFELEM_DROP:
-		{
-			appendStringInfoString(buf, " DROP");
-			break;
-		}
-
-		case DEFELEM_SET:
-		{
-			appendStringInfoString(buf, " SET");
-			break;
-		}
-
-		default:
-		{
-			ereport(ERROR, (errmsg("unrecognized publication action: %d", action)));
-		}
-	}
-}
-
-
-#endif

/*
 * DeparseDropPublicationStmt builds and returns a string representing the DropStmt
 */

@@ -651,11 +519,7 @@ AppendPublicationOptions(StringInfo stringBuffer, List *optionList)
		appendStringInfo(stringBuffer, "%s = ",
						 quote_identifier(optionName));

-#if (PG_VERSION_NUM >= PG_VERSION_15)
		if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean)
-#else
-		if (valueType == T_Integer || valueType == T_Float)
-#endif
		{
			/* string escaping is unnecessary for numeric types and can cause issues */
			appendStringInfo(stringBuffer, "%s", optionValue);
@@ -0,0 +1,79 @@
/*-------------------------------------------------------------------------
 *
 * deparse_seclabel_stmts.c
 *	  All routines to deparse SECURITY LABEL statements.
 *
 * Copyright (c), Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "utils/builtins.h"

#include "distributed/deparser.h"

static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);

/*
 * DeparseSecLabelStmt builds and returns a string representing of the
 * SecLabelStmt for application on a remote server.
 */
char *
DeparseSecLabelStmt(Node *node)
{
	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	AppendSecLabelStmt(&buf, secLabelStmt);

	return buf.data;
}


/*
 * AppendSecLabelStmt generates the string representation of the
 * SecLabelStmt and appends it to the buffer.
 */
static void
AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
{
	appendStringInfoString(buf, "SECURITY LABEL ");

	if (stmt->provider != NULL)
	{
		appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider));
	}

	appendStringInfoString(buf, "ON ");

	switch (stmt->objtype)
	{
		case OBJECT_ROLE:
		{
			appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object)));
			break;
		}

		/* normally, we shouldn't reach this */
		default:
		{
			ereport(ERROR, (errmsg("unsupported security label statement for"
								   " deparsing")));
		}
	}

	appendStringInfoString(buf, "IS ");

	if (stmt->label != NULL)
	{
		appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label));
	}
	else
	{
		appendStringInfoString(buf, "NULL");
	}
}
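Not part of the diff: a standalone sketch of the clause order AppendSecLabelStmt produces, using snprintf and made-up sample values ("selinux", "alice", "citus_classified"); the real code quotes identifiers and literals via quote_identifier()/quote_literal_cstr(), which plain %s does not replicate:

#include <stdio.h>

int main(void)
{
	/* mock inputs; the real code reads these from a SecLabelStmt node */
	const char *provider = "selinux";
	const char *role = "alice";
	const char *label = "citus_classified";

	char buf[256];

	/* same clause order: SECURITY LABEL [FOR provider] ON ROLE <role> IS <label> */
	snprintf(buf, sizeof(buf), "SECURITY LABEL FOR %s ON ROLE %s IS '%s'",
			 provider, role, label);

	puts(buf); /* SECURITY LABEL FOR selinux ON ROLE alice IS 'citus_classified' */
	return 0;
}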
@@ -28,9 +28,7 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
-#if (PG_VERSION_NUM >= PG_VERSION_15)
static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt);
-#endif
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);

@@ -262,8 +260,6 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * DeparseAlterSequencePersistenceStmt builds and returns a string representing
 * the AlterTableStmt consisting of changing the persistence of a sequence

@@ -349,9 +345,6 @@ AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
}

-
-#endif

/*
 * DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
 */
@@ -193,12 +193,10 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
	{
		appendStringInfoString(buf, " UNIQUE");

-#if (PG_VERSION_NUM >= PG_VERSION_15)
		if (constraint->nulls_not_distinct == true)
		{
			appendStringInfoString(buf, " NULLS NOT DISTINCT");
		}
-#endif
	}

	if (subtype == AT_AddConstraint)
@@ -19,11 +19,7 @@
 #include "distributed/deparser.h"
 #include "distributed/listutils.h"

-#if (PG_VERSION_NUM >= PG_VERSION_15)
static void QualifyPublicationObjects(List *publicationObjects);
-#else
-static void QualifyTables(List *tables);
-#endif
static void QualifyPublicationRangeVar(RangeVar *publication);


@@ -36,16 +32,10 @@ QualifyCreatePublicationStmt(Node *node)
{
	CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node);

-#if (PG_VERSION_NUM >= PG_VERSION_15)
	QualifyPublicationObjects(stmt->pubobjects);
-#else
-	QualifyTables(stmt->tables);
-#endif
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * QualifyPublicationObjects ensures all table names in a list of
 * publication objects are fully qualified.

@@ -68,26 +58,6 @@ QualifyPublicationObjects(List *publicationObjects)
}

-
-#else
-
-/*
- * QualifyTables ensures all table names in a list are fully qualified.
- */
-static void
-QualifyTables(List *tables)
-{
-	RangeVar *rangeVar = NULL;
-
-	foreach_declared_ptr(rangeVar, tables)
-	{
-		QualifyPublicationRangeVar(rangeVar);
-	}
-}
-
-
-#endif

/*
 * QualifyPublicationObjects ensures all table names in a list of
 * publication objects are fully qualified.

@@ -97,11 +67,7 @@ QualifyAlterPublicationStmt(Node *node)
{
	AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node);

-#if (PG_VERSION_NUM >= PG_VERSION_15)
	QualifyPublicationObjects(stmt->pubobjects);
-#else
-	QualifyTables(stmt->tables);
-#endif
}
@@ -52,8 +52,6 @@ QualifyAlterSequenceOwnerStmt(Node *node)
}

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
/*
 * QualifyAlterSequencePersistenceStmt transforms a
 * ALTER SEQUENCE .. SET LOGGED/UNLOGGED

@@ -80,9 +78,6 @@ QualifyAlterSequencePersistenceStmt(Node *node)
}

-
-#endif

/*
 * QualifyAlterSequenceSchemaStmt transforms a
 * ALTER SEQUENCE .. SET SCHEMA ..
File diff suppressed because it is too large
@@ -720,10 +720,8 @@ static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
static void AddLatchWaitEventToExecution(DistributedExecution *execution);
static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int
							  eventCount, bool *cancellationReceived);
-#if PG_VERSION_NUM >= PG_VERSION_15
static void RemoteSocketClosedForAnySession(DistributedExecution *execution);
static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount);
-#endif
static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey);

@@ -1769,11 +1767,8 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
	session->commandsSent = 0;
	session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;

-#if PG_VERSION_NUM >= PG_VERSION_15
-
	/* always detect closed sockets */
	UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED);
-#endif

	dlist_init(&session->pendingTaskQueue);
	dlist_init(&session->readyTaskQueue);

@@ -1817,7 +1812,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
 * the events, even ignores cancellation events. Future callers of this
 * function should consider its limitations.
 */
-#if PG_VERSION_NUM >= PG_VERSION_15
static void
RemoteSocketClosedForAnySession(DistributedExecution *execution)
{

@@ -1835,9 +1829,6 @@ RemoteSocketClosedForAnySession(DistributedExecution *execution)
}

-
-#endif

/*
 * SequentialRunDistributedExecution gets a distributed execution and
 * executes each individual task in the execution sequentially, one

@@ -2173,8 +2164,6 @@ ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int eventC
}

-#if PG_VERSION_NUM >= PG_VERSION_15
-
/*
 * ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event.
 * If WL_SOCKET_CLOSED is found, the function sets the underlying connection's

@@ -2207,9 +2196,6 @@ ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount)
}

-
-#endif

/*
 * ManageWorkerPool ensures the worker pool has the appropriate number of connections
 * based on the number of pending tasks.

@@ -2704,7 +2690,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
	 * Instead, we prefer this slight difference, which in effect has almost no
	 * difference, but doing things in different points in time.
	 */
-#if PG_VERSION_NUM >= PG_VERSION_15

	/* we added new connections, rebuild the waitEventSet */
	RebuildWaitEventSetForSessions(execution);

@@ -2724,9 +2709,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
	 * of the execution.
	 */
	AddLatchWaitEventToExecution(execution);
-#else
-	execution->rebuildWaitEventSet = true;
-#endif

	WorkerSession *session = NULL;
	foreach_declared_ptr(session, newSessionsList)

@@ -3663,13 +3645,8 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
		return;
	}

-#if PG_VERSION_NUM >= PG_VERSION_15
-
	/* always detect closed sockets */
	connection->waitFlags = waitFlags | WL_SOCKET_CLOSED;
-#else
-	connection->waitFlags = waitFlags;
-#endif

	/* without signalling the execution, the flag changes won't be reflected */
	execution->waitFlagsChanged = true;

@@ -3694,13 +3671,11 @@ CheckConnectionReady(WorkerSession *session)
		return false;
	}

-#if PG_VERSION_NUM >= PG_VERSION_15
	if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0)
	{
		connection->connectionState = MULTI_CONNECTION_LOST;
		return false;
	}
-#endif

	/* try to send all pending data */
	int sendStatus = PQflush(connection->pgConn);
@@ -143,15 +143,10 @@ NonPushableInsertSelectExecScan(CustomScanState *node)
											targetRelation->partitionColumn);
		if (distributionColumnIndex == -1)
		{
-			char *relationName = get_rel_name(targetRelationId);
-			Oid schemaOid = get_rel_namespace(targetRelationId);
-			char *schemaName = get_namespace_name(schemaOid);
-
			ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
							errmsg(
								"the partition column of table %s should have a value",
-								quote_qualified_identifier(schemaName,
-														   relationName))));
+								generate_qualified_relation_name(targetRelationId))));
		}

		TargetEntry *selectPartitionTE = list_nth(selectQuery->targetList,
@@ -219,6 +219,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
		copyObject(distributedPlan->selectPlanForModifyViaCoordinatorOrRepartition);
	char *intermediateResultIdPrefix = distributedPlan->intermediateResultIdPrefix;
	bool hasReturning = distributedPlan->expectResults;
+	bool hasNotMatchedBySource = HasMergeNotMatchedBySource(mergeQuery);
	int partitionColumnIndex = distributedPlan->sourceResultRepartitionColumnIndex;

	/*

@@ -233,7 +234,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)

	ereport(DEBUG1, (errmsg("Collect source query results on coordinator")));

-	List *prunedTaskList = NIL;
+	List *prunedTaskList = NIL, *emptySourceTaskList = NIL;
	HTAB *shardStateHash =
		ExecuteMergeSourcePlanIntoColocatedIntermediateResults(
			targetRelationId,

@@ -255,7 +256,8 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
	 * We cannot actually execute MERGE INTO ... tasks that read from
	 * intermediate results that weren't created because no rows were
	 * written to them. Prune those tasks out by only including tasks
-	 * on shards with connections.
+	 * on shards with connections; however, if the MERGE INTO includes
+	 * a NOT MATCHED BY SOURCE clause we need to include the task.
	 */
	Task *task = NULL;
	foreach_declared_ptr(task, taskList)

@@ -268,6 +270,19 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
		{
			prunedTaskList = lappend(prunedTaskList, task);
		}
+		else if (hasNotMatchedBySource)
+		{
+			emptySourceTaskList = lappend(emptySourceTaskList, task);
+		}
	}

+	if (emptySourceTaskList != NIL)
+	{
+		ereport(DEBUG1, (errmsg("MERGE has NOT MATCHED BY SOURCE clause, "
+								"execute MERGE on all shards")));
+		AdjustTaskQueryForEmptySource(targetRelationId, mergeQuery, emptySourceTaskList,
+									  intermediateResultIdPrefix);
+		prunedTaskList = list_concat(prunedTaskList, emptySourceTaskList);
+	}
+
	if (prunedTaskList == NIL)
@@ -140,19 +140,6 @@ static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash);
void
InitializeCitusQueryStats(void)
{
-	/* on PG 15, we use shmem_request_hook_type */
-#if PG_VERSION_NUM < PG_VERSION_15
-
-	/* allocate shared memory */
-	if (!IsUnderPostmaster)
-	{
-		RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
-
-		elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
-		RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
-	}
-#endif
-
	/* Install hook */
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = CitusQueryStatsShmemStartup;
@@ -17,6 +17,7 @@
 #include "nodes/parsenodes.h"

 #include "distributed/citus_custom_scan.h"
+#include "distributed/deparse_shard_query.h"
 #include "distributed/intermediate_results.h"
 #include "distributed/listutils.h"
 #include "distributed/multi_physical_planner.h"

@@ -101,6 +102,40 @@ IsRedistributablePlan(Plan *selectPlan)
}

+/*
+ * HasMergeNotMatchedBySource returns true if the MERGE query has a
+ * WHEN NOT MATCHED BY SOURCE clause. If it does, we need to execute
+ * the MERGE query on all shards of the target table, regardless of
+ * whether or not the source shard has any rows.
+ */
+bool
+HasMergeNotMatchedBySource(Query *query)
+{
+	if (!IsMergeQuery(query))
+	{
+		return false;
+	}
+
+	bool haveNotMatchedBySource = false;
+
+#if PG_VERSION_NUM >= PG_VERSION_17
+	ListCell *lc;
+	foreach(lc, query->mergeActionList)
+	{
+		MergeAction *action = lfirst_node(MergeAction, lc);
+
+		if (action->matchKind == MERGE_WHEN_NOT_MATCHED_BY_SOURCE)
+		{
+			haveNotMatchedBySource = true;
+			break;
+		}
+	}
+#endif
+
+	return haveNotMatchedBySource;
+}
+
+
/*
 * GenerateTaskListWithColocatedIntermediateResults generates a list of tasks
 * for a query that inserts into a target relation and selects from a set of

@@ -200,6 +235,61 @@ GenerateTaskListWithColocatedIntermediateResults(Oid targetRelationId,
}

+/*
+ * AdjustTaskQueryForEmptySource adjusts the query for tasks that read from an
+ * intermediate result to instead read from an empty relation. This ensures that
+ * the MERGE query is executed on all shards of the target table, because it has
+ * a NOT MATCHED BY SOURCE clause, which will be true for all target shards where
+ * the source shard has no rows.
+ */
+void
+AdjustTaskQueryForEmptySource(Oid targetRelationId,
+							  Query *mergeQuery,
+							  List *tasks,
+							  char *resultIdPrefix)
+{
+	Query *mergeQueryCopy = copyObject(mergeQuery);
+	RangeTblEntry *selectRte = ExtractSourceResultRangeTableEntry(mergeQueryCopy);
+	RangeTblEntry *mergeRte = ExtractResultRelationRTE(mergeQueryCopy);
+	List *targetList = selectRte->subquery->targetList;
+	ListCell *taskCell = NULL;
+
+	foreach(taskCell, tasks)
+	{
+		Task *task = lfirst(taskCell);
+		uint64 shardId = task->anchorShardId;
+		StringInfo queryString = makeStringInfo();
+		StringInfo resultId = makeStringInfo();
+
+		appendStringInfo(resultId, "%s_" UINT64_FORMAT, resultIdPrefix, shardId);
+
+		/* Generate a query for an empty relation */
+		selectRte->subquery = BuildEmptyResultQuery(targetList, resultId->data);
+
+		/* setting an alias simplifies deparsing of RETURNING */
+		if (mergeRte->alias == NULL)
+		{
+			Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL);
+			mergeRte->alias = alias;
+		}
+
+		/*
+		 * Generate a query string for the query that merges into a shard and reads
+		 * from an empty relation.
+		 *
+		 * Since CTEs have already been converted to intermediate results, they need
+		 * to removed from the query. Otherwise, worker queries include both
+		 * intermediate results and CTEs in the query.
+		 */
+		mergeQueryCopy->cteList = NIL;
+		deparse_shard_query(mergeQueryCopy, targetRelationId, shardId, queryString);
+		ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data)));
+
+		SetTaskQueryString(task, queryString->data);
+	}
+}
+
+
/*
 * GenerateTaskListWithRedistributedResults returns a task list to insert given
 * redistributedResults into the given target relation.

@@ -223,6 +313,7 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
	Query *modifyResultQuery = copyObject(modifyQueryViaCoordinatorOrRepartition);
	RangeTblEntry *insertRte = ExtractResultRelationRTE(modifyResultQuery);
	Oid targetRelationId = targetRelation->relationId;
+	bool hasNotMatchedBySource = HasMergeNotMatchedBySource(modifyResultQuery);

	int shardCount = targetRelation->shardIntervalArrayLength;
	int shardOffset = 0;

@@ -242,19 +333,33 @@ GenerateTaskListWithRedistributedResults(Query *modifyQueryViaCoordinatorOrRepar
		StringInfo queryString = makeStringInfo();

		/* skip empty tasks */
-		if (resultIdList == NIL)
+		if (resultIdList == NIL && !hasNotMatchedBySource)
		{
			continue;
		}

-		/* sort result ids for consistent test output */
-		List *sortedResultIds = SortList(resultIdList, pg_qsort_strcmp);
+		Query *fragmentSetQuery = NULL;

-		/* generate the query on the intermediate result */
-		Query *fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
-																		 NIL,
-																		 sortedResultIds,
-																		 useBinaryFormat);
+		if (resultIdList != NIL)
+		{
+			/* sort result ids for consistent test output */
+			List *sortedResultIds = SortList(resultIdList, pg_qsort_strcmp);
+
+			/* generate the query on the intermediate result */
+			fragmentSetQuery = BuildReadIntermediateResultsArrayQuery(selectTargetList,
+																	  NIL,
+																	  sortedResultIds,
+																	  useBinaryFormat);
+		}
+		else
+		{
+			/* No source data, but MERGE query has NOT MATCHED BY SOURCE */
+			StringInfo emptyFragmentId = makeStringInfo();
+			appendStringInfo(emptyFragmentId, "%s_" UINT64_FORMAT, "temp_empty_rel_",
+							 shardId);
+			fragmentSetQuery = BuildEmptyResultQuery(selectTargetList,
+													 emptyFragmentId->data);
+		}

		/* put the intermediate result query in the INSERT..SELECT */
		selectRte->subquery = fragmentSetQuery;
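Not part of the diff: the core of HasMergeNotMatchedBySource above is a linear scan of the MERGE action list for one match kind. A minimal standalone sketch with mock types (the real code walks query->mergeActionList looking for PG17's MERGE_WHEN_NOT_MATCHED_BY_SOURCE nodes):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the PG17 MergeMatchKind values */
typedef enum MockMatchKind
{
	MOCK_WHEN_MATCHED,
	MOCK_WHEN_NOT_MATCHED_BY_SOURCE,
	MOCK_WHEN_NOT_MATCHED /* i.e. not matched by target */
} MockMatchKind;

/* scan the action list the same way HasMergeNotMatchedBySource does */
static bool
HasNotMatchedBySource(const MockMatchKind *actions, int actionCount)
{
	for (int i = 0; i < actionCount; i++)
	{
		if (actions[i] == MOCK_WHEN_NOT_MATCHED_BY_SOURCE)
		{
			return true;
		}
	}
	return false;
}

int main(void)
{
	MockMatchKind actions[] = { MOCK_WHEN_MATCHED, MOCK_WHEN_NOT_MATCHED_BY_SOURCE };

	/* prints 1: such a MERGE must run on every target shard, even empty ones */
	printf("needs all target shards: %d\n", HasNotMatchedBySource(actions, 2));
	return 0;
}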
@@ -109,7 +109,7 @@ TupleStoreTupleDestPutTuple(TupleDestination *self, Task *task,
	uint64 tupleSize = tupleLibpqSize;
	if (tupleSize == 0)
	{
-		tupleSize = HeapTupleHeaderGetDatumLength(heapTuple);
+		tupleSize = heapTuple->t_len;
	}

	/*
@@ -1717,13 +1717,11 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
			/*
			 * As of PostgreSQL 15, the same applies to schemas.
			 */
-#if PG_VERSION_NUM >= PG_VERSION_15
			List *schemaIdList =
				GetPublicationSchemas(publicationId);
			List *schemaDependencyList =
				CreateObjectAddressDependencyDefList(NamespaceRelationId, schemaIdList);
			result = list_concat(result, schemaDependencyList);
-#endif

			break;
		}
@@ -379,7 +379,7 @@ EnsureModificationsCanRun(void)


/*
- * EnsureModificationsCanRunOnRelation firsts calls into EnsureModificationsCanRun() and
+ * EnsureModificationsCanRunOnRelation first calls into EnsureModificationsCanRun() and
 * then does one more additional check. The additional check is to give a proper error
 * message if any relation that is modified is replicated, as replicated tables use
 * 2PC and 2PC cannot happen when recovery is in progress.

@@ -660,6 +660,18 @@ GetTableTypeName(Oid tableId)
bool
IsCitusTable(Oid relationId)
{
+	/*
+	 * PostgreSQL's OID generator assigns user operation OIDs starting
+	 * from FirstNormalObjectId. This means no user object can have
+	 * an OID lower than FirstNormalObjectId. Therefore, if the
+	 * relationId is less than FirstNormalObjectId
+	 * (i.e. in PostgreSQL's reserved range), we can immediately
+	 * return false, since such objects cannot be Citus tables.
+	 */
+	if (relationId < FirstNormalObjectId)
+	{
+		return false;
+	}
	return LookupCitusTableCacheEntry(relationId) != NULL;
}

@@ -2521,6 +2533,8 @@ AvailableExtensionVersion(void)

	ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					errmsg("citus extension is not found")));
+
+	return NULL; /* keep compiler happy */
}
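Not part of the diff: the IsCitusTable fast path above relies on PostgreSQL reserving all OIDs below FirstNormalObjectId (16384) for built-in catalog objects, so catalog lookups can be skipped entirely for them. A standalone sketch with a mock cache lookup:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/* same constant PostgreSQL uses as the start of user-assigned OIDs */
#define FIRST_NORMAL_OBJECT_ID 16384

/* stand-in for the comparatively expensive metadata cache lookup */
static bool
MockCacheLookupIsCitusTable(Oid relationId)
{
	return relationId == 16385; /* pretend one distributed table exists */
}

static bool
IsCitusTableSketch(Oid relationId)
{
	/* catalog relations live below FirstNormalObjectId: skip the cache */
	if (relationId < FIRST_NORMAL_OBJECT_ID)
	{
		return false;
	}
	return MockCacheLookupIsCitusTable(relationId);
}

int main(void)
{
	printf("pg_class (1259): %d\n", IsCitusTableSketch(1259));   /* 0, no lookup */
	printf("user rel (16385): %d\n", IsCitusTableSketch(16385)); /* 1 */
	return 0;
}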
|
|
@ -1739,48 +1739,6 @@ GetSequencesFromAttrDef(Oid attrdefOid)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
/*
|
||||
* Given a pg_attrdef OID, return the relation OID and column number of
|
||||
* the owning column (represented as an ObjectAddress for convenience).
|
||||
*
|
||||
* Returns InvalidObjectAddress if there is no such pg_attrdef entry.
|
||||
*/
|
||||
ObjectAddress
|
||||
GetAttrDefaultColumnAddress(Oid attrdefoid)
|
||||
{
|
||||
ObjectAddress result = InvalidObjectAddress;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tup;
|
||||
|
||||
Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock);
|
||||
ScanKeyInit(&skey[0],
|
||||
Anum_pg_attrdef_oid,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(attrdefoid));
|
||||
SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true,
|
||||
NULL, 1, skey);
|
||||
|
||||
if (HeapTupleIsValid(tup = systable_getnext(scan)))
|
||||
{
|
||||
Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup);
|
||||
|
||||
result.classId = RelationRelationId;
|
||||
result.objectId = atdform->adrelid;
|
||||
result.objectSubId = atdform->adnum;
|
||||
}
|
||||
|
||||
systable_endscan(scan);
|
||||
table_close(attrdef, AccessShareLock);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* GetAttrDefsFromSequence returns a list of attrdef OIDs that have
|
||||
* a dependency on the given sequence
|
||||
|
@ -3011,7 +2969,6 @@ SyncNodeMetadataToNodesMain(Datum main_arg)
|
|||
|
||||
PopActiveSnapshot();
|
||||
CommitTransactionCommand();
|
||||
ProcessCompletedNotifies();
|
||||
|
||||
if (syncedAllNodes)
|
||||
{
|
||||
|
|
|
@@ -217,6 +217,9 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)
		EnsureTransactionalMetadataSyncMode();
	}

+	/* prevent concurrent modification */
+	LockRelationOid(DistNodeRelationId(), RowExclusiveLock);
+
	bool isCoordinatorInMetadata = false;
	WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID,
													  &isCoordinatorInMetadata);
@@ -283,9 +283,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
		case OBJECT_FDW:
		case OBJECT_FOREIGN_SERVER:
		case OBJECT_LANGUAGE:
-#if PG_VERSION_NUM >= PG_VERSION_15
		case OBJECT_PARAMETER_ACL:
-#endif
		case OBJECT_PUBLICATION:
		case OBJECT_ROLE:
		case OBJECT_SCHEMA:

@@ -323,9 +321,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
			break;
		}

-#if PG_VERSION_NUM >= PG_VERSION_15
		case OBJECT_PUBLICATION_NAMESPACE:
-#endif
		case OBJECT_USER_MAPPING:
		{
			objnode = (Node *) list_make2(linitial(name), linitial(args));
@@ -319,7 +319,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start);
PG_FUNCTION_INFO_V1(citus_rebalance_stop);
PG_FUNCTION_INFO_V1(citus_rebalance_wait);

-bool RunningUnderIsolationTest = false;
+bool RunningUnderCitusTestSuite = false;
int MaxRebalancerLoggedIgnoredMoves = 5;
int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024;
bool PropagateSessionSettingsForLoopbackConnection = false;

@@ -384,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state)
			Assert(shardCost->cost <= prevShardCost->cost);
		}
		totalCost += shardCost->cost;
+		prevShardCost = shardCost;
	}

	/* Check that utilization field is up to date. */
@@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
	CheckCitusVersion(ERROR);
	EnsureCoordinator();

+	List *referenceTableIdList = NIL;
+
+	if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
+	{
+		ereport(ERROR, (errmsg("there are missing reference tables on some nodes"),
+						errhint("Copy reference tables first with "
+								"replicate_reference_tables() or use "
+								"citus_rebalance_start() that will do it automatically."
+								)));
+	}
+
	int64 shardId = PG_GETARG_INT64(0);
	char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
	int32 sourceNodePort = PG_GETARG_INT32(2);

@@ -1940,11 +1951,7 @@ ConstructQualifiedShardName(ShardInterval *shardInterval)
static List *
RecreateTableDDLCommandList(Oid relationId)
{
-	const char *relationName = get_rel_name(relationId);
-	Oid relationSchemaId = get_rel_namespace(relationId);
-	const char *relationSchemaName = get_namespace_name(relationSchemaId);
-	const char *qualifiedRelationName = quote_qualified_identifier(relationSchemaName,
-																   relationName);
+	const char *qualifiedRelationName = generate_qualified_relation_name(relationId);

	StringInfo dropCommand = makeStringInfo();
@@ -136,11 +136,8 @@ CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInfo,
	path->custom_path.path.pathtarget = relOptInfo->reltarget;
	path->custom_path.path.parent = relOptInfo;

-#if (PG_VERSION_NUM >= PG_VERSION_15)
-
	/* necessary to avoid extra Result node in PG15 */
	path->custom_path.flags = CUSTOMPATH_SUPPORT_PROJECTION;
-#endif

	/*
	 * The 100k rows we put on the cost of the path is kind of arbitrary and could be
@@ -151,7 +151,10 @@ static RouterPlanType GetRouterPlanType(Query *query,
										bool hasUnresolvedParams);
static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
										   PlannedStmt *concatPlan);
+static bool CheckPostPlanDistribution(bool isDistributedQuery,
+									  Query *origQuery,
+									  List *rangeTableList,
+									  Query *plannedQuery);

/* Distributed planner hook */
PlannedStmt *

@@ -272,6 +275,11 @@ distributed_planner(Query *parse,
		planContext.plan = standard_planner(planContext.query, NULL,
											planContext.cursorOptions,
											planContext.boundParams);
+		needsDistributedPlanning = CheckPostPlanDistribution(needsDistributedPlanning,
+															 planContext.originalQuery,
+															 rangeTableList,
+															 planContext.query);

		if (needsDistributedPlanning)
		{
			result = PlanDistributedStmt(&planContext, rteIdCounter);

@@ -703,6 +711,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan)
	 * Arbitrarily high cost, but low enough that it can be added up
	 * without overflowing by choose_custom_plan().
	 */
+	Assert(plan != NULL);
	plan->planTree->total_cost = FLT_MAX / 100000000;
}
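Not part of the diff: the FLT_MAX / 100000000 constant above is deliberately far below the double overflow threshold, so choose_custom_plan() can keep summing such costs without producing infinity. A standalone check:

#include <float.h>
#include <stdio.h>

int main(void)
{
	/* mirrors the cost constant used above: FLT_MAX / 100000000 */
	double dissuadedCost = FLT_MAX / 100000000;
	double total = 0.0;

	/* many repeated additions stay finite, unlike summing FLT_MAX itself */
	for (int i = 0; i < 1000; i++)
	{
		total += dissuadedCost;
	}

	printf("one plan:  %g\n", dissuadedCost);
	printf("1000 sums: %g (finite, still dissuasively large)\n", total);
	return 0;
}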
@ -1441,13 +1450,8 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
|
|||
|
||||
customScan->custom_private = list_make1(distributedPlanData);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/* necessary to avoid extra Result node in PG15 */
|
||||
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN | CUSTOMPATH_SUPPORT_PROJECTION;
|
||||
#else
|
||||
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Fast path queries cannot have any subplans by definition, so skip
|
||||
|
@ -2733,3 +2737,41 @@ WarnIfListHasForeignDistributedTable(List *rangeTableList)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
CheckPostPlanDistribution(bool isDistributedQuery,
|
||||
Query *origQuery, List *rangeTableList,
|
||||
Query *plannedQuery)
|
||||
{
|
||||
if (isDistributedQuery)
|
||||
{
|
||||
Node *origQuals = origQuery->jointree->quals;
|
||||
Node *plannedQuals = plannedQuery->jointree->quals;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_17
|
||||
if (IsMergeQuery(origQuery))
|
||||
{
|
||||
origQuals = origQuery->mergeJoinCondition;
|
||||
plannedQuals = plannedQuery->mergeJoinCondition;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The WHERE quals have been eliminated by the Postgres planner, possibly by
|
||||
* an OR clause that was simplified to TRUE. In such cases, we need to check
|
||||
* if the planned query still requires distributed planning.
|
||||
*/
|
||||
if (origQuals != NULL && plannedQuals == NULL)
|
||||
{
|
||||
List *rtesPostPlan = ExtractRangeTableEntryList(plannedQuery);
|
||||
if (list_length(rtesPostPlan) < list_length(rangeTableList))
|
||||
{
|
||||
isDistributedQuery = ListContainsDistributedTableRTE(
|
||||
rtesPostPlan, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return isDistributedQuery;
|
||||
}
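
A minimal illustration of the case this function guards against (table and column names are hypothetical): Postgres constant-folds the qual below to TRUE and drops it from the planned query, which is exactly the origQuals != NULL && plannedQuals == NULL situation checked above.

-- 'dist' is a hypothetical distributed table; the planner simplifies
-- the OR to TRUE and removes the WHERE clause entirely.
SELECT * FROM dist WHERE a = 5 OR true;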

@ -531,8 +531,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure,

    if (partitionParam->paramkind == PARAM_EXTERN)
    {
        /* Don't log a message, we should end up here again without a parameter */
        DissuadePlannerFromUsingPlan(plan);
        /*
         * Don't log a message, we should end up here again without a
         * parameter.
         * Note that "plan" can be null, for example when a CALL statement
         * is prepared.
         */
        if (plan)
        {
            DissuadePlannerFromUsingPlan(plan);
        }
        return NULL;
    }
}

@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation,
        ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d",
            sourceType, targetType)));
    }

    return NULL; /* keep compiler happy */
}

@ -38,8 +38,6 @@
#include "distributed/shard_pruning.h"
#include "distributed/shared_library_init.h"

#if PG_VERSION_NUM >= PG_VERSION_15

static int SourceResultPartitionColumnIndex(Query *mergeQuery,
    List *sourceTargetList,
    CitusTableCacheEntry *targetRelation);

@ -100,8 +98,6 @@ static char * MergeCommandResultIdPrefix(uint64 planId);
static void ErrorIfMergeHasReturningList(Query *query);
static Node * GetMergeJoinCondition(Query *mergeQuery);

#endif


/*
 * CreateMergePlan

@ -118,13 +114,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
    PlannerRestrictionContext *plannerRestrictionContext,
    ParamListInfo boundParams)
{
    /* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15

    ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));

#else

    Oid targetRelationId = ModifyQueryResultRelationId(originalQuery);

    /*

@ -153,8 +142,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
    }

    return distributedPlan;

#endif
}

@ -184,9 +171,6 @@ GetMergeJoinTree(Query *mergeQuery)
}


#if PG_VERSION_NUM >= PG_VERSION_15


/*
 * GetMergeJoinCondition returns the quals of the ON condition
 */

@ -904,7 +888,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
    newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
    sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
    sourceResultsQuery->targetList =
        CreateAllTargetListForRelation(sourceRte->relid, requiredAttributes);
        CreateFilteredTargetListForRelation(sourceRte->relid, requiredAttributes);
    List *restrictionList =
        GetRestrictInfoListForRelation(sourceRte, plannerRestrictionContext);
    List *copyRestrictionList = copyObject(restrictionList);

@ -1443,9 +1427,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
}


#endif


/*
 * ExtractMergeSourceRangeTableEntry returns the range table entry of source
 * table or source query in USING clause.

@ -1453,13 +1434,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
RangeTblEntry *
ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
{
    /* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15

    ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));

#else

    Assert(IsMergeQuery(query));

    List *fromList = query->jointree->fromlist;

@ -1498,8 +1472,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
    RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable);

    return subqueryRte;

#endif
}

@ -1516,13 +1488,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
Var *
FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
{
    /* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15

    ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));

#else

    Assert(IsMergeQuery(query));

    if (!IsCitusTableType(targetRelationId, DISTRIBUTED_TABLE))

@ -1546,8 +1511,8 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
            continue;
        }

        /* NOT MATCHED can have either INSERT or DO NOTHING */
        if (action->commandType == CMD_NOTHING)
        /* NOT MATCHED can have either INSERT, DO NOTHING or UPDATE (PG17) */
        if (action->commandType == CMD_NOTHING || action->commandType == CMD_UPDATE)
        {
            return NULL;
        }
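
The CMD_UPDATE case above comes from PG17's WHEN NOT MATCHED BY SOURCE clause, whose action can be an UPDATE. A sketch with hypothetical tables:

-- PG17 syntax; the second UPDATE is attached to a NOT MATCHED branch,
-- so CMD_UPDATE can now legitimately appear among "not matched" actions.
MERGE INTO target t
USING source s ON t.id = s.id
WHEN MATCHED THEN UPDATE SET val = s.val
WHEN NOT MATCHED BY SOURCE THEN UPDATE SET val = 0
WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);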

@ -1593,8 +1558,6 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
    }

    return NULL;

#endif
}

@ -1620,7 +1583,7 @@ IsLocalTableModification(Oid targetRelationId, Query *query, uint64 shardId,
        return true;
    }

    if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalTables(rteProperties))
    if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalOrReferenceTables(rteProperties))
    {
        return true;
    }

@ -95,14 +95,24 @@ typedef struct
    bool wal;
    bool timing;
    bool summary;
#if PG_VERSION_NUM >= PG_VERSION_17
    bool memory;
    ExplainSerializeOption serialize;
#endif
    ExplainFormat format;
} ExplainOptions;


/* EXPLAIN flags of current distributed explain */
#if PG_VERSION_NUM >= PG_VERSION_17
static ExplainOptions CurrentDistributedQueryExplainOptions = {
    0, 0, 0, 0, 0, 0, 0, EXPLAIN_SERIALIZE_NONE, EXPLAIN_FORMAT_TEXT
};
#else
static ExplainOptions CurrentDistributedQueryExplainOptions = {
    0, 0, 0, 0, 0, 0, EXPLAIN_FORMAT_TEXT
};
#endif
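
The two PG17-only fields correspond to the new EXPLAIN options of that release; a distributed invocation exercising both might look like this (hypothetical table name):

-- PG17+ only: MEMORY reports planner memory use, SERIALIZE measures
-- output conversion; both now have to be carried to the worker EXPLAINs.
EXPLAIN (ANALYZE, MEMORY, SERIALIZE text) SELECT * FROM dist;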

/* Result for a single remote EXPLAIN command */
typedef struct RemoteExplainPlan

@ -124,6 +134,59 @@ typedef struct ExplainAnalyzeDestination
    TupleDesc lastSavedExplainAnalyzeTupDesc;
} ExplainAnalyzeDestination;

#if PG_VERSION_NUM >= PG_VERSION_17

/*
 * Various places within need to convert bytes to kilobytes. Round these up
 * to the next whole kilobyte.
 * copied from explain.c
 */
#define BYTES_TO_KILOBYTES(b) (((b) + 1023) / 1024)

/* copied from explain.c */
/* Instrumentation data for SERIALIZE option */
typedef struct SerializeMetrics
{
    uint64 bytesSent;         /* # of bytes serialized */
    instr_time timeSpent;     /* time spent serializing */
    BufferUsage bufferUsage;  /* buffers accessed during serialization */
} SerializeMetrics;

/* copied from explain.c */
static bool peek_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_buffer_usage(ExplainState *es, const BufferUsage *usage);
static void show_memory_counters(ExplainState *es,
    const MemoryContextCounters *mem_counters);
static void ExplainIndentText(ExplainState *es);
static void ExplainPrintSerialize(ExplainState *es,
    SerializeMetrics *metrics);
static SerializeMetrics GetSerializationMetrics(DestReceiver *dest);

/*
 * DestReceiver functions for SERIALIZE option
 *
 * A DestReceiver for query tuples, that serializes passed rows into RowData
 * messages while measuring the resources expended and total serialized size,
 * while never sending the data to the client. This allows measuring the
 * overhead of deTOASTing and datatype out/sendfuncs, which are not otherwise
 * exercisable without actually hitting the network.
 *
 * copied from explain.c
 */
typedef struct SerializeDestReceiver
{
    DestReceiver pub;
    ExplainState *es;          /* this EXPLAIN statement's ExplainState */
    int8 format;               /* text or binary, like pq wire protocol */
    TupleDesc attrinfo;        /* the output tuple desc */
    int nattrs;                /* current number of columns */
    FmgrInfo *finfos;          /* precomputed call info for output fns */
    MemoryContext tmpcontext;  /* per-row temporary memory context */
    StringInfoData buf;        /* buffer to hold the constructed message */
    SerializeMetrics metrics;  /* collected metrics */
} SerializeDestReceiver;
#endif


/* Explain functions for distributed queries */
static void ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es);

@ -144,14 +207,27 @@ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOut
    ExplainState *es);
static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es);
static const char * ExplainFormatStr(ExplainFormat format);
#if PG_VERSION_NUM >= PG_VERSION_17
static const char * ExplainSerializeStr(ExplainSerializeOption serializeOption);
#endif
static void ExplainWorkerPlan(PlannedStmt *plannedStmt, DestReceiver *dest,
    ExplainState *es,
    const char *queryString, ParamListInfo params,
    QueryEnvironment *queryEnv,
    const instr_time *planduration,
#if PG_VERSION_NUM >= PG_VERSION_17
    const BufferUsage *bufusage,
    const MemoryContextCounters *mem_counters,
#endif
    double *executionDurationMillisec);
static ExplainFormat ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName,
    ExplainFormat defaultValue);
#if PG_VERSION_NUM >= PG_VERSION_17
static ExplainSerializeOption ExtractFieldExplainSerialize(Datum jsonbDoc,
    const char *fieldName,
    ExplainSerializeOption
    defaultValue);
#endif
static TupleDestination * CreateExplainAnlyzeDestination(Task *task,
    TupleDestination *taskDest);
static void ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,

@ -190,6 +266,14 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
void
CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es)
{
#if PG_VERSION_NUM >= PG_VERSION_16
    if (es->generic)
    {
        ereport(ERROR, (errmsg(
            "EXPLAIN GENERIC_PLAN is currently not supported for Citus tables")));
    }
#endif

    CitusScanState *scanState = (CitusScanState *) node;
    DistributedPlan *distributedPlan = scanState->distributedPlan;
    EState *executorState = ScanStateGetExecutorState(scanState);
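
EXPLAIN (GENERIC_PLAN) is the PG16 feature being rejected above; it plans a parameterized query without binding the parameters, which Citus cannot currently route. A statement that would hit this error (hypothetical table):

-- PG16+; errors out as above when 'dist' is a Citus table.
EXPLAIN (GENERIC_PLAN) SELECT * FROM dist WHERE id = $1;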

@ -1017,24 +1101,30 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
{
    StringInfo explainQuery = makeStringInfo();
    const char *formatStr = ExplainFormatStr(es->format);
#if PG_VERSION_NUM >= PG_VERSION_17
    const char *serializeStr = ExplainSerializeStr(es->serialize);
#endif


    appendStringInfo(explainQuery,
        "EXPLAIN (ANALYZE %s, VERBOSE %s, "
        "COSTS %s, BUFFERS %s, WAL %s, "
#if PG_VERSION_NUM >= PG_VERSION_16
        "GENERIC_PLAN %s, "
        "TIMING %s, SUMMARY %s, "
#if PG_VERSION_NUM >= PG_VERSION_17
        "MEMORY %s, SERIALIZE %s, "
#endif
        "TIMING %s, SUMMARY %s, FORMAT %s) %s",
        "FORMAT %s) %s",
        es->analyze ? "TRUE" : "FALSE",
        es->verbose ? "TRUE" : "FALSE",
        es->costs ? "TRUE" : "FALSE",
        es->buffers ? "TRUE" : "FALSE",
        es->wal ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_16
        es->generic ? "TRUE" : "FALSE",
#endif
        es->timing ? "TRUE" : "FALSE",
        es->summary ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_17
        es->memory ? "TRUE" : "FALSE",
        serializeStr,
#endif
        formatStr,
        queryString);
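
On PG17 the assembled remote query then looks roughly like the following (the shard name and option values are illustrative, not taken from the diff):

EXPLAIN (ANALYZE TRUE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE,
         GENERIC_PLAN FALSE, TIMING TRUE, SUMMARY TRUE, MEMORY FALSE,
         SERIALIZE none, FORMAT TEXT) SELECT a FROM dist_102008 dist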

@ -1073,6 +1163,42 @@ ExplainFormatStr(ExplainFormat format)
}


#if PG_VERSION_NUM >= PG_VERSION_17

/*
 * ExplainSerializeStr converts the given explain serialize option to string.
 */
static const char *
ExplainSerializeStr(ExplainSerializeOption serializeOption)
{
    switch (serializeOption)
    {
        case EXPLAIN_SERIALIZE_NONE:
        {
            return "none";
        }

        case EXPLAIN_SERIALIZE_TEXT:
        {
            return "text";
        }

        case EXPLAIN_SERIALIZE_BINARY:
        {
            return "binary";
        }

        default:
        {
            return "none";
        }
    }
}


#endif


/*
 * worker_last_saved_explain_analyze returns the last saved EXPLAIN ANALYZE output of
 * a worker task query. It returns NULL if nothing has been saved yet.

@ -1132,6 +1258,11 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
    es->verbose = ExtractFieldBoolean(explainOptions, "verbose", es->verbose);
    es->timing = ExtractFieldBoolean(explainOptions, "timing", es->timing);
    es->format = ExtractFieldExplainFormat(explainOptions, "format", es->format);
#if PG_VERSION_NUM >= PG_VERSION_17
    es->memory = ExtractFieldBoolean(explainOptions, "memory", es->memory);
    es->serialize = ExtractFieldExplainSerialize(explainOptions, "serialize",
        es->serialize);
#endif

    TupleDesc tupleDescriptor = NULL;
    Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);

@ -1158,8 +1289,8 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
    }

    /* resolve OIDs of unknown (user-defined) types */
    Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString,
        &paramTypes, &numParams, NULL);
    Query *analyzedQuery = parse_analyze_varparams(parseTree, queryString,
        &paramTypes, &numParams, NULL);

    /* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */
    List *queryList = pg_rewrite_query(analyzedQuery);

@ -1177,6 +1308,36 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
    /* plan query and record planning stats */
    instr_time planStart;
    instr_time planDuration;
#if PG_VERSION_NUM >= PG_VERSION_17
    BufferUsage bufusage_start,
        bufusage;
    MemoryContextCounters mem_counters;
    MemoryContext planner_ctx = NULL;
    MemoryContext saved_ctx = NULL;

    if (es->memory)
    {
        /*
         * Create a new memory context to measure planner's memory consumption
         * accurately. Note that if the planner were to be modified to use a
         * different memory context type, here we would be changing that to
         * AllocSet, which might be undesirable. However, we don't have a way
         * to create a context of the same type as another, so we pray and
         * hope that this is OK.
         *
         * copied from explain.c
         */
        planner_ctx = AllocSetContextCreate(CurrentMemoryContext,
            "explain analyze planner context",
            ALLOCSET_DEFAULT_SIZES);
        saved_ctx = MemoryContextSwitchTo(planner_ctx);
    }

    if (es->buffers)
    {
        bufusage_start = pgBufferUsage;
    }
#endif

    INSTR_TIME_SET_CURRENT(planStart);

@ -1185,9 +1346,32 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
    INSTR_TIME_SET_CURRENT(planDuration);
    INSTR_TIME_SUBTRACT(planDuration, planStart);

#if PG_VERSION_NUM >= PG_VERSION_17
    if (es->memory)
    {
        MemoryContextSwitchTo(saved_ctx);
        MemoryContextMemConsumed(planner_ctx, &mem_counters);
    }

    /* calc differences of buffer counters. */
    if (es->buffers)
    {
        memset(&bufusage, 0, sizeof(BufferUsage));
        BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
    }

    /* do the actual EXPLAIN ANALYZE */
    ExplainWorkerPlan(plan, tupleStoreDest, es, queryString, boundParams, NULL,
        &planDuration,
        (es->buffers ? &bufusage : NULL),
        (es->memory ? &mem_counters : NULL),
        &executionDurationMillisec);
#else

    /* do the actual EXPLAIN ANALYZE */
    ExplainWorkerPlan(plan, tupleStoreDest, es, queryString, boundParams, NULL,
        &planDuration, &executionDurationMillisec);
#endif

    ExplainEndOutput(es);

@ -1256,6 +1440,50 @@ ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName, ExplainFormat
}


#if PG_VERSION_NUM >= PG_VERSION_17

/*
 * ExtractFieldExplainSerialize gets value of fieldName from jsonbDoc, or returns
 * defaultValue if it doesn't exist.
 */
static ExplainSerializeOption
ExtractFieldExplainSerialize(Datum jsonbDoc, const char *fieldName, ExplainSerializeOption
    defaultValue)
{
    Datum jsonbDatum = 0;
    bool found = ExtractFieldJsonbDatum(jsonbDoc, fieldName, &jsonbDatum);
    if (!found)
    {
        return defaultValue;
    }

    const char *serializeStr = DatumGetCString(DirectFunctionCall1(jsonb_out,
        jsonbDatum));
    if (pg_strcasecmp(serializeStr, "\"none\"") == 0)
    {
        return EXPLAIN_SERIALIZE_NONE;
    }
    else if (pg_strcasecmp(serializeStr, "\"off\"") == 0)
    {
        return EXPLAIN_SERIALIZE_NONE;
    }
    else if (pg_strcasecmp(serializeStr, "\"text\"") == 0)
    {
        return EXPLAIN_SERIALIZE_TEXT;
    }
    else if (pg_strcasecmp(serializeStr, "\"binary\"") == 0)
    {
        return EXPLAIN_SERIALIZE_BINARY;
    }

    ereport(ERROR, (errmsg("Invalid explain analyze serialize: %s", serializeStr)));
    return 0;
}


#endif
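
Since the options travel as a jsonb document, a direct call to the worker UDF looks roughly like this (a sketch; the exact return shape of worker_save_query_explain_analyze is not shown in this diff):

SELECT * FROM worker_save_query_explain_analyze(
    'SELECT 1',
    '{"verbose": false, "timing": true, "serialize": "text", "format": "text"}')
    AS (a int);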

/*
 * CitusExplainOneQuery is the executor hook that is called when
 * postgres wants to explain a query.

@ -1273,6 +1501,10 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
    CurrentDistributedQueryExplainOptions.summary = es->summary;
    CurrentDistributedQueryExplainOptions.timing = es->timing;
    CurrentDistributedQueryExplainOptions.format = es->format;
#if PG_VERSION_NUM >= PG_VERSION_17
    CurrentDistributedQueryExplainOptions.memory = es->memory;
    CurrentDistributedQueryExplainOptions.serialize = es->serialize;
#endif

    /* rest is copied from ExplainOneQuery() */
    instr_time planstart,

@ -1595,11 +1827,18 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc,
    StringInfo explainOptions = makeStringInfo();
    appendStringInfo(explainOptions,
        "{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, \"wal\": %s, "
#if PG_VERSION_NUM >= PG_VERSION_17
        "\"memory\": %s, \"serialize\": \"%s\", "
#endif
        "\"timing\": %s, \"summary\": %s, \"format\": \"%s\"}",
        CurrentDistributedQueryExplainOptions.verbose ? "true" : "false",
        CurrentDistributedQueryExplainOptions.costs ? "true" : "false",
        CurrentDistributedQueryExplainOptions.buffers ? "true" : "false",
        CurrentDistributedQueryExplainOptions.wal ? "true" : "false",
#if PG_VERSION_NUM >= PG_VERSION_17
        CurrentDistributedQueryExplainOptions.memory ? "true" : "false",
        ExplainSerializeStr(CurrentDistributedQueryExplainOptions.serialize),
#endif
        CurrentDistributedQueryExplainOptions.timing ? "true" : "false",
        CurrentDistributedQueryExplainOptions.summary ? "true" : "false",
        ExplainFormatStr(CurrentDistributedQueryExplainOptions.format));

@ -1824,7 +2063,12 @@ ExplainOneQuery(Query *query, int cursorOptions,
static void
ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es,
    const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv,
    const instr_time *planduration, double *executionDurationMillisec)
    const instr_time *planduration,
#if PG_VERSION_NUM >= PG_VERSION_17
    const BufferUsage *bufusage,
    const MemoryContextCounters *mem_counters,
#endif
    double *executionDurationMillisec)
{
    QueryDesc *queryDesc;
    instr_time starttime;

@ -1893,6 +2137,32 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
    /* Create textual dump of plan tree */
    ExplainPrintPlan(es, queryDesc);

#if PG_VERSION_NUM >= PG_VERSION_17
    /* Show buffer and/or memory usage in planning */
    if (peek_buffer_usage(es, bufusage) || mem_counters)
    {
        ExplainOpenGroup("Planning", "Planning", true, es);

        if (es->format == EXPLAIN_FORMAT_TEXT)
        {
            ExplainIndentText(es);
            appendStringInfoString(es->str, "Planning:\n");
            es->indent++;
        }

        if (bufusage)
            show_buffer_usage(es, bufusage);

        if (mem_counters)
            show_memory_counters(es, mem_counters);

        if (es->format == EXPLAIN_FORMAT_TEXT)
            es->indent--;

        ExplainCloseGroup("Planning", "Planning", true, es);
    }
#endif

    if (es->summary && planduration)
    {
        double plantime = INSTR_TIME_GET_DOUBLE(*planduration);

@ -1913,6 +2183,23 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
    if (es->costs)
        ExplainPrintJITSummary(es, queryDesc);

#if PG_VERSION_NUM >= PG_VERSION_17
    if (es->serialize != EXPLAIN_SERIALIZE_NONE)
    {
        /* the SERIALIZE option requires its own tuple receiver */
        DestReceiver *dest_serialize = CreateExplainSerializeDestReceiver(es);

        /* grab serialization metrics before we destroy the DestReceiver */
        SerializeMetrics serializeMetrics = GetSerializationMetrics(dest_serialize);

        /* call the DestReceiver's destroy method even during explain */
        dest_serialize->rDestroy(dest_serialize);

        /* Print info about serialization of output */
        ExplainPrintSerialize(es, &serializeMetrics);
    }
#endif

    /*
     * Close down the query and free resources. Include time for this in the
     * total execution time (although it should be pretty minimal).

@ -1961,3 +2248,351 @@ elapsed_time(instr_time *starttime)
    INSTR_TIME_SUBTRACT(endtime, *starttime);
    return INSTR_TIME_GET_DOUBLE(endtime);
}


#if PG_VERSION_NUM >= PG_VERSION_17
/*
 * Return whether show_buffer_usage would have anything to print, if given
 * the same 'usage' data. Note that when the format is anything other than
 * text, we print even if the counters are all zeroes.
 *
 * Copied from explain.c.
 */
static bool
peek_buffer_usage(ExplainState *es, const BufferUsage *usage)
{
    bool has_shared;
    bool has_local;
    bool has_temp;
    bool has_shared_timing;
    bool has_local_timing;
    bool has_temp_timing;

    if (usage == NULL)
        return false;

    if (es->format != EXPLAIN_FORMAT_TEXT)
        return true;

    has_shared = (usage->shared_blks_hit > 0 ||
        usage->shared_blks_read > 0 ||
        usage->shared_blks_dirtied > 0 ||
        usage->shared_blks_written > 0);
    has_local = (usage->local_blks_hit > 0 ||
        usage->local_blks_read > 0 ||
        usage->local_blks_dirtied > 0 ||
        usage->local_blks_written > 0);
    has_temp = (usage->temp_blks_read > 0 ||
        usage->temp_blks_written > 0);
    has_shared_timing = (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time) ||
        !INSTR_TIME_IS_ZERO(usage->shared_blk_write_time));
    has_local_timing = (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time) ||
        !INSTR_TIME_IS_ZERO(usage->local_blk_write_time));
    has_temp_timing = (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time) ||
        !INSTR_TIME_IS_ZERO(usage->temp_blk_write_time));

    return has_shared || has_local || has_temp || has_shared_timing ||
        has_local_timing || has_temp_timing;
}


/*
 * Show buffer usage details. This had better be kept in sync with
 * peek_buffer_usage.
 *
 * Copied from explain.c.
 */
static void
show_buffer_usage(ExplainState *es, const BufferUsage *usage)
{
    if (es->format == EXPLAIN_FORMAT_TEXT)
    {
        bool has_shared = (usage->shared_blks_hit > 0 ||
            usage->shared_blks_read > 0 ||
            usage->shared_blks_dirtied > 0 ||
            usage->shared_blks_written > 0);
        bool has_local = (usage->local_blks_hit > 0 ||
            usage->local_blks_read > 0 ||
            usage->local_blks_dirtied > 0 ||
            usage->local_blks_written > 0);
        bool has_temp = (usage->temp_blks_read > 0 ||
            usage->temp_blks_written > 0);
        bool has_shared_timing = (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time) ||
            !INSTR_TIME_IS_ZERO(usage->shared_blk_write_time));
        bool has_local_timing = (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time) ||
            !INSTR_TIME_IS_ZERO(usage->local_blk_write_time));
        bool has_temp_timing = (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time) ||
            !INSTR_TIME_IS_ZERO(usage->temp_blk_write_time));

        /* Show only positive counter values. */
        if (has_shared || has_local || has_temp)
        {
            ExplainIndentText(es);
            appendStringInfoString(es->str, "Buffers:");

            if (has_shared)
            {
                appendStringInfoString(es->str, " shared");
                if (usage->shared_blks_hit > 0)
                    appendStringInfo(es->str, " hit=%lld",
                        (long long) usage->shared_blks_hit);
                if (usage->shared_blks_read > 0)
                    appendStringInfo(es->str, " read=%lld",
                        (long long) usage->shared_blks_read);
                if (usage->shared_blks_dirtied > 0)
                    appendStringInfo(es->str, " dirtied=%lld",
                        (long long) usage->shared_blks_dirtied);
                if (usage->shared_blks_written > 0)
                    appendStringInfo(es->str, " written=%lld",
                        (long long) usage->shared_blks_written);
                if (has_local || has_temp)
                    appendStringInfoChar(es->str, ',');
            }
            if (has_local)
            {
                appendStringInfoString(es->str, " local");
                if (usage->local_blks_hit > 0)
                    appendStringInfo(es->str, " hit=%lld",
                        (long long) usage->local_blks_hit);
                if (usage->local_blks_read > 0)
                    appendStringInfo(es->str, " read=%lld",
                        (long long) usage->local_blks_read);
                if (usage->local_blks_dirtied > 0)
                    appendStringInfo(es->str, " dirtied=%lld",
                        (long long) usage->local_blks_dirtied);
                if (usage->local_blks_written > 0)
                    appendStringInfo(es->str, " written=%lld",
                        (long long) usage->local_blks_written);
                if (has_temp)
                    appendStringInfoChar(es->str, ',');
            }
            if (has_temp)
            {
                appendStringInfoString(es->str, " temp");
                if (usage->temp_blks_read > 0)
                    appendStringInfo(es->str, " read=%lld",
                        (long long) usage->temp_blks_read);
                if (usage->temp_blks_written > 0)
                    appendStringInfo(es->str, " written=%lld",
                        (long long) usage->temp_blks_written);
            }
            appendStringInfoChar(es->str, '\n');
        }

        /* As above, show only positive counter values. */
        if (has_shared_timing || has_local_timing || has_temp_timing)
        {
            ExplainIndentText(es);
            appendStringInfoString(es->str, "I/O Timings:");

            if (has_shared_timing)
            {
                appendStringInfoString(es->str, " shared");
                if (!INSTR_TIME_IS_ZERO(usage->shared_blk_read_time))
                    appendStringInfo(es->str, " read=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->shared_blk_read_time));
                if (!INSTR_TIME_IS_ZERO(usage->shared_blk_write_time))
                    appendStringInfo(es->str, " write=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->shared_blk_write_time));
                if (has_local_timing || has_temp_timing)
                    appendStringInfoChar(es->str, ',');
            }
            if (has_local_timing)
            {
                appendStringInfoString(es->str, " local");
                if (!INSTR_TIME_IS_ZERO(usage->local_blk_read_time))
                    appendStringInfo(es->str, " read=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->local_blk_read_time));
                if (!INSTR_TIME_IS_ZERO(usage->local_blk_write_time))
                    appendStringInfo(es->str, " write=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->local_blk_write_time));
                if (has_temp_timing)
                    appendStringInfoChar(es->str, ',');
            }
            if (has_temp_timing)
            {
                appendStringInfoString(es->str, " temp");
                if (!INSTR_TIME_IS_ZERO(usage->temp_blk_read_time))
                    appendStringInfo(es->str, " read=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->temp_blk_read_time));
                if (!INSTR_TIME_IS_ZERO(usage->temp_blk_write_time))
                    appendStringInfo(es->str, " write=%0.3f",
                        INSTR_TIME_GET_MILLISEC(usage->temp_blk_write_time));
            }
            appendStringInfoChar(es->str, '\n');
        }
    }
    else
    {
        ExplainPropertyInteger("Shared Hit Blocks", NULL,
            usage->shared_blks_hit, es);
        ExplainPropertyInteger("Shared Read Blocks", NULL,
            usage->shared_blks_read, es);
        ExplainPropertyInteger("Shared Dirtied Blocks", NULL,
            usage->shared_blks_dirtied, es);
        ExplainPropertyInteger("Shared Written Blocks", NULL,
            usage->shared_blks_written, es);
        ExplainPropertyInteger("Local Hit Blocks", NULL,
            usage->local_blks_hit, es);
        ExplainPropertyInteger("Local Read Blocks", NULL,
            usage->local_blks_read, es);
        ExplainPropertyInteger("Local Dirtied Blocks", NULL,
            usage->local_blks_dirtied, es);
        ExplainPropertyInteger("Local Written Blocks", NULL,
            usage->local_blks_written, es);
        ExplainPropertyInteger("Temp Read Blocks", NULL,
            usage->temp_blks_read, es);
        ExplainPropertyInteger("Temp Written Blocks", NULL,
            usage->temp_blks_written, es);
        if (track_io_timing)
        {
            ExplainPropertyFloat("Shared I/O Read Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->shared_blk_read_time),
                3, es);
            ExplainPropertyFloat("Shared I/O Write Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->shared_blk_write_time),
                3, es);
            ExplainPropertyFloat("Local I/O Read Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->local_blk_read_time),
                3, es);
            ExplainPropertyFloat("Local I/O Write Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->local_blk_write_time),
                3, es);
            ExplainPropertyFloat("Temp I/O Read Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->temp_blk_read_time),
                3, es);
            ExplainPropertyFloat("Temp I/O Write Time", "ms",
                INSTR_TIME_GET_MILLISEC(usage->temp_blk_write_time),
                3, es);
        }
    }
}


/*
 * Indent a text-format line.
 *
 * We indent by two spaces per indentation level. However, when emitting
 * data for a parallel worker there might already be data on the current line
 * (cf. ExplainOpenWorker); in that case, don't indent any more.
 *
 * Copied from explain.c.
 */
static void
ExplainIndentText(ExplainState *es)
{
    Assert(es->format == EXPLAIN_FORMAT_TEXT);
    if (es->str->len == 0 || es->str->data[es->str->len - 1] == '\n')
        appendStringInfoSpaces(es->str, es->indent * 2);
}


/*
 * Show memory usage details.
 *
 * Copied from explain.c.
 */
static void
show_memory_counters(ExplainState *es, const MemoryContextCounters *mem_counters)
{
    int64 memUsedkB = BYTES_TO_KILOBYTES(mem_counters->totalspace -
        mem_counters->freespace);
    int64 memAllocatedkB = BYTES_TO_KILOBYTES(mem_counters->totalspace);

    if (es->format == EXPLAIN_FORMAT_TEXT)
    {
        ExplainIndentText(es);
        appendStringInfo(es->str,
            "Memory: used=" INT64_FORMAT "kB allocated=" INT64_FORMAT "kB",
            memUsedkB, memAllocatedkB);
        appendStringInfoChar(es->str, '\n');
    }
    else
    {
        ExplainPropertyInteger("Memory Used", "kB", memUsedkB, es);
        ExplainPropertyInteger("Memory Allocated", "kB", memAllocatedkB, es);
    }
}


/*
 * ExplainPrintSerialize -
 *	  Append information about query output volume to es->str.
 *
 * Copied from explain.c.
 */
static void
ExplainPrintSerialize(ExplainState *es, SerializeMetrics *metrics)
{
    const char *format;

    /* We shouldn't get called for EXPLAIN_SERIALIZE_NONE */
    if (es->serialize == EXPLAIN_SERIALIZE_TEXT)
        format = "text";
    else
    {
        Assert(es->serialize == EXPLAIN_SERIALIZE_BINARY);
        format = "binary";
    }

    ExplainOpenGroup("Serialization", "Serialization", true, es);

    if (es->format == EXPLAIN_FORMAT_TEXT)
    {
        ExplainIndentText(es);
        if (es->timing)
            appendStringInfo(es->str, "Serialization: time=%.3f ms output=" UINT64_FORMAT "kB format=%s\n",
                1000.0 * INSTR_TIME_GET_DOUBLE(metrics->timeSpent),
                BYTES_TO_KILOBYTES(metrics->bytesSent),
                format);
        else
            appendStringInfo(es->str, "Serialization: output=" UINT64_FORMAT "kB format=%s\n",
                BYTES_TO_KILOBYTES(metrics->bytesSent),
                format);

        if (es->buffers && peek_buffer_usage(es, &metrics->bufferUsage))
        {
            es->indent++;
            show_buffer_usage(es, &metrics->bufferUsage);
            es->indent--;
        }
    }
    else
    {
        if (es->timing)
            ExplainPropertyFloat("Time", "ms",
                1000.0 * INSTR_TIME_GET_DOUBLE(metrics->timeSpent),
                3, es);
        ExplainPropertyUInteger("Output Volume", "kB",
            BYTES_TO_KILOBYTES(metrics->bytesSent), es);
        ExplainPropertyText("Format", format, es);
        if (es->buffers)
            show_buffer_usage(es, &metrics->bufferUsage);
    }

    ExplainCloseGroup("Serialization", "Serialization", true, es);
}


/*
 * GetSerializationMetrics - collect metrics
 *
 * We have to be careful here since the receiver could be an IntoRel
 * receiver if the subject statement is CREATE TABLE AS. In that
 * case, return all-zeroes stats.
 *
 * Copied from explain.c.
 */
static SerializeMetrics
GetSerializationMetrics(DestReceiver *dest)
{
    SerializeMetrics empty;

    if (dest->mydest == DestExplainSerialize)
        return ((SerializeDestReceiver *) dest)->metrics;

    memset(&empty, 0, sizeof(SerializeMetrics));
    INSTR_TIME_SET_ZERO(empty.timeSpent);

    return empty;
}
#endif

@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC
    }
    else if (IsA(originalNode, Var))
    {
        Var *newColumn = copyObject((Var *) originalNode);
        newColumn->varno = masterTableId;
        newColumn->varattno = walkerContext->columnId;
        Var *origColumn = (Var *) originalNode;
        Var *newColumn = makeVar(masterTableId, walkerContext->columnId,
            origColumn->vartype, origColumn->vartypmod,
            origColumn->varcollid, origColumn->varlevelsup);
        walkerContext->columnId++;

        newNode = (Node *) newColumn;

@ -4753,22 +4754,35 @@ WorkerLimitCount(Node *limitCount, Node *limitOffset, OrderByLimitReference
    if (workerLimitNode != NULL && limitOffset != NULL)
    {
        Const *workerLimitConst = (Const *) workerLimitNode;
        Const *workerOffsetConst = (Const *) limitOffset;
        int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
        int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);

        workerLimitCount = workerLimitCount + workerOffsetCount;
        workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount);
        /* Only update the worker limit if the const is not null. */
        if (!workerLimitConst->constisnull)
        {
            Const *workerOffsetConst = (Const *) limitOffset;
            int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);

            /* If the offset is null, it defaults to 0 when cast to int64. */
            int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);
            workerLimitCount = workerLimitCount + workerOffsetCount;
            workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount);
        }
    }

    /* display debug message on limit push down */
    if (workerLimitNode != NULL)
    {
        Const *workerLimitConst = (Const *) workerLimitNode;
        int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
        if (!workerLimitConst->constisnull)
        {
            int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);

            ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT,
                workerLimitCount)));
            ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT,
                workerLimitCount)));
        }
        else
        {
            ereport(DEBUG1, (errmsg("push down of limit count: ALL")));
        }
    }

    return workerLimitNode;
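
The null-const branches above correspond to LIMIT ALL (equivalently LIMIT NULL), which the parser represents as a null Const; a sketch with a hypothetical table:

-- The limit Const is null here, so no count is pushed down and the
-- debug message reads "push down of limit count: ALL".
SELECT * FROM dist ORDER BY a LIMIT ALL OFFSET 3;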

@ -1170,7 +1170,8 @@ HasComplexRangeTableType(Query *queryTree)
        if (rangeTableEntry->rtekind != RTE_RELATION &&
            rangeTableEntry->rtekind != RTE_SUBQUERY &&
            rangeTableEntry->rtekind != RTE_FUNCTION &&
            rangeTableEntry->rtekind != RTE_VALUES)
            rangeTableEntry->rtekind != RTE_VALUES &&
            !IsJsonTableRTE(rangeTableEntry))
        {
            hasComplexRangeTableType = true;
        }

@ -2556,13 +2556,15 @@ AllShardsColocated(List *relationShardList)


/*
 * ContainsOnlyLocalTables returns true if there is only
 * local tables and not any distributed or reference table.
 * ContainsOnlyLocalOrReferenceTables returns true if there are no distributed
 * tables in the query. In other words, the query might reference only local
 * tables and/or reference tables, but no fully distributed tables.
 */
bool
ContainsOnlyLocalTables(RTEListProperties *rteProperties)
ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties)
{
    return !rteProperties->hasDistributedTable && !rteProperties->hasReferenceTable;
    /* If hasDistributedTable is false, then all tables are either local or reference. */
    return !rteProperties->hasDistributedTable;
}
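
Under the renamed check, a modification that touches only local and reference tables can qualify as a local-table modification; a sketch with hypothetical names:

-- 'loc' is a plain local table and 'ref' a reference table; with no
-- distributed table involved, ContainsOnlyLocalOrReferenceTables is true.
DELETE FROM loc USING ref WHERE loc.id = ref.id;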

@ -45,8 +45,6 @@
static RangeTblEntry * AnchorRte(Query *subquery);
static List * UnionRelationRestrictionLists(List *firstRelationList,
    List *secondRelationList);
static List * CreateFilteredTargetListForRelation(Oid relationId,
    List *requiredAttributes);
static List * CreateDummyTargetList(Oid relationId, List *requiredAttributes);
static TargetEntry * CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index
    rteIndex,

@ -378,7 +376,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes)
 * only the required columns of the given relation. If there are no required
 * columns then a dummy NULL column is put as the only entry.
 */
static List *
List *
CreateFilteredTargetListForRelation(Oid relationId, List *requiredAttributes)
{
    Relation relation = relation_open(relationId, AccessShareLock);

@ -61,7 +61,8 @@ typedef enum RecurringTuplesType
    RECURRING_TUPLES_FUNCTION,
    RECURRING_TUPLES_EMPTY_JOIN_TREE,
    RECURRING_TUPLES_RESULT_FUNCTION,
    RECURRING_TUPLES_VALUES
    RECURRING_TUPLES_VALUES,
    RECURRING_TUPLES_JSON_TABLE
} RecurringTuplesType;

/*

@ -347,7 +348,8 @@ IsFunctionOrValuesRTE(Node *node)
    RangeTblEntry *rangeTblEntry = (RangeTblEntry *) node;

    if (rangeTblEntry->rtekind == RTE_FUNCTION ||
        rangeTblEntry->rtekind == RTE_VALUES)
        rangeTblEntry->rtekind == RTE_VALUES ||
        IsJsonTableRTE(rangeTblEntry))
    {
        return true;
    }

@ -700,6 +702,13 @@ DeferErrorIfFromClauseRecurs(Query *queryTree)
            "the FROM clause contains VALUES", NULL,
            NULL);
    }
    else if (recurType == RECURRING_TUPLES_JSON_TABLE)
    {
        return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
            "correlated subqueries are not supported when "
            "the FROM clause contains JSON_TABLE", NULL,
            NULL);
    }


    /*

@ -1204,7 +1213,8 @@ DeferErrorIfUnsupportedTableCombination(Query *queryTree)
     */
    if (rangeTableEntry->rtekind == RTE_RELATION ||
        rangeTableEntry->rtekind == RTE_SUBQUERY ||
        rangeTableEntry->rtekind == RTE_RESULT)
        rangeTableEntry->rtekind == RTE_RESULT ||
        IsJsonTableRTE(rangeTableEntry))
    {
        /* accepted */
    }

@ -1372,6 +1382,13 @@ DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree)
            "VALUES is not supported within a "
            "UNION", NULL);
    }
    else if (recurType == RECURRING_TUPLES_JSON_TABLE)
    {
        return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
            "cannot push down this subquery",
            "JSON_TABLE is not supported within a "
            "UNION", NULL);
    }

    return NULL;
}

@ -1477,6 +1494,11 @@ RecurringTypeDescription(RecurringTuplesType recurType)
        return "a VALUES clause";
    }

    case RECURRING_TUPLES_JSON_TABLE:
    {
        return "a JSON_TABLE";
    }

    case RECURRING_TUPLES_INVALID:
    {
        /*

@ -1673,7 +1695,8 @@ DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *plannerInfo,
     * strings anyway.
     */
    if (recurType != RECURRING_TUPLES_VALUES &&
        recurType != RECURRING_TUPLES_RESULT_FUNCTION)
        recurType != RECURRING_TUPLES_RESULT_FUNCTION &&
        recurType != RECURRING_TUPLES_JSON_TABLE)
    {
        recurTypeDescription = psprintf("%s (%s)", recurTypeDescription,
            recurringRangeTableEntry->eref->

@ -1750,6 +1773,26 @@ ContainsRecurringRangeTable(List *rangeTable, RecurringTuplesType *recurType)
}


/*
 * IsJsonTableRTE checks whether the RTE refers to a JSON_TABLE
 * table function, which was introduced in PostgreSQL 17.
 */
bool
IsJsonTableRTE(RangeTblEntry *rte)
{
#if PG_VERSION_NUM >= PG_VERSION_17
    if (rte == NULL)
    {
        return false;
    }
    return (rte->rtekind == RTE_TABLEFUNC &&
        rte->tablefunc->functype == TFT_JSON_TABLE);
#endif

    return false;
}
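
JSON_TABLE (PG17) expands a JSON document into rows directly in the FROM clause, so it yields the same tuples on every shard; that is why it is classified alongside the other recurring-tuple kinds above. A sketch:

-- PG17 syntax: produces rows (1), (2) identically wherever it runs.
SELECT jt.a
FROM JSON_TABLE('[{"a": 1}, {"a": 2}]'::jsonb, '$[*]'
                COLUMNS (a int PATH '$.a')) AS jt;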

/*
 * HasRecurringTuples returns whether any part of the expression will generate
 * the same set of tuples in every query on shards when executing a distributed

@ -1811,6 +1854,11 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
        *recurType = RECURRING_TUPLES_VALUES;
        return true;
    }
    else if (IsJsonTableRTE(rangeTableEntry))
    {
        *recurType = RECURRING_TUPLES_JSON_TABLE;
        return true;
    }

    return false;
}

@ -2049,6 +2097,16 @@ CreateSubqueryTargetListAndAdjustVars(List *columnList)
     */
    column->varno = 1;
    column->varattno = resNo;

    /*
     * A single subquery means there is one range table entry, so with Postgres
     * 16+ we need to ensure that the column's varnullingrels - the set of join
     * rels that can null the var - is empty. Otherwise, when given the query,
     * the Postgres planner may attempt to access a non-existent range table
     * and segfault, as in #7787.
     */
#if PG_VERSION_NUM >= PG_VERSION_16
    column->varnullingrels = NULL;
#endif
}

return subqueryTargetEntryList;

@ -2291,6 +2291,129 @@ BuildReadIntermediateResultsArrayQuery(List *targetEntryList,
}


/*
 * BuildEmptyResultQuery, for the given target list, builds an empty relation
 * with the same target list. For example, if the target list is (a, b, c),
 * and resultId is "empty", then it returns a Query object for this SQL:
 * SELECT a, b, c FROM (VALUES (NULL, NULL, NULL)) AS empty(a, b, c) WHERE false;
 */
Query *
BuildEmptyResultQuery(List *targetEntryList, char *resultId)
{
    List *targetList = NIL;
    ListCell *targetEntryCell = NULL;

    List *colTypes = NIL;
    List *colTypMods = NIL;
    List *colCollations = NIL;
    List *colNames = NIL;

    List *valueConsts = NIL;
    List *valueTargetList = NIL;
    List *valueColNames = NIL;

    int targetIndex = 1;

    /* build the target list and column lists needed */
    foreach(targetEntryCell, targetEntryList)
    {
        TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell);
        Node *targetExpr = (Node *) targetEntry->expr;
        char *columnName = targetEntry->resname;
        Oid columnType = exprType(targetExpr);
        Oid columnTypMod = exprTypmod(targetExpr);
        Oid columnCollation = exprCollation(targetExpr);

        if (targetEntry->resjunk)
        {
            continue;
        }

        Var *tgtVar = makeVar(1, targetIndex, columnType, columnTypMod, columnCollation,
            0);
        TargetEntry *tgtEntry = makeTargetEntry((Expr *) tgtVar, targetIndex, columnName,
            false);
        Const *valueConst = makeConst(columnType, columnTypMod, columnCollation, 0,
            (Datum) 0, true, false);

        StringInfoData *columnString = makeStringInfo();
        appendStringInfo(columnString, "column%d", targetIndex);

        TargetEntry *valueTgtEntry = makeTargetEntry((Expr *) tgtVar, targetIndex,
            columnString->data, false);

        valueConsts = lappend(valueConsts, valueConst);
        valueTargetList = lappend(valueTargetList, valueTgtEntry);
        valueColNames = lappend(valueColNames, makeString(columnString->data));

        colNames = lappend(colNames, makeString(columnName));
        colTypes = lappend_oid(colTypes, columnType);
        colTypMods = lappend_oid(colTypMods, columnTypMod);
        colCollations = lappend_oid(colCollations, columnCollation);

        targetList = lappend(targetList, tgtEntry);

        targetIndex++;
    }

    /* Build a RangeTable Entry for the VALUES relation */
    RangeTblEntry *valuesRangeTable = makeNode(RangeTblEntry);
    valuesRangeTable->rtekind = RTE_VALUES;
    valuesRangeTable->values_lists = list_make1(valueConsts);
    valuesRangeTable->colcollations = colCollations;
    valuesRangeTable->coltypes = colTypes;
    valuesRangeTable->coltypmods = colTypMods;
    valuesRangeTable->alias = NULL;
    valuesRangeTable->eref = makeAlias("*VALUES*", valueColNames);
    valuesRangeTable->inFromCl = true;

    RangeTblRef *valuesRTRef = makeNode(RangeTblRef);
    valuesRTRef->rtindex = 1;

    FromExpr *valuesJoinTree = makeNode(FromExpr);
    valuesJoinTree->fromlist = list_make1(valuesRTRef);

    /* build the VALUES query */
    Query *valuesQuery = makeNode(Query);
    valuesQuery->canSetTag = true;
    valuesQuery->commandType = CMD_SELECT;
    valuesQuery->rtable = list_make1(valuesRangeTable);
#if PG_VERSION_NUM >= PG_VERSION_16
    valuesQuery->rteperminfos = NIL;
#endif
    valuesQuery->jointree = valuesJoinTree;
    valuesQuery->targetList = valueTargetList;

    /* build the relation selecting from the VALUES */
    RangeTblEntry *emptyRangeTable = makeNode(RangeTblEntry);
    emptyRangeTable->rtekind = RTE_SUBQUERY;
    emptyRangeTable->subquery = valuesQuery;
    emptyRangeTable->alias = makeAlias(resultId, colNames);
    emptyRangeTable->eref = emptyRangeTable->alias;
    emptyRangeTable->inFromCl = true;

    /* build the SELECT query */
    Query *resultQuery = makeNode(Query);
    resultQuery->commandType = CMD_SELECT;
    resultQuery->canSetTag = true;
    resultQuery->rtable = list_make1(emptyRangeTable);
#if PG_VERSION_NUM >= PG_VERSION_16
    resultQuery->rteperminfos = NIL;
#endif
    RangeTblRef *rangeTableRef = makeNode(RangeTblRef);
    rangeTableRef->rtindex = 1;

    /* insert a FALSE qual to ensure 0 rows returned */
    FromExpr *joinTree = makeNode(FromExpr);
    joinTree->fromlist = list_make1(rangeTableRef);
    joinTree->quals = makeBoolConst(false, false);
    resultQuery->jointree = joinTree;
    resultQuery->targetList = targetList;

    return resultQuery;
}


/*
 * BuildReadIntermediateResultsQuery is the common code for generating
 * queries to read from result files. It is used by

@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

    if (RunningUnderIsolationTest)
    if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
            SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY,

@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

    if (RunningUnderIsolationTest)
    if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
            SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY,

@ -1882,14 +1882,15 @@ WaitForGroupedLogicalRepTargetsToCatchUp(XLogRecPtr sourcePosition,
            GetCurrentTimestamp(),
            logicalReplicationProgressReportTimeout))
        {
            ereport(LOG, (errmsg(
                "The LSN of the target subscriptions on node %s:%d have "
                "increased from %ld to %ld at %s where the source LSN is %ld ",
                superuserConnection->hostname,
                superuserConnection->port, previousTargetBeforeThisLoop,
                targetPosition,
                timestamptz_to_str(previousLSNIncrementTime),
                sourcePosition)));
            ereport(LOG, (errmsg("The LSN of the target subscriptions on node %s:%d "
                "has increased from %X/%X to %X/%X at %s where the "
                "source LSN is %X/%X ",
                superuserConnection->hostname,
                superuserConnection->port,
                LSN_FORMAT_ARGS(previousTargetBeforeThisLoop),
                LSN_FORMAT_ARGS(targetPosition),
                timestamptz_to_str(previousLSNIncrementTime),
                LSN_FORMAT_ARGS(sourcePosition))));

            previousReportTime = GetCurrentTimestamp();
        }
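
The switch to %X/%X with LSN_FORMAT_ARGS() prints LSNs in their canonical two-part hex form instead of as a raw signed integer; for reference:

-- An LSN renders as two hex words, e.g. 0/1A2B3C4D, matching the
-- format now used by the log message above.
SELECT pg_current_wal_lsn();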
|
||||
|
|
|
@ -94,6 +94,42 @@ replication_origin_filter_cb(LogicalDecodingContext *ctx, RepOriginId origin_id)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* update_replication_progress is copied from Postgres 15. We use it to send keepalive
|
||||
* messages when we are filtering out the wal changes resulting from the initial copy.
|
||||
* If we do not send out messages long enough, wal reciever will time out.
|
||||
* Postgres 16 has refactored this code such that keepalive messages are sent during
|
||||
* reordering phase which is above change_cb. So we do not need to send keepalive in
|
||||
* change_cb.
|
||||
*/
|
||||
#if (PG_VERSION_NUM < PG_VERSION_16)
|
||||
static void
|
||||
update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
|
||||
{
|
||||
static int changes_count = 0;
|
||||
|
||||
/*
|
||||
* We don't want to try sending a keepalive message after processing each
|
||||
* change as that can have overhead. Tests revealed that there is no
|
||||
* noticeable overhead in doing it after continuously processing 100 or so
|
||||
* changes.
|
||||
*/
|
||||
#define CHANGES_THRESHOLD 100
|
||||
|
||||
/*
|
||||
* After continuously processing CHANGES_THRESHOLD changes, we
|
||||
* try to send a keepalive message if required.
|
||||
*/
|
||||
if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD)
|
||||
{
|
||||
OutputPluginUpdateProgress(ctx, skipped_xact);
|
||||
changes_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* shard_split_change_cb function emits the incoming tuple change
|
||||
* to the appropriate destination shard.
|
||||
|
@ -112,6 +148,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
|
|||
return;
|
||||
}
|
||||
|
||||
#if (PG_VERSION_NUM < PG_VERSION_16)
|
||||
|
||||
/* Send replication keepalive. */
|
||||
update_replication_progress(ctx, false);
|
||||
#endif
|
||||
|
||||
/* check if the relation is publishable.*/
|
||||
if (!is_publishable_relation(relation))
|
||||
{
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include "catalog/pg_extension.h"
|
||||
#include "commands/explain.h"
|
||||
#include "commands/extension.h"
|
||||
#include "commands/seclabel.h"
|
||||
#include "common/string.h"
|
||||
#include "executor/executor.h"
|
||||
#include "libpq/auth.h"
|
||||
|
@ -173,15 +174,11 @@ static bool FinishedStartupCitusBackend = false;
|
|||
|
||||
static object_access_hook_type PrevObjectAccessHook = NULL;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static shmem_request_hook_type prev_shmem_request_hook = NULL;
|
||||
#endif
|
||||
|
||||
void _PG_init(void);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static void citus_shmem_request(void);
|
||||
#endif
|
||||
static void CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int
|
||||
subId, void *arg);
|
||||
static void DoInitialCleanup(void);
|
||||
|
@ -474,10 +471,8 @@ _PG_init(void)
|
|||
original_client_auth_hook = ClientAuthentication_hook;
|
||||
ClientAuthentication_hook = CitusAuthHook;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
prev_shmem_request_hook = shmem_request_hook;
|
||||
shmem_request_hook = citus_shmem_request;
|
||||
#endif
|
||||
|
||||
InitializeMaintenanceDaemon();
|
||||
|
||||
|
@ -572,6 +567,16 @@ _PG_init(void)
|
|||
INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
|
||||
INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
|
||||
INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
|
||||
|
||||
/*
|
||||
* This part is only for SECURITY LABEL tests
|
||||
* mimicking what an actual security label provider would do
|
||||
*/
|
||||
if (RunningUnderCitusTestSuite)
|
||||
{
|
||||
register_label_provider("citus '!tests_label_provider",
|
||||
citus_test_object_relabel);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -591,8 +596,6 @@ AdjustDynamicLibraryPathForCdcDecoders(void)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/*
|
||||
* Requests any additional shared memory required for citus.
|
||||
*/
|
||||
|
@ -613,9 +616,6 @@ citus_shmem_request(void)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* DoInitialCleanup does cleanup at start time.
|
||||
* Currently it:
|
||||
|
@ -2293,13 +2293,14 @@ RegisterCitusConfigVariables(void)
        WarnIfReplicationModelIsSet, NULL, NULL);

    DefineCustomBoolVariable(
        "citus.running_under_isolation_test",
        "citus.running_under_citus_test_suite",
        gettext_noop(
            "Only useful for testing purposes, when set to true, Citus does some "
            "tricks to implement useful isolation tests with rebalancing. Should "
            "tricks to implement useful isolation tests with rebalancing. It also "
            "registers a dummy label provider for SECURITY LABEL tests. Should "
            "never be set to true on production systems "),
        gettext_noop("for details of the tricks implemented, refer to the source code"),
        &RunningUnderIsolationTest,
        &RunningUnderCitusTestSuite,
        false,
        PGC_SUSET,
        GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
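A minimal, self-contained sketch of registering a boolean GUC like the one renamed above, using PostgreSQL's `DefineCustomBoolVariable()`; the GUC name `my_extension.debug_mode` and its backing variable are hypothetical (and `MarkGUCPrefixReserved()` is the PG15+ spelling of the prefix reservation call):

```c
#include "postgres.h"

#include "utils/guc.h"

static bool MyExtensionDebugMode = false;

void _PG_init(void);

void
_PG_init(void)
{
    DefineCustomBoolVariable(
        "my_extension.debug_mode",          /* name, settable via SET or conf */
        gettext_noop("Enables extra debug output."),
        gettext_noop("Only intended for test environments."),
        &MyExtensionDebugMode,              /* C variable backing the GUC */
        false,                              /* boot value */
        PGC_SUSET,                          /* only superusers may change it */
        GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
        NULL, NULL, NULL);                  /* check/assign/show hooks */

    /* PG15+: complain about unknown GUCs under this prefix */
    MarkGUCPrefixReserved("my_extension");
}
```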
@ -2853,14 +2854,27 @@ ApplicationNameAssignHook(const char *newval, void *extra)
    DetermineCitusBackendType(newval);

    /*
     * AssignGlobalPID might read from catalog tables to get the the local
     * nodeid. But ApplicationNameAssignHook might be called before catalog
     * access is available to the backend (such as in early stages of
     * authentication). We use StartupCitusBackend to initialize the global pid
     * after catalogs are available. After that happens this hook becomes
     * responsible to update the global pid on later application_name changes.
     * So we set the FinishedStartupCitusBackend flag in StartupCitusBackend to
     * indicate when this responsibility handoff has happened.
     * We use StartupCitusBackend to initialize the global pid after catalogs
     * are available. After that happens this hook becomes responsible to update
     * the global pid on later application_name changes. So we set the
     * FinishedStartupCitusBackend flag in StartupCitusBackend to indicate when
     * this responsibility handoff has happened.
     *
     * Also note that when application_name changes, we don't actually need to
     * try re-assigning the global pid for external client backends and
     * background workers because application_name doesn't affect the global
     * pid for such backends - note that !IsExternalClientBackend() check covers
     * both types of backends. Plus,
     * trying to re-assign the global pid for such backends would unnecessarily
     * cause performing a catalog access when the cached local node id is
     * invalidated. However, accessing the catalog tables is dangerous in
     * certain situations like when we're not in a transaction block. And for
     * the other types of backends, i.e., the Citus internal backends, we need
     * to re-assign the global pid when the application_name changes because for
     * such backends we simply extract the global pid inherited from the
     * originating backend from the application_name -that's specified by
     * the originating backend when opening that connection- and this doesn't require
     * catalog access.
     *
     * Another solution to the catalog table access problem would be to update
     * global pid lazily, like we do for HideShards. But that's not possible

@ -2870,7 +2884,7 @@ ApplicationNameAssignHook(const char *newval, void *extra)
     * as reasonably possible, which is also why we extract global pids in the
     * AuthHook already (extracting doesn't require catalog access).
     */
    if (FinishedStartupCitusBackend)
    if (FinishedStartupCitusBackend && !IsExternalClientBackend())
    {
        AssignGlobalPID(newval);
    }
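To illustrate the catalog-free path the comment describes, a sketch of pulling an inherited global pid out of `application_name`. The `gpid=` marker format is an assumption made for this example, not taken from the diff; the real parsing lives in `ExtractGlobalPID()`:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint64_t
extract_global_pid(const char *applicationName)
{
    /* assumed format: internal backends carry "... gpid=<number>" in their
     * application_name; external clients have no such marker */
    const char *marker = strstr(applicationName, "gpid=");

    if (marker == NULL)
    {
        return 0;               /* external client backend: nothing inherited */
    }

    /* no catalog access needed: the gpid rides along in the string itself */
    return strtoull(marker + strlen("gpid="), NULL, 10);
}
```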
@ -2905,6 +2919,9 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
        "sslcrl",
        "sslkey",
        "sslmode",
#if PG_VERSION_NUM >= PG_VERSION_17
        "sslnegotiation",
#endif
        "sslrootcert",
        "tcp_user_timeout",
    };
@ -2,3 +2,4 @@

-- bump version to 13.0-1
#include "udfs/citus_prepare_pg_upgrade/13.0-1.sql"
#include "udfs/create_time_partitions/13.0-1.sql"
@ -1,2 +1,4 @@
-- citus--13.0-1--12.1-1
-- this is an empty downgrade path since citus--12.1-1--13.0-1.sql is empty

#include "../udfs/create_time_partitions/10.2-1.sql"
@ -0,0 +1,58 @@
CREATE OR REPLACE FUNCTION pg_catalog.create_time_partitions(
    table_name regclass,
    partition_interval INTERVAL,
    end_at timestamptz,
    start_from timestamptz DEFAULT now())
returns boolean
LANGUAGE plpgsql
AS $$
DECLARE
    -- partitioned table name
    schema_name_text name;
    table_name_text name;

    -- record for to-be-created partition
    missing_partition_record record;

    -- result indicates whether any partitions were created
    partition_created bool := false;
BEGIN
    IF start_from >= end_at THEN
        RAISE 'start_from (%) must be older than end_at (%)', start_from, end_at;
    END IF;

    IF NOT isfinite(partition_interval) THEN
        RAISE 'Partition interval must be a finite value';
    END IF;

    SELECT nspname, relname
    INTO schema_name_text, table_name_text
    FROM pg_class JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
    WHERE pg_class.oid = table_name::oid;

    -- Get missing partition range info using the get_missing_partition_ranges
    -- and create partitions using that info.
    FOR missing_partition_record IN
        SELECT *
        FROM get_missing_time_partition_ranges(table_name, partition_interval, end_at, start_from)
    LOOP
        EXECUTE format('CREATE TABLE %I.%I PARTITION OF %I.%I FOR VALUES FROM (%L) TO (%L)',
                       schema_name_text,
                       missing_partition_record.partition_name,
                       schema_name_text,
                       table_name_text,
                       missing_partition_record.range_from_value,
                       missing_partition_record.range_to_value);

        partition_created := true;
    END LOOP;

    RETURN partition_created;
END;
$$;
COMMENT ON FUNCTION pg_catalog.create_time_partitions(
    table_name regclass,
    partition_interval INTERVAL,
    end_at timestamptz,
    start_from timestamptz)
IS 'create time partitions for the given range';
@ -21,6 +21,10 @@ BEGIN
        RAISE 'start_from (%) must be older than end_at (%)', start_from, end_at;
    END IF;

    IF NOT isfinite(partition_interval) THEN
        RAISE 'Partition interval must be a finite value';
    END IF;

    SELECT nspname, relname
    INTO schema_name_text, table_name_text
    FROM pg_class JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
@ -310,7 +310,7 @@ fake_relation_set_new_filenode(Relation rel,
     */
    *minmulti = GetOldestMultiXactId();

    SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true);
    SMgrRelation srel = RelationCreateStorage(*newrnode, persistence, true);

    /*
     * If required, set up an init fork for an unlogged table so that it can
@ -50,6 +50,13 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
     * so we are using first primary worker node just for test purposes.
     */
    WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode();
    if (dummyWorkerNode == NULL)
    {
        ereport(ERROR, (errmsg("no worker nodes found"),
                        errdetail("Function activate_node_snapshot is meant to be "
                                  "used when running tests on a multi-node cluster "
                                  "with workers.")));
    }

    /*
     * Create MetadataSyncContext which is used throughout nodes' activation.
@ -190,6 +190,9 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)

/*
 * override_backend_data_gpid is a wrapper around SetBackendDataGpid().
 * Also sets distributedCommandOriginator to true since the only caller of
 * this function actually wants this backend to
 * be treated as a distributed command originator with the given global pid.
 */
Datum
override_backend_data_gpid(PG_FUNCTION_ARGS)

@ -199,6 +202,7 @@ override_backend_data_gpid(PG_FUNCTION_ARGS)
    uint64 gpid = PG_GETARG_INT64(0);

    SetBackendDataGlobalPID(gpid);
    SetBackendDataDistributedCommandOriginator(true);

    PG_RETURN_VOID();
}
@ -49,13 +49,8 @@ makeIntConst(int val, int location)
{
    A_Const *n = makeNode(A_Const);

#if PG_VERSION_NUM >= PG_VERSION_15
    n->val.ival.type = T_Integer;
    n->val.ival.ival = val;
#else
    n->val.type = T_Integer;
    n->val.val.ival = val;
#endif
    n->location = location;

    return (Node *) n;
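The compat branch is dropped because PG15 changed `A_Const` to embed its value as a union member. A companion sketch under the same PG15+ layout; `makeStringConst` is a hypothetical helper for illustration, not taken from the diff:

```c
#include "postgres.h"

#include "nodes/parsenodes.h"

static Node *
makeStringConst(char *str, int location)
{
    A_Const *n = makeNode(A_Const);

    /* since PG15, A_Const stores the value in a ValUnion member directly */
    n->val.sval.type = T_String;
    n->val.sval.sval = str;
    n->location = location;

    return (Node *) n;
}
```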
@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
        bool showCurrentBackendDetails = showAllBackends;
        BackendData *currentBackend =
            &backendManagementShmemData->backends[backendIndex];
        PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
        PGPROC *currentProc = GetPGProcByNumber(backendIndex);

        /* to work on data after releasing g spinlock to protect against errors */
        uint64 transactionNumber = 0;
@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
        }

        Oid databaseId = currentBackend->databaseId;
        int backendPid = ProcGlobal->allProcs[backendIndex].pid;
        int backendPid = GetPGProcByNumber(backendIndex)->pid;

        /*
         * We prefer to use worker_query instead of distributedCommandOriginator in
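These hunks (and the matching ones further down) swap direct `allProcs` indexing for the `GetPGProcByNumber()` macro from `storage/proc.h`. A brief sketch of the idea; the macro body quoted in the comment reflects upstream as of the PG17 era and may drift across versions, and `pid_of_backend` is a hypothetical helper:

```c
#include "postgres.h"

#include "storage/proc.h"

/* upstream defines roughly:
 *   #define GetPGProcByNumber(n) (&ProcGlobal->allProcs[(n)])
 * going through the macro keeps callers working if the backing
 * array layout changes again */
static int
pid_of_backend(int backendIndex)
{
    PGPROC *proc = GetPGProcByNumber(backendIndex);

    return proc->pid;           /* 0 means the slot is unused */
}
```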
@ -519,15 +519,6 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
void
InitializeBackendManagement(void)
{
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15

    /* allocate shared memory */
    if (!IsUnderPostmaster)
    {
        RequestAddinShmemSpace(BackendManagementShmemSize());
    }
#endif
    prev_shmem_startup_hook = shmem_startup_hook;
    shmem_startup_hook = BackendManagementShmemInit;
}
@ -855,6 +846,16 @@ GetCurrentDistributedTransactionId(void)
void
AssignDistributedTransactionId(void)
{
    /*
     * MyBackendData should always be available. However, we observed some
     * crashes where certain hooks were not executed.
     * Bug 3697586: Server crashes when assigning distributed transaction
     */
    if (!MyBackendData)
    {
        ereport(ERROR, (errmsg("backend is not ready for distributed transactions")));
    }

    pg_atomic_uint64 *transactionNumberSequence =
        &backendManagementShmemData->nextTransactionNumber;
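The shared counter touched right after the new guard is a `pg_atomic_uint64`. A minimal sketch of drawing a fresh number from such a counter, assuming PostgreSQL's `port/atomics.h` API; `next_transaction_number` is a hypothetical wrapper:

```c
#include "postgres.h"

#include "port/atomics.h"

static uint64
next_transaction_number(pg_atomic_uint64 *seq)
{
    /* atomically returns the previous value and bumps the counter by one,
     * so concurrent backends can never hand out the same number */
    return pg_atomic_fetch_add_u64(seq, 1);
}
```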
@ -964,6 +965,23 @@ SetBackendDataGlobalPID(uint64 gpid)
}


/*
 * SetBackendDataDistributedCommandOriginator sets the distributedCommandOriginator
 * field on MyBackendData.
 */
void
SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator)
{
    if (!MyBackendData)
    {
        return;
    }
    SpinLockAcquire(&MyBackendData->mutex);
    MyBackendData->distributedCommandOriginator = distributedCommandOriginator;
    SpinLockRelease(&MyBackendData->mutex);
}


/*
 * GetGlobalPID returns the global process id of the current backend.
 */
@ -1280,7 +1298,7 @@ ActiveDistributedTransactionNumbers(void)
    /* build list of starting procs */
    for (int curBackend = 0; curBackend < MaxBackends; curBackend++)
    {
        PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
        PGPROC *currentProc = GetPGProcByNumber(curBackend);
        BackendData currentBackendData;

        if (currentProc->pid == 0)
@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)

    for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex)
    {
        PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex];
        PGPROC *currentProc = GetPGProcByNumber(backendIndex);
        BackendData currentBackendData;

        /* we're not interested in processes that are not active or waiting on a lock */
@ -561,7 +561,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx)
    /* build list of starting procs */
    for (int curBackend = 0; curBackend < totalProcs; curBackend++)
    {
        PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
        PGPROC *currentProc = GetPGProcByNumber(curBackend);
        BackendData currentBackendData;

        if (currentProc->pid == 0)
@ -53,7 +53,8 @@ PG_FUNCTION_INFO_V1(recover_prepared_transactions);


/* Local functions forward declarations */
static int RecoverWorkerTransactions(WorkerNode *workerNode);
static int RecoverWorkerTransactions(WorkerNode *workerNode,
                                     MultiConnection *connection);
static List * PendingWorkerTransactionList(MultiConnection *connection);
static bool IsTransactionInProgress(HTAB *activeTransactionNumberSet,
                                    char *preparedTransactionName);
@ -123,10 +124,51 @@ RecoverTwoPhaseCommits(void)
    LockTransactionRecovery(ShareUpdateExclusiveLock);

    List *workerList = ActivePrimaryNodeList(NoLock);
    List *workerConnections = NIL;
    WorkerNode *workerNode = NULL;
    MultiConnection *connection = NULL;

    /*
     * Pre-establish all connections to worker nodes.
     *
     * We do this to enforce a consistent lock acquisition order and prevent deadlocks.
     * Currently, during extension updates, we take strong locks on the Citus
     * catalog tables in a specific order: first on pg_dist_authinfo, then on
     * pg_dist_transaction. It's critical that any operation locking these two
     * tables adheres to this order, or a deadlock could occur.
     *
     * Note that RecoverWorkerTransactions() retains its lock until the end
     * of the transaction, while GetNodeConnection() releases its lock after
     * the catalog lookup. So when there are multiple workers in the active primary
     * node list, the lock acquisition order may reverse in subsequent iterations
     * of the loop calling RecoverWorkerTransactions(), increasing the risk
     * of deadlock.
     *
     * By establishing all worker connections upfront, we ensure that
     * RecoverWorkerTransactions() deals with a single distributed catalog table,
     * thereby preventing deadlocks regardless of the lock acquisition sequence
     * used in the upgrade extension script.
     */

    foreach_declared_ptr(workerNode, workerList)
    {
        recoveredTransactionCount += RecoverWorkerTransactions(workerNode);
        int connectionFlags = 0;
        char *nodeName = workerNode->workerName;
        int nodePort = workerNode->workerPort;

        connection = GetNodeConnection(connectionFlags, nodeName, nodePort);
        Assert(connection != NULL);

        /*
         * We don't verify connection validity here.
         * Instead, RecoverWorkerTransactions() performs the necessary
         * sanity checks on the connection state.
         */
        workerConnections = lappend(workerConnections, connection);
    }
    forboth_ptr(workerNode, workerList, connection, workerConnections)
    {
        recoveredTransactionCount += RecoverWorkerTransactions(workerNode, connection);
    }

    return recoveredTransactionCount;
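The reasoning in that comment boils down to a two-phase pattern: acquire all the "connect" resources first, then do all the lock-holding work. A distilled, runnable sketch; `node_connect`/`node_recover` are hypothetical stand-ins for `GetNodeConnection()`/`RecoverWorkerTransactions()`:

```c
#include <stdio.h>

typedef struct { int id; } Worker;
typedef struct { int workerId; } Conn;

static Conn connPool[2];

static Conn *
node_connect(Worker *w, int slot)
{
    /* phase 1 cost: briefly takes (and releases) catalog lock A */
    connPool[slot].workerId = w->id;
    return &connPool[slot];
}

static int
node_recover(Worker *w, Conn *c)
{
    /* phase 2 cost: takes catalog lock B and holds it to end of transaction */
    printf("recovering worker %d over connection %d\n", w->id, c->workerId);
    return 1;
}

int
main(void)
{
    Worker workers[2] = { { 1 }, { 2 } };
    Conn *conns[2];
    int recovered = 0;

    /* phase 1 finishes every lock-A acquisition before phase 2 ever takes
     * lock B, so the A-then-B order can never interleave across workers */
    for (int i = 0; i < 2; i++)
    {
        conns[i] = node_connect(&workers[i], i);
    }
    for (int i = 0; i < 2; i++)
    {
        recovered += node_recover(&workers[i], conns[i]);
    }
    return recovered == 2 ? 0 : 1;
}
```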
@ -138,7 +180,7 @@ RecoverTwoPhaseCommits(void)
 * started by this node on the specified worker.
 */
static int
RecoverWorkerTransactions(WorkerNode *workerNode)
RecoverWorkerTransactions(WorkerNode *workerNode, MultiConnection *connection)
{
    int recoveredTransactionCount = 0;
@ -156,8 +198,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode)

    bool recoveryFailed = false;

    int connectionFlags = 0;
    MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort);
    Assert(connection != NULL);
    if (connection->pgConn == NULL || PQstatus(connection->pgConn) != CONNECTION_OK)
    {
        ereport(WARNING, (errmsg("transaction recovery cannot connect to %s:%d",
@ -1393,87 +1393,6 @@ CalculateBackoffDelay(int retryCount)
}


#if PG_VERSION_NUM < PG_VERSION_15
static const char *
error_severity(int elevel)
{
    const char *prefix;

    switch (elevel)
    {
        case DEBUG1:
        case DEBUG2:
        case DEBUG3:
        case DEBUG4:
        case DEBUG5:
        {
            prefix = gettext_noop("DEBUG");
            break;
        }

        case LOG:
        case LOG_SERVER_ONLY:
        {
            prefix = gettext_noop("LOG");
            break;
        }

        case INFO:
        {
            prefix = gettext_noop("INFO");
            break;
        }

        case NOTICE:
        {
            prefix = gettext_noop("NOTICE");
            break;
        }

        case WARNING:
        {
            prefix = gettext_noop("WARNING");
            break;
        }

        case WARNING_CLIENT_ONLY:
        {
            prefix = gettext_noop("WARNING");
            break;
        }

        case ERROR:
        {
            prefix = gettext_noop("ERROR");
            break;
        }

        case FATAL:
        {
            prefix = gettext_noop("FATAL");
            break;
        }

        case PANIC:
        {
            prefix = gettext_noop("PANIC");
            break;
        }

        default:
        {
            prefix = "???";
            break;
        }
    }

    return prefix;
}


#endif


/*
 * bgw_generate_returned_message -
 * generates the message to be inserted into the job_run_details table
@ -15,6 +15,7 @@
#include "unistd.h"

#include "access/hash.h"
#include "common/pg_prng.h"
#include "executor/execdesc.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
@ -38,10 +39,6 @@
#include "distributed/tuplestore.h"
#include "distributed/utils/citus_stat_tenants.h"

#if (PG_VERSION_NUM >= PG_VERSION_15)
#include "common/pg_prng.h"
#endif

static void AttributeMetricsIfApplicable(void);

ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
@ -298,13 +295,7 @@ AttributeTask(char *tenantId, int colocationId, CmdType commandType)
    /* If the tenant is not found in the hash table, we will track the query with a probability of StatTenantsSampleRateForNewTenants. */
    if (!found)
    {
#if (PG_VERSION_NUM >= PG_VERSION_15)
        double randomValue = pg_prng_double(&pg_global_prng_state);
#else

        /* Generate a random double between 0 and 1 */
        double randomValue = (double) random() / MAX_RANDOM_VALUE;
#endif
        bool shouldTrackQuery = randomValue <= StatTenantsSampleRateForNewTenants;
        if (!shouldTrackQuery)
        {
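With the pre-PG15 fallback gone, the sampling decision reduces to one PRNG call. A minimal sketch of that kept branch, assuming `common/pg_prng.h` and its process-global state; `should_sample` is a hypothetical helper:

```c
#include "postgres.h"

#include "common/pg_prng.h"

static bool
should_sample(double sampleRate)
{
    /* pg_prng_double() yields a uniform double in [0, 1), so comparing it
     * against the rate tracks roughly sampleRate of all new tenants */
    return pg_prng_double(&pg_global_prng_state) <= sampleRate;
}
```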
@ -362,10 +362,8 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId)
                            leftRelationName, rightRelationName)));
        }

        List *leftPlacementList = ShardPlacementListSortedByWorker(
            leftShardId);
        List *rightPlacementList = ShardPlacementListSortedByWorker(
            rightShardId);
        List *leftPlacementList = ShardPlacementList(leftShardId);
        List *rightPlacementList = ShardPlacementList(rightShardId);

        if (list_length(leftPlacementList) != list_length(rightPlacementList))
        {
@ -470,12 +470,11 @@ SingleReplicatedTable(Oid relationId)
        return false;
    }

    List *shardIntervalList = LoadShardList(relationId);
    uint64 *shardIdPointer = NULL;
    foreach_declared_ptr(shardIdPointer, shardIntervalList)
    foreach_declared_ptr(shardIdPointer, shardList)
    {
        uint64 shardId = *shardIdPointer;
        shardPlacementList = ShardPlacementListSortedByWorker(shardId);
        shardPlacementList = ShardPlacementList(shardId);

        if (list_length(shardPlacementList) != 1)
        {
@ -170,14 +170,10 @@ WorkerDropDistributedTable(Oid relationId)
     */
    if (!IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL))
    {
        char *relName = get_rel_name(relationId);
        Oid schemaId = get_rel_namespace(relationId);
        char *schemaName = get_namespace_name(schemaId);

        StringInfo dropCommand = makeStringInfo();
        appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE",
                         IsForeignTable(relationId) ? " FOREIGN " : " ",
                         quote_qualified_identifier(schemaName, relName));
                         generate_qualified_relation_name(relationId));

        Node *dropCommandNode = ParseTreeNode(dropCommand->data);
@ -441,7 +441,7 @@ FilterShardsFromPgclass(Node *node, void *context)
    /*
     * We process the whole rtable rather than visiting individual RangeTblEntry's
     * in the walker, since we need to know the varno to generate the right
     * fiter.
     * filter.
     */
    int varno = 0;
    RangeTblEntry *rangeTableEntry = NULL;
@ -471,20 +471,39 @@ FilterShardsFromPgclass(Node *node, void *context)
        /* make sure the expression is in the right memory context */
        MemoryContext originalContext = MemoryContextSwitchTo(queryContext);


        /* add relation_is_a_known_shard(oid) IS NOT TRUE to the quals of the query */
        Node *newQual = CreateRelationIsAKnownShardFilter(varno);
        Node *oldQuals = query->jointree->quals;
        if (oldQuals)

#if PG_VERSION_NUM >= PG_VERSION_17

        /*
         * In PG17, MERGE queries introduce a new struct `mergeJoinCondition`.
         * We need to handle this condition safely.
         */
        if (query->mergeJoinCondition != NULL)
        {
            query->jointree->quals = (Node *) makeBoolExpr(
            /* Add the filter to mergeJoinCondition */
            query->mergeJoinCondition = (Node *) makeBoolExpr(
                AND_EXPR,
                list_make2(oldQuals, newQual),
                list_make2(query->mergeJoinCondition, newQual),
                -1);
        }
        else
#endif
        {
            query->jointree->quals = newQual;
            /* Handle older versions or queries without mergeJoinCondition */
            Node *oldQuals = query->jointree->quals;
            if (oldQuals)
            {
                query->jointree->quals = (Node *) makeBoolExpr(
                    AND_EXPR,
                    list_make2(oldQuals, newQual),
                    -1);
            }
            else
            {
                query->jointree->quals = newQual;
            }
        }

        MemoryContextSwitchTo(originalContext);
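Both branches of this hunk reduce to the same core move: AND a new filter into whatever qual already exists. A minimal sketch of that move using PostgreSQL's `makeBoolExpr()`; `and_quals` is a hypothetical helper:

```c
#include "postgres.h"

#include "nodes/makefuncs.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"

static Node *
and_quals(Node *existingQual, Node *newQual)
{
    if (existingQual == NULL)
    {
        return newQual;          /* nothing to combine with */
    }

    /* -1 means "no parse location" for the synthesized expression */
    return (Node *) makeBoolExpr(AND_EXPR,
                                 list_make2(existingQual, newQual),
                                 -1);
}
```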
@ -14,14 +14,6 @@

#include "pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_15
#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
    ExecARDeleteTriggers(a, b, c, d, e, f)
#else
#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
    ExecARDeleteTriggers(a, b, c, d, e)
#endif

#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE

#define ExplainPropertyLong(qlabel, value, es) \
@ -61,6 +61,7 @@ extern void AssignGlobalPID(const char *applicationName);
extern uint64 GetGlobalPID(void);
extern void SetBackendDataDatabaseId(void);
extern void SetBackendDataGlobalPID(uint64 gpid);
extern void SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator);
extern uint64 ExtractGlobalPID(const char *applicationName);
extern int ExtractNodeIdFromGlobalPID(uint64 globalPID, bool missingOk);
extern int ExtractProcessIdFromGlobalPID(uint64 globalPID);
@ -510,6 +510,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok,
extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool
                                                 isPostprocess);

/* seclabel.c - forward declarations */
extern List * PostprocessSecLabelStmt(Node *node, const char *queryString);
extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel);

/* sequence.c - forward declarations */
extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
                                          ProcessUtilityContext processUtilityContext);
@ -520,13 +525,11 @@ extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *querySt
extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
                                                     ProcessUtilityContext
                                                     processUtilityContext);
extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
#endif
extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
                                         ProcessUtilityContext processUtilityContext);
extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool
@ -542,10 +545,8 @@ extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok,
                                                   isPostprocess);
extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool
                                                  isPostprocess);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool
                                                        isPostprocess);
#endif
extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok, bool
                                              isPostprocess);
extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt);
@ -749,8 +750,6 @@ extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk, bool
                                             isPostprocess);
extern void CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt,
                                          char *schemaName, uint64 shardId);
extern List * PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString);
extern void AlterTriggerRenameEventExtendNames(RenameStmt *renameTriggerStmt,
                                               char *schemaName, uint64 shardId);
@ -259,14 +259,15 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyTextSearchDictionaryCommentStmt(Node *node);

/* forward declarations for deparse_seclabel_stmts.c */
extern char * DeparseSecLabelStmt(Node *node);

/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);
extern char * DeparseAlterSequenceSchemaStmt(Node *node);
extern char * DeparseAlterSequenceOwnerStmt(Node *node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern char * DeparseAlterSequencePersistenceStmt(Node *node);
#endif
extern char * DeparseGrantOnSequenceStmt(Node *node);

/* forward declarations for qualify_sequence_stmt.c */
@ -274,9 +275,7 @@ extern void QualifyRenameSequenceStmt(Node *node);
extern void QualifyDropSequenceStmt(Node *node);
extern void QualifyAlterSequenceSchemaStmt(Node *node);
extern void QualifyAlterSequenceOwnerStmt(Node *node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern void QualifyAlterSequencePersistenceStmt(Node *node);
#endif
extern void QualifyGrantOnSequenceStmt(Node *node);

#endif /* CITUS_DEPARSER_H */
@ -28,11 +28,6 @@

#define CURSOR_OPT_FORCE_DISTRIBUTED 0x080000

/* Hack to compile Citus on pre-MERGE Postgres versions */
#if PG_VERSION_NUM < PG_VERSION_15
#define CMD_MERGE CMD_UNKNOWN
#endif


/* level of planner calls */
extern int PlannerLevel;
@ -128,9 +128,6 @@ extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId);

extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName);
extern List * GetSequencesFromAttrDef(Oid attrdefOid);
#if PG_VERSION_NUM < PG_VERSION_15
ObjectAddress GetAttrDefaultColumnAddress(Oid attrdefoid);
#endif
extern List * GetAttrDefsFromSequence(Oid seqOid);
extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
                                              AttrNumber attnum, char depType);
@ -118,7 +118,7 @@ extern bool HasDangerousJoinUsing(List *rtableList, Node *jtnode);
extern Job * RouterJob(Query *originalQuery,
                       PlannerRestrictionContext *plannerRestrictionContext,
                       DeferredErrorMessage **planningError);
extern bool ContainsOnlyLocalTables(RTEListProperties *rteProperties);
extern bool ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties);
extern RangeTblEntry * ExtractSourceResultRangeTableEntry(Query *query);

#endif /* MULTI_ROUTER_PLANNER_H */
@ -39,5 +39,7 @@ extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
                                           List *requiredAttributes,
                                           RTEPermissionInfo *perminfo);
extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes);
extern List * CreateFilteredTargetListForRelation(Oid relationId,
                                                  List *requiredAttributes);

#endif /* QUERY_COLOCATION_CHECKER_H */
@ -46,6 +46,7 @@ extern DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subquery
                                                                 bool
                                                                 outerMostQueryHasLimit);
extern DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *queryTree);
extern bool IsJsonTableRTE(RangeTblEntry *rte);


#endif /* QUERY_PUSHDOWN_PLANNING_H */
@ -40,6 +40,7 @@ extern Query * BuildReadIntermediateResultsArrayQuery(List *targetEntryList,
                                                      List *columnAliasList,
                                                      List *resultIdList,
                                                      bool useBinaryCopyFormat);
extern Query * BuildEmptyResultQuery(List *targetEntryList, char *resultId);
extern bool GeneratingSubplans(void);
extern bool ContainsLocalTableDistributedTableJoin(List *rangeTableList);
extern void ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
@ -28,5 +28,10 @@ extern List * GenerateTaskListWithRedistributedResults(
    bool useBinaryFormat);
extern bool IsSupportedRedistributionTarget(Oid targetRelationId);
extern bool IsRedistributablePlan(Plan *selectPlan);
extern bool HasMergeNotMatchedBySource(Query *query);
extern void AdjustTaskQueryForEmptySource(Oid targetRelationId,
                                          Query *mergeQuery,
                                          List *emptySourceTaskList,
                                          char *resultIdPrefix);

#endif /* REPARTITION_EXECUTOR_H */