mirror of https://github.com/citusdata/citus.git
Compare commits
30 Commits
SHA1 |
---|
17f09d4ad7 |
98bbfc27c8 |
37a1c3d9db |
30307e6e79 |
6ba68a978f |
78b272d971 |
d8a0d1c071 |
bd17e5fd77 |
e4ba71f5de |
f551eb50c2 |
21be1af515 |
c18586c009 |
d13e9f50b2 |
2de879dc59 |
78f0b0c6dd |
cb99468716 |
46337a53ae |
15f5796eee |
fcd3b6c12f |
6d833a90e5 |
12c27ace2f |
262c335860 |
7b98fbb05e |
58155c5779 |
17149b92b2 |
1a9066c34a |
e14f4c3dee |
5525676aad |
234df62106 |
5dd08835df |
.circleci/config.yml (1035 changed lines)
File diff suppressed because it is too large.
@ -0,0 +1,23 @@
name: 'Parallelization matrix'
inputs:
  count:
    required: false
    default: 32
outputs:
  json:
    value: ${{ steps.generate_matrix.outputs.json }}
runs:
  using: "composite"
  steps:
    - name: Generate parallelization matrix
      id: generate_matrix
      shell: bash
      run: |-
        json_array="{\"include\": ["
        for ((i = 1; i <= ${{ inputs.count }}; i++)); do
          json_array+="{\"id\":\"$i\"},"
        done
        json_array=${json_array%,}
        json_array+=" ]}"
        echo "json=$json_array" >> "$GITHUB_OUTPUT"
        echo "json=$json_array"
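For context, this composite action hand-builds the JSON object that GitHub Actions' `fromJson()` expects for a job matrix. A minimal local sketch of the same loop, assuming a hypothetical `count=3`, shows the shape of the generated value:

```bash
#!/usr/bin/env bash
# Sketch of the matrix-generation loop above, run locally with count=3.
count=3
json_array="{\"include\": ["
for ((i = 1; i <= count; i++)); do
  json_array+="{\"id\":\"$i\"},"
done
json_array=${json_array%,}   # drop the trailing comma so the JSON stays valid
json_array+=" ]}"
echo "$json_array"
# Prints: {"include": [{"id":"1"},{"id":"2"},{"id":"3"} ]}
```

Jobs that set `strategy.matrix: ${{ fromJson(<this output>) }}` then fan out into one job per `id`, which is how the `prepare_parallelization_matrix_32` job is consumed later in this diff.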
@ -0,0 +1,38 @@
|
||||||
|
name: save_logs_and_results
|
||||||
|
inputs:
|
||||||
|
folder:
|
||||||
|
required: false
|
||||||
|
default: "log"
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- uses: actions/upload-artifact@v3.1.1
|
||||||
|
name: Upload logs
|
||||||
|
with:
|
||||||
|
name: ${{ inputs.folder }}
|
||||||
|
if-no-files-found: ignore
|
||||||
|
path: |
|
||||||
|
src/test/**/proxy.output
|
||||||
|
src/test/**/results/
|
||||||
|
src/test/**/tmp_check/master/log
|
||||||
|
src/test/**/tmp_check/worker.57638/log
|
||||||
|
src/test/**/tmp_check/worker.57637/log
|
||||||
|
src/test/**/*.diffs
|
||||||
|
src/test/**/out/ddls.sql
|
||||||
|
src/test/**/out/queries.sql
|
||||||
|
src/test/**/logfile_*
|
||||||
|
/tmp/pg_upgrade_newData_logs
|
||||||
|
- name: Publish regression.diffs
|
||||||
|
run: |-
|
||||||
|
diffs="$(find src/test/regress -name "*.diffs" -exec cat {} \;)"
|
||||||
|
if ! [ -z "$diffs" ]; then
|
||||||
|
echo '```diff' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo -E "$diffs" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo -E $diffs
|
||||||
|
fi
|
||||||
|
shell: bash
|
||||||
|
- name: Print stack traces
|
||||||
|
run: "./ci/print_stack_trace.sh"
|
||||||
|
if: failure()
|
||||||
|
shell: bash
|
|
@ -0,0 +1,35 @@
|
||||||
|
name: setup_extension
|
||||||
|
inputs:
|
||||||
|
pg_major:
|
||||||
|
required: false
|
||||||
|
skip_installation:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Expose $PG_MAJOR to Github Env
|
||||||
|
run: |-
|
||||||
|
if [ -z "${{ inputs.pg_major }}" ]; then
|
||||||
|
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
shell: bash
|
||||||
|
- uses: actions/download-artifact@v3.0.1
|
||||||
|
with:
|
||||||
|
name: build-${{ env.PG_MAJOR }}
|
||||||
|
- name: Install Extension
|
||||||
|
if: ${{ inputs.skip_installation == 'false' }}
|
||||||
|
run: tar xfv "install-$PG_MAJOR.tar" --directory /
|
||||||
|
shell: bash
|
||||||
|
- name: Configure
|
||||||
|
run: |-
|
||||||
|
chown -R circleci .
|
||||||
|
git config --global --add safe.directory ${GITHUB_WORKSPACE}
|
||||||
|
gosu circleci ./configure --without-pg-version-check
|
||||||
|
shell: bash
|
||||||
|
- name: Enable core dumps
|
||||||
|
run: ulimit -c unlimited
|
||||||
|
shell: bash
|
|
@ -0,0 +1,27 @@
|
||||||
|
name: coverage
|
||||||
|
inputs:
|
||||||
|
flags:
|
||||||
|
required: false
|
||||||
|
codecov_token:
|
||||||
|
required: true
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- uses: codecov/codecov-action@v3
|
||||||
|
with:
|
||||||
|
flags: ${{ inputs.flags }}
|
||||||
|
token: ${{ inputs.codecov_token }}
|
||||||
|
verbose: true
|
||||||
|
gcov: true
|
||||||
|
- name: Create codeclimate coverage
|
||||||
|
run: |-
|
||||||
|
lcov --directory . --capture --output-file lcov.info
|
||||||
|
lcov --remove lcov.info -o lcov.info '/usr/*'
|
||||||
|
sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative pats are required by codeclimate
|
||||||
|
mkdir -p /tmp/codeclimate
|
||||||
|
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
|
||||||
|
shell: bash
|
||||||
|
- uses: actions/upload-artifact@v3.1.1
|
||||||
|
with:
|
||||||
|
path: "/tmp/codeclimate/*.json"
|
||||||
|
name: codeclimate
|
|
@ -0,0 +1,490 @@
|
||||||
|
name: Build & Test
|
||||||
|
run-name: Build & Test - ${{ github.event.pull_request.title || github.ref_name }}
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
skip_test_flakyness:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize]
|
||||||
|
jobs:
|
||||||
|
# Since GHA does not interpolate env variables in matrix context, we need to
|
||||||
|
# define them in a separate job and use them in other jobs.
|
||||||
|
params:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Initialize parameters
|
||||||
|
outputs:
|
||||||
|
build_image_name: "citus/extbuilder"
|
||||||
|
test_image_name: "citus/exttester"
|
||||||
|
citusupgrade_image_name: "citus/citusupgradetester"
|
||||||
|
fail_test_image_name: "citus/failtester"
|
||||||
|
pgupgrade_image_name: "citus/pgupgradetester"
|
||||||
|
style_checker_image_name: "citus/stylechecker"
|
||||||
|
style_checker_tools_version: "0.8.18"
|
||||||
|
image_suffix: "-v3417e8d"
|
||||||
|
pg13_version: '{ "major": "13", "full": "13.10" }'
|
||||||
|
pg14_version: '{ "major": "14", "full": "14.7" }'
|
||||||
|
pg15_version: '{ "major": "15", "full": "15.2" }'
|
||||||
|
upgrade_pg_versions: "13.10-14.7-15.2"
|
||||||
|
steps:
|
||||||
|
# Since GHA jobs need at least one step, we use a noop step here.
|
||||||
|
- name: Set up parameters
|
||||||
|
run: echo 'noop'
|
||||||
|
check-sql-snapshots:
|
||||||
|
needs: params
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: ${{ needs.params.outputs.build_image_name }}:latest
|
||||||
|
options: --user root
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- name: Check Snapshots
|
||||||
|
run: |
|
||||||
|
git config --global --add safe.directory ${GITHUB_WORKSPACE}
|
||||||
|
ci/check_sql_snapshots.sh
|
||||||
|
check-style:
|
||||||
|
needs: params
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }}
|
||||||
|
steps:
|
||||||
|
- name: Check Snapshots
|
||||||
|
run: |
|
||||||
|
git config --global --add safe.directory ${GITHUB_WORKSPACE}
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Check C Style
|
||||||
|
run: citus_indent --check
|
||||||
|
- name: Check Python style
|
||||||
|
run: black --check .
|
||||||
|
- name: Check Python import order
|
||||||
|
run: isort --check .
|
||||||
|
- name: Check Python lints
|
||||||
|
run: flake8 .
|
||||||
|
- name: Fix whitespace
|
||||||
|
run: ci/editorconfig.sh && git diff --exit-code
|
||||||
|
- name: Remove useless declarations
|
||||||
|
run: ci/remove_useless_declarations.sh && git diff --cached --exit-code
|
||||||
|
- name: Normalize test output
|
||||||
|
run: ci/normalize_expected.sh && git diff --exit-code
|
||||||
|
- name: Check for C-style comments in migration files
|
||||||
|
run: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
|
||||||
|
- name: 'Check for comments that start with # character in spec files'
|
||||||
|
run: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
|
||||||
|
- name: Check for gitignore entries for source files
|
||||||
|
run: ci/fix_gitignore.sh && git diff --exit-code
|
||||||
|
- name: Check for lengths of changelog entries
|
||||||
|
run: ci/disallow_long_changelog_entries.sh
|
||||||
|
- name: Check for banned C API usage
|
||||||
|
run: ci/banned.h.sh
|
||||||
|
- name: Check for tests missing in schedules
|
||||||
|
run: ci/check_all_tests_are_run.sh
|
||||||
|
- name: Check if all CI scripts are actually run
|
||||||
|
run: ci/check_all_ci_scripts_are_run.sh
|
||||||
|
- name: Check if all GUCs are sorted alphabetically
|
||||||
|
run: ci/check_gucs_are_alphabetically_sorted.sh
|
||||||
|
- name: Check for missing downgrade scripts
|
||||||
|
run: ci/check_migration_files.sh
|
||||||
|
build:
|
||||||
|
needs: params
|
||||||
|
name: Build for PG${{ fromJson(matrix.pg_version).major }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
image_name:
|
||||||
|
- ${{ needs.params.outputs.build_image_name }}
|
||||||
|
image_suffix:
|
||||||
|
- ${{ needs.params.outputs.image_suffix}}
|
||||||
|
pg_version:
|
||||||
|
- ${{ needs.params.outputs.pg13_version }}
|
||||||
|
- ${{ needs.params.outputs.pg14_version }}
|
||||||
|
- ${{ needs.params.outputs.pg15_version }}
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
|
||||||
|
options: --user root
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- name: Expose $PG_MAJOR to Github Env
|
||||||
|
run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
|
||||||
|
shell: bash
|
||||||
|
- name: Build
|
||||||
|
run: "./ci/build-citus.sh"
|
||||||
|
shell: bash
|
||||||
|
- uses: actions/upload-artifact@v3.1.1
|
||||||
|
with:
|
||||||
|
name: build-${{ env.PG_MAJOR }}
|
||||||
|
path: |-
|
||||||
|
./build-${{ env.PG_MAJOR }}/*
|
||||||
|
./install-${{ env.PG_MAJOR }}.tar
|
||||||
|
test-citus:
|
||||||
|
name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
suite:
|
||||||
|
- regress
|
||||||
|
image_name:
|
||||||
|
- ${{ needs.params.outputs.test_image_name }}
|
||||||
|
pg_version:
|
||||||
|
- ${{ needs.params.outputs.pg13_version }}
|
||||||
|
- ${{ needs.params.outputs.pg14_version }}
|
||||||
|
- ${{ needs.params.outputs.pg15_version }}
|
||||||
|
make:
|
||||||
|
- check-split
|
||||||
|
- check-multi
|
||||||
|
- check-multi-1
|
||||||
|
- check-multi-mx
|
||||||
|
- check-vanilla
|
||||||
|
- check-isolation
|
||||||
|
- check-operations
|
||||||
|
- check-follower-cluster
|
||||||
|
- check-columnar
|
||||||
|
- check-columnar-isolation
|
||||||
|
- check-enterprise
|
||||||
|
- check-enterprise-isolation
|
||||||
|
- check-enterprise-isolation-logicalrep-1
|
||||||
|
- check-enterprise-isolation-logicalrep-2
|
||||||
|
- check-enterprise-isolation-logicalrep-3
|
||||||
|
include:
|
||||||
|
- make: check-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg13_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg14_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg15_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-enterprise-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg13_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-enterprise-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg14_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-enterprise-failure
|
||||||
|
pg_version: ${{ needs.params.outputs.pg15_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-pytest
|
||||||
|
pg_version: ${{ needs.params.outputs.pg13_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-pytest
|
||||||
|
pg_version: ${{ needs.params.outputs.pg14_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: check-pytest
|
||||||
|
pg_version: ${{ needs.params.outputs.pg15_version }}
|
||||||
|
suite: regress
|
||||||
|
image_name: ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
- make: installcheck
|
||||||
|
suite: cdc
|
||||||
|
image_name: ${{ needs.params.outputs.test_image_name }}
|
||||||
|
pg_version: ${{ needs.params.outputs.pg15_version }}
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
|
||||||
|
options: --user root --dns=8.8.8.8
|
||||||
|
# Because GitHub creates a default network for each job, we need to use
# --dns= to get DNS settings similar to our other CI systems and local
# machines. Otherwise, we may see different results.
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
- name: Run Test
|
||||||
|
run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
|
||||||
|
timeout-minutes: 20
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
|
||||||
|
- uses: "./.github/actions/upload_coverage"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
|
||||||
|
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
test-arbitrary-configs:
|
||||||
|
name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
|
||||||
|
runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
|
||||||
|
container:
|
||||||
|
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
|
||||||
|
options: --user root
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
image_name:
|
||||||
|
- ${{ needs.params.outputs.fail_test_image_name }}
|
||||||
|
pg_version:
|
||||||
|
- ${{ needs.params.outputs.pg13_version }}
|
||||||
|
- ${{ needs.params.outputs.pg14_version }}
|
||||||
|
- ${{ needs.params.outputs.pg15_version }}
|
||||||
|
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
- name: Test arbitrary configs
|
||||||
|
run: |-
|
||||||
|
# we use parallel jobs to split the tests into 6 parts and run them in parallel
|
||||||
|
# the script below extracts the tests for the current job
|
||||||
|
N=6 # Total number of jobs (see matrix.parallel)
|
||||||
|
X=${{ matrix.parallel }} # Current job number
|
||||||
|
TESTS=$(src/test/regress/citus_tests/print_test_names.py |
|
||||||
|
tr '\n' ',' | awk -v N="$N" -v X="$X" -F, '{
|
||||||
|
split("", parts)
|
||||||
|
for (i = 1; i <= NF; i++) {
|
||||||
|
parts[i % N] = parts[i % N] $i ","
|
||||||
|
}
|
||||||
|
print substr(parts[X], 1, length(parts[X])-1)
|
||||||
|
}')
|
||||||
|
echo $TESTS
|
||||||
|
gosu circleci \
|
||||||
|
make -C src/test/regress \
|
||||||
|
check-arbitrary-configs parallel=4 CONFIGS=$TESTS
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
||||||
|
- uses: "./.github/actions/upload_coverage"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
flags: ${{ env.pg_major }}_upgrade
|
||||||
|
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
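The `test-arbitrary-configs` job above splits the output of `print_test_names.py` into six buckets with a round-robin awk pass. A standalone sketch of that split, using made-up config names and a smaller N, illustrates what one bucket receives:

```bash
#!/usr/bin/env bash
# Round-robin split of a comma-joined list into N buckets, mirroring the job above.
# The config names below are placeholders, not real Citus test configs.
N=3   # total number of parallel jobs
X=1   # index of the current job (matrix.parallel)
printf '%s\n' cfg_a cfg_b cfg_c cfg_d cfg_e |
  tr '\n' ',' | awk -v N="$N" -v X="$X" -F, '{
    split("", parts)
    for (i = 1; i <= NF; i++) {
      parts[i % N] = parts[i % N] $i ","
    }
    print substr(parts[X], 1, length(parts[X]) - 1)
  }'
# Bucket 1 gets every name whose position satisfies i % 3 == 1: cfg_a,cfg_d
```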
test-pg-upgrade:
|
||||||
|
name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}"
|
||||||
|
options: --user root
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- old_pg_major: 13
|
||||||
|
new_pg_major: 14
|
||||||
|
- old_pg_major: 14
|
||||||
|
new_pg_major: 15
|
||||||
|
env:
|
||||||
|
old_pg_major: ${{ matrix.old_pg_major }}
|
||||||
|
new_pg_major: ${{ matrix.new_pg_major }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
with:
|
||||||
|
pg_major: "${{ env.old_pg_major }}"
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
with:
|
||||||
|
pg_major: "${{ env.new_pg_major }}"
|
||||||
|
- name: Install and test postgres upgrade
|
||||||
|
run: |-
|
||||||
|
gosu circleci \
|
||||||
|
make -C src/test/regress \
|
||||||
|
check-pg-upgrade \
|
||||||
|
old-bindir=/usr/lib/postgresql/${{ env.old_pg_major }}/bin \
|
||||||
|
new-bindir=/usr/lib/postgresql/${{ env.new_pg_major }}/bin
|
||||||
|
- name: Copy pg_upgrade logs for newData dir
|
||||||
|
run: |-
|
||||||
|
mkdir -p /tmp/pg_upgrade_newData_logs
|
||||||
|
if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
|
||||||
|
cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
|
||||||
|
fi
|
||||||
|
if: failure()
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
||||||
|
- uses: "./.github/actions/upload_coverage"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
|
||||||
|
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
test-citus-upgrade:
|
||||||
|
name: PG${{ fromJson(needs.params.outputs.pg13_version).major }} - check-citus-upgrade
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg13_version).full }}${{ needs.params.outputs.image_suffix }}"
|
||||||
|
options: --user root
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
with:
|
||||||
|
skip_installation: true
|
||||||
|
- name: Install and test citus upgrade
|
||||||
|
run: |-
|
||||||
|
# run make check-citus-upgrade for all citus versions
|
||||||
|
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
|
||||||
|
for citus_version in ${CITUS_VERSIONS}; do \
|
||||||
|
gosu circleci \
|
||||||
|
make -C src/test/regress \
|
||||||
|
check-citus-upgrade \
|
||||||
|
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
|
||||||
|
citus-old-version=${citus_version} \
|
||||||
|
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
|
||||||
|
citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
|
||||||
|
done;
|
||||||
|
# run make check-citus-upgrade-mixed for all citus versions
|
||||||
|
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
|
||||||
|
for citus_version in ${CITUS_VERSIONS}; do \
|
||||||
|
gosu circleci \
|
||||||
|
make -C src/test/regress \
|
||||||
|
check-citus-upgrade-mixed \
|
||||||
|
citus-old-version=${citus_version} \
|
||||||
|
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
|
||||||
|
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
|
||||||
|
citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
|
||||||
|
done;
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
||||||
|
- uses: "./.github/actions/upload_coverage"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
flags: ${{ env.pg_major }}_upgrade
|
||||||
|
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
upload-coverage:
|
||||||
|
if: always()
|
||||||
|
env:
|
||||||
|
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- test-citus
|
||||||
|
- test-arbitrary-configs
|
||||||
|
- test-citus-upgrade
|
||||||
|
- test-pg-upgrade
|
||||||
|
steps:
|
||||||
|
- uses: actions/download-artifact@v3.0.1
|
||||||
|
with:
|
||||||
|
name: "codeclimate"
|
||||||
|
path: "codeclimate"
|
||||||
|
- name: Upload coverage results to Code Climate
|
||||||
|
run: |-
|
||||||
|
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
|
||||||
|
cc-test-reporter upload-coverage -i total.json
|
||||||
|
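Coverage reporting in this workflow is two-phased: each test job runs the `upload_coverage` composite action, which formats its own lcov data into a per-suite JSON file and uploads it as the `codeclimate` artifact, and the `upload-coverage` job above then sums and publishes everything. A condensed sketch of the flow (the suite name in the output path is illustrative):

```bash
#!/usr/bin/env bash
# Per-test-job half (from the upload_coverage composite action earlier in this diff):
lcov --directory . --capture --output-file lcov.info
lcov --remove lcov.info -o lcov.info '/usr/*'
mkdir -p /tmp/codeclimate
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/some_suite.json lcov.info

# Aggregation half (the upload-coverage job, after downloading the "codeclimate" artifact):
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
cc-test-reporter upload-coverage -i total.json   # authenticates via CC_TEST_REPORTER_ID
```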
ch_benchmark:
|
||||||
|
name: CH Benchmark
|
||||||
|
if: startsWith(github.ref, 'refs/heads/ch_benchmark/')
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: azure/login@v1
|
||||||
|
with:
|
||||||
|
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||||
|
- name: install dependencies and run ch_benchmark tests
|
||||||
|
uses: azure/CLI@v1
|
||||||
|
with:
|
||||||
|
inlineScript: |
|
||||||
|
cd ./src/test/hammerdb
|
||||||
|
chmod +x run_hammerdb.sh
|
||||||
|
run_hammerdb.sh citusbot_ch_benchmark_rg
|
||||||
|
tpcc_benchmark:
|
||||||
|
name: TPCC Benchmark
|
||||||
|
if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/')
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: azure/login@v1
|
||||||
|
with:
|
||||||
|
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||||
|
- name: install dependencies and run tpcc_benchmark tests
|
||||||
|
uses: azure/CLI@v1
|
||||||
|
with:
|
||||||
|
inlineScript: |
|
||||||
|
cd ./src/test/hammerdb
|
||||||
|
chmod +x run_hammerdb.sh
|
||||||
|
run_hammerdb.sh citusbot_tpcc_benchmark_rg
|
||||||
|
prepare_parallelization_matrix_32:
|
||||||
|
name: Parallel 32
|
||||||
|
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
|
||||||
|
needs: test-flakyness-pre
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
outputs:
|
||||||
|
json: ${{ steps.parallelization.outputs.json }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/parallelization"
|
||||||
|
id: parallelization
|
||||||
|
with:
|
||||||
|
count: 32
|
||||||
|
test-flakyness-pre:
|
||||||
|
name: Detect regression tests that need to be run
|
||||||
|
if: ${{ !inputs.skip_test_flakyness }}
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs: build
|
||||||
|
outputs:
|
||||||
|
tests: ${{ steps.detect-regression-tests.outputs.tests }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Detect regression tests that need to be run
|
||||||
|
id: detect-regression-tests
|
||||||
|
run: |-
|
||||||
|
detected_changes=$(git diff origin/release-11.3... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true))
|
||||||
|
tests=${detected_changes}
|
||||||
|
if [ -z "$tests" ]; then
|
||||||
|
echo "No test found."
|
||||||
|
else
|
||||||
|
echo "Detected tests " $tests
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo 'tests<<EOF' >> $GITHUB_OUTPUT
|
||||||
|
echo "$tests" >> "$GITHUB_OUTPUT"
|
||||||
|
echo 'EOF' >> $GITHUB_OUTPUT
|
||||||
|
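The detection step above has to emit a value that may span several lines, so it uses GitHub's heredoc-style syntax for step outputs. A minimal sketch of that mechanism, with an illustrative file list:

```bash
#!/usr/bin/env bash
# Simulate writing a multiline step output; on a real runner GITHUB_OUTPUT is
# provided by GitHub Actions, here we point it at a temp file instead.
GITHUB_OUTPUT=$(mktemp)
tests=$'src/test/regress/sql/a.sql\nsrc/test/regress/sql/b.sql'
{
  echo 'tests<<EOF'
  echo "$tests"
  echo 'EOF'
} >> "$GITHUB_OUTPUT"
cat "$GITHUB_OUTPUT"   # the delimiter-wrapped block is what GHA parses into outputs.tests
```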
test-flakyness:
|
||||||
|
if: false
|
||||||
|
name: Test flakyness
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg15_version }}${{ needs.params.outputs.image_suffix }}
|
||||||
|
options: --user root
|
||||||
|
env:
|
||||||
|
runs: 8
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
- test-flakyness-pre
|
||||||
|
- prepare_parallelization_matrix_32
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: actions/download-artifact@v3.0.1
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
- name: Run minimal tests
|
||||||
|
run: |-
|
||||||
|
tests="${{ needs.test-flakyness-pre.outputs.tests }}"
|
||||||
|
tests_array=($tests)
|
||||||
|
for test in "${tests_array[@]}"
|
||||||
|
do
|
||||||
|
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
|
||||||
|
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
|
||||||
|
done
|
||||||
|
shell: bash
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
|
@ -0,0 +1,79 @@
|
||||||
|
name: Flaky test debugging
|
||||||
|
run-name: Flaky test debugging - ${{ inputs.flaky_test }} (${{ inputs.flaky_test_runs_per_job }}x${{ inputs.flaky_test_parallel_jobs }})
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
flaky_test:
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
description: Test to run
|
||||||
|
flaky_test_runs_per_job:
|
||||||
|
required: false
|
||||||
|
default: 8
|
||||||
|
type: number
|
||||||
|
description: Number of times to run the test
|
||||||
|
flaky_test_parallel_jobs:
|
||||||
|
required: false
|
||||||
|
default: 32
|
||||||
|
type: number
|
||||||
|
description: Number of parallel jobs to run
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: Build Citus
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
|
||||||
|
options: --user root
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- name: Configure, Build, and Install
|
||||||
|
run: |
|
||||||
|
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
|
||||||
|
./ci/build-citus.sh
|
||||||
|
shell: bash
|
||||||
|
- uses: actions/upload-artifact@v3.1.1
|
||||||
|
with:
|
||||||
|
name: build-${{ env.PG_MAJOR }}
|
||||||
|
path: |-
|
||||||
|
./build-${{ env.PG_MAJOR }}/*
|
||||||
|
./install-${{ env.PG_MAJOR }}.tar
|
||||||
|
prepare_parallelization_matrix:
|
||||||
|
name: Prepare parallelization matrix
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
json: ${{ steps.parallelization.outputs.json }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/parallelization"
|
||||||
|
id: parallelization
|
||||||
|
with:
|
||||||
|
count: ${{ inputs.flaky_test_parallel_jobs }}
|
||||||
|
test_flakyness:
|
||||||
|
name: Test flakyness
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
|
||||||
|
options: --user root
|
||||||
|
needs:
|
||||||
|
[build, prepare_parallelization_matrix]
|
||||||
|
env:
|
||||||
|
test: "${{ inputs.flaky_test }}"
|
||||||
|
runs: "${{ inputs.flaky_test_runs_per_job }}"
|
||||||
|
skip: false
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
- name: Run minimal tests
|
||||||
|
run: |-
|
||||||
|
gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
|
||||||
|
shell: bash
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
folder: ${{ matrix.id }}
|
|
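The flaky-test debugging workflow above is only triggered by `workflow_dispatch`, so it is started by hand. A hedged sketch of dispatching it with the GitHub CLI; the workflow file name, branch, and test name below are assumptions, not taken from this diff:

```bash
# Assumed workflow file name; use the actual path under .github/workflows/.
gh workflow run flaky_test_debugging.yml \
  --ref my-feature-branch \
  -f flaky_test=multi_insert_select \
  -f flaky_test_runs_per_job=8 \
  -f flaky_test_parallel_jobs=32
# With these inputs the test runs 8 times in each of 32 parallel jobs (256 runs total).
```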
@ -20,14 +20,16 @@ jobs:
|
||||||
- name: Get Postgres Versions
|
- name: Get Postgres Versions
|
||||||
id: get-postgres-versions
|
id: get-postgres-versions
|
||||||
run: |
|
run: |
|
||||||
# Postgres versions are stored in .circleci/config.yml file in "build-[pg-version] format. Below command
|
set -euxo pipefail
|
||||||
# extracts the versions and get the unique values.
|
# Postgres versions are stored in .github/workflows/build_and_test.yml
|
||||||
pg_versions=`grep -Eo 'build-[[:digit:]]{2}' .circleci/config.yml|sed -e "s/^build-//"|sort|uniq|tr '\n' ','| head -c -1`
|
# file in json strings with major and full keys.
|
||||||
|
# Below command extracts the versions and get the unique values.
|
||||||
|
pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
|
||||||
pg_versions_array="[ ${pg_versions} ]"
|
pg_versions_array="[ ${pg_versions} ]"
|
||||||
echo "Supported PG Versions: ${pg_versions_array}"
|
echo "Supported PG Versions: ${pg_versions_array}"
|
||||||
# Below line is needed to set the output variable to be used in the next job
|
# Below line is needed to set the output variable to be used in the next job
|
||||||
echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT
|
echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT
|
||||||
|
shell: bash
|
||||||
rpm_build_tests:
|
rpm_build_tests:
|
||||||
name: rpm_build_tests
|
name: rpm_build_tests
|
||||||
needs: get_postgres_versions_from_file
|
needs: get_postgres_versions_from_file
|
||||||
|
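The updated `get-postgres-versions` step above now scrapes the supported PG majors out of `.github/workflows/build_and_test.yml` instead of the old CircleCI config. A standalone sketch of the grep/sed part of that pipeline against a sample line (sample input only):

```bash
#!/usr/bin/env bash
# Extract the major version from a line shaped like the pgXX_version outputs.
sample='pg14_version: { "major": "14", "full": "14.7" }'
echo "$sample" |
  grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' |
  sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g'
# Prints: 14
```

The workflow then de-duplicates the majors, joins them with commas, and wraps them in brackets so the packaging matrix can consume them via `fromJson()`.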
@ -43,7 +45,7 @@ jobs:
|
||||||
- oraclelinux-7
|
- oraclelinux-7
|
||||||
- oraclelinux-8
|
- oraclelinux-8
|
||||||
- centos-7
|
- centos-7
|
||||||
- centos-8
|
- almalinux-8
|
||||||
- almalinux-9
|
- almalinux-9
|
||||||
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
|
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
|
||||||
|
|
||||||
|
@ -112,7 +114,6 @@ jobs:
|
||||||
- ubuntu-bionic-all
|
- ubuntu-bionic-all
|
||||||
- ubuntu-focal-all
|
- ubuntu-focal-all
|
||||||
- ubuntu-jammy-all
|
- ubuntu-jammy-all
|
||||||
- ubuntu-kinetic-all
|
|
||||||
|
|
||||||
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
|
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
|
||||||
|
|
||||||
|
@ -155,7 +156,7 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
echo "Postgres version: ${POSTGRES_VERSION}"
|
echo "Postgres version: ${POSTGRES_VERSION}"
|
||||||
|
|
||||||
apt-get update -y
|
apt-get update -y || true
|
||||||
## Install required packages to execute packaging tools for deb based distros
|
## Install required packages to execute packaging tools for deb based distros
|
||||||
apt-get install python3-dev python3-pip -y
|
apt-get install python3-dev python3-pip -y
|
||||||
apt-get purge -y python3-yaml
|
apt-get purge -y python3-yaml
|
||||||
|
|
|
@ -38,6 +38,8 @@ lib*.pc
|
||||||
/Makefile.global
|
/Makefile.global
|
||||||
/src/Makefile.custom
|
/src/Makefile.custom
|
||||||
/compile_commands.json
|
/compile_commands.json
|
||||||
|
/src/backend/distributed/cdc/build-cdc-*/*
|
||||||
|
/src/test/cdc/tmp_check/*
|
||||||
|
|
||||||
# temporary files vim creates
|
# temporary files vim creates
|
||||||
*.swp
|
*.swp
|
||||||
|
|
CHANGELOG.md (222 changed lines)
|
@ -1,3 +1,225 @@
|
||||||
|
### citus v11.3.1 (February 12, 2024) ###
|
||||||
|
|
||||||
|
* Disallows MERGE when the query prunes down to zero shards (#6946)
|
||||||
|
|
||||||
|
* Fixes a bug related to non-existent objects in DDL commands (#6984)
|
||||||
|
|
||||||
|
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
|
||||||
|
|
||||||
|
* Fixes a bug with deleting colocation groups (#6929)
|
||||||
|
|
||||||
|
* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
|
||||||
|
|
||||||
|
* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
|
||||||
|
|
||||||
|
* Fixes replicate reference tables task fail when user is superuser (#6930)
|
||||||
|
|
||||||
|
* Fixes the incorrect column count after ALTER TABLE (#7379)
|
||||||
|
|
||||||
|
* Improves citus_shard_sizes performance (#7050)
|
||||||
|
|
||||||
|
* Makes sure to disallow creating a replicated distributed table
|
||||||
|
concurrently (#7219)
|
||||||
|
|
||||||
|
* Removes pg_send_cancellation and all references (#7135)
|
||||||
|
|
||||||
|
### citus v11.3.0 (May 2, 2023) ###
|
||||||
|
|
||||||
|
* Introduces CDC implementation for Citus using logical replication
|
||||||
|
(#6623, #6810, #6827)
|
||||||
|
|
||||||
|
* Adds support for `MERGE` command on co-located distributed tables joined on
|
||||||
|
distribution column (#6696, #6733)
|
||||||
|
|
||||||
|
* Adds the view `citus_stats_tenants` that monitor statistics on tenant usages
|
||||||
|
(#6725)
|
||||||
|
|
||||||
|
* Adds the GUC `citus.max_background_task_executors_per_node` to control number
|
||||||
|
of background task executors involving a node (#6771)
|
||||||
|
|
||||||
|
* Allows parallel shard moves in background rebalancer (#6756)
|
||||||
|
|
||||||
|
* Introduces the GUC `citus.metadata_sync_mode` that introduces nontransactional
|
||||||
|
mode for metadata sync (#6728, #6889)
|
||||||
|
|
||||||
|
* Propagates CREATE/ALTER/DROP PUBLICATION statements for distributed tables
|
||||||
|
(#6776)
|
||||||
|
|
||||||
|
* Adds the GUC `citus.enable_non_colocated_router_query_pushdown` to ensure
|
||||||
|
generating a consistent distributed plan for the queries that reference
|
||||||
|
non-colocated distributed tables when set to "false" (#6793)
|
||||||
|
|
||||||
|
* Checks if all moves are able to be done via logical replication for rebalancer
|
||||||
|
(#6754)
|
||||||
|
|
||||||
|
* Correctly reports shard size in `citus_shards` view (#6748)
|
||||||
|
|
||||||
|
* Fixes a bug in shard copy operations (#6721)
|
||||||
|
|
||||||
|
* Fixes a bug that prevents enforcing identity column restrictions on worker
|
||||||
|
nodes (#6738)
|
||||||
|
|
||||||
|
* Fixes a bug with `INSERT .. SELECT` queries with identity columns (#6802)
|
||||||
|
|
||||||
|
* Fixes an issue that caused some queries with custom aggregates to fail (#6805)
|
||||||
|
|
||||||
|
* Fixes an issue when `citus_set_coordinator_host` is called more than once
|
||||||
|
(#6837)
|
||||||
|
|
||||||
|
* Fixes an uninitialized memory access in shard split API (#6845)
|
||||||
|
|
||||||
|
* Fixes memory leak and max allocation block errors during metadata syncing
|
||||||
|
(#6728)
|
||||||
|
|
||||||
|
* Fixes memory leak in `undistribute_table` (#6693)
|
||||||
|
|
||||||
|
* Fixes memory leak in `alter_distributed_table` (#6726)
|
||||||
|
|
||||||
|
* Fixes memory leak in `create_distributed_table` (#6722)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Improves rebalancer when shard groups have placement count less than worker
|
||||||
|
count (#6739)
|
||||||
|
|
||||||
|
* Makes sure to stop maintenance daemon when dropping a database even without
|
||||||
|
Citus extension (#6688)
|
||||||
|
|
||||||
|
* Prevents using `alter_distributed_table` and `undistribute_table` UDFs when a
|
||||||
|
table has identity columns (#6738)
|
||||||
|
|
||||||
|
* Prevents using identity columns on data types other than `bigint` on
|
||||||
|
distributed tables (#6738)
|
||||||
|
|
||||||
|
### citus v11.2.1 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Correctly reports shard size in `citus_shards` view (#6748)
|
||||||
|
|
||||||
|
* Fixes a bug in shard copy operations (#6721)
|
||||||
|
|
||||||
|
* Fixes a bug with `INSERT .. SELECT` queries with identity columns (#6802)
|
||||||
|
|
||||||
|
* Fixes an uninitialized memory access in shard split API (#6845)
|
||||||
|
|
||||||
|
* Fixes compilation for PG13.10 and PG14.7 (#6711)
|
||||||
|
|
||||||
|
* Fixes memory leak in `alter_distributed_table` (#6726)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Prevents using `alter_distributed_table` and `undistribute_table` UDFs when a
|
||||||
|
table has identity columns (#6738)
|
||||||
|
|
||||||
|
* Prevents using identity columns on data types other than `bigint` on
|
||||||
|
distributed tables (#6738)
|
||||||
|
|
||||||
|
### citus v11.1.6 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Correctly reports shard size in `citus_shards` view (#6748)
|
||||||
|
|
||||||
|
* Fixes a bug in shard copy operations (#6721)
|
||||||
|
|
||||||
|
* Fixes a bug that breaks pg upgrades if the user has a columnar table (#6624)
|
||||||
|
|
||||||
|
* Fixes a bug that causes background rebalancer to fail when a reference table
|
||||||
|
doesn't have a primary key (#6682)
|
||||||
|
|
||||||
|
* Fixes a regression in allowed foreign keys on distributed tables (#6550)
|
||||||
|
|
||||||
|
* Fixes a use-after-free bug in connection management (#6685)
|
||||||
|
|
||||||
|
* Fixes an unexpected foreign table error by disallowing to drop the
|
||||||
|
`table_name` option (#6669)
|
||||||
|
|
||||||
|
* Fixes an uninitialized memory access in shard split API (#6845)
|
||||||
|
|
||||||
|
* Fixes compilation for PG13.10 and PG14.7 (#6711)
|
||||||
|
|
||||||
|
* Fixes crash that happens when trying to replicate a reference table that is
|
||||||
|
actually dropped (#6595)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Fixes the modifiers for subscription and role creation (#6603)
|
||||||
|
|
||||||
|
* Makes sure to quote all identifiers used for logical replication to prevent
|
||||||
|
potential issues (#6604)
|
||||||
|
|
||||||
|
* Makes sure to skip foreign key validations at the end of shard moves (#6640)
|
||||||
|
|
||||||
|
### citus v11.0.8 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Correctly reports shard size in `citus_shards` view (#6748)
|
||||||
|
|
||||||
|
* Fixes a bug that breaks pg upgrades if the user has a columnar table (#6624)
|
||||||
|
|
||||||
|
* Fixes an unexpected foreign table error by disallowing to drop the
|
||||||
|
`table_name` option (#6669)
|
||||||
|
|
||||||
|
* Fixes compilation warning on PG13 + OpenSSL 3.0 (#6038, #6502)
|
||||||
|
|
||||||
|
* Fixes crash that happens when trying to replicate a reference table that is
|
||||||
|
actually dropped (#6595)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Fixes the modifiers for subscription and role creation (#6603)
|
||||||
|
|
||||||
|
* Fixes two potential dangling pointer issues (#6504, #6507)
|
||||||
|
|
||||||
|
* Makes sure to quote all identifiers used for logical replication to prevent
|
||||||
|
potential issues (#6604)
|
||||||
|
|
||||||
|
### citus v10.2.9 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Correctly reports shard size in `citus_shards` view (#6748)
|
||||||
|
|
||||||
|
* Fixes a bug in `ALTER EXTENSION citus UPDATE` (#6383)
|
||||||
|
|
||||||
|
* Fixes a bug that breaks pg upgrades if the user has a columnar table (#6624)
|
||||||
|
|
||||||
|
* Fixes a bug that prevents retaining columnar table options after a
|
||||||
|
table-rewrite (#6337)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Raises memory limits in columnar from 256MB to 1GB for reads and writes
|
||||||
|
(#6419)
|
||||||
|
|
||||||
|
### citus v10.1.6 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Fixes a crash that occurs when the aggregate that cannot be pushed-down
|
||||||
|
returns empty result from a worker (#5679)
|
||||||
|
|
||||||
|
* Fixes columnar freezing/wraparound bug (#5962)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Prevents alter table functions from dropping extensions (#5974)
|
||||||
|
|
||||||
|
### citus v10.0.8 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Fixes a bug that could break `DROP SCHEMA/EXTENSION` commands when there is a
|
||||||
|
columnar table (#5458)
|
||||||
|
|
||||||
|
* Fixes a crash that occurs when the aggregate that cannot be pushed-down
|
||||||
|
returns empty result from a worker (#5679)
|
||||||
|
|
||||||
|
* Fixes columnar freezing/wraparound bug (#5962)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Prevents alter table functions from dropping extensions (#5974)
|
||||||
|
|
||||||
|
### citus v9.5.12 (April 20, 2023) ###
|
||||||
|
|
||||||
|
* Fixes a crash that occurs when the aggregate that cannot be pushed-down
|
||||||
|
returns empty result from a worker (#5679)
|
||||||
|
|
||||||
|
* Fixes memory leak issue with query results that returns single row (#6724)
|
||||||
|
|
||||||
|
* Prevents alter table functions from dropping extensions (#5974)
|
||||||
|
|
||||||
### citus v11.2.0 (January 30, 2023) ###
|
### citus v11.2.0 (January 30, 2023) ###
|
||||||
|
|
||||||
* Adds support for outer joins with reference tables / complex subquery-CTEs
|
* Adds support for outer joins with reference tables / complex subquery-CTEs
|
||||||
|
|
Makefile (16 changed lines)
|
@ -11,7 +11,7 @@ endif
|
||||||
|
|
||||||
include Makefile.global
|
include Makefile.global
|
||||||
|
|
||||||
all: extension pg_send_cancellation
|
all: extension
|
||||||
|
|
||||||
|
|
||||||
# build columnar only
|
# build columnar only
|
||||||
|
@ -40,22 +40,14 @@ clean-full:
|
||||||
|
|
||||||
install-downgrades:
|
install-downgrades:
|
||||||
$(MAKE) -C src/backend/distributed/ install-downgrades
|
$(MAKE) -C src/backend/distributed/ install-downgrades
|
||||||
install-all: install-headers install-pg_send_cancellation
|
install-all: install-headers
|
||||||
$(MAKE) -C src/backend/columnar/ install-all
|
$(MAKE) -C src/backend/columnar/ install-all
|
||||||
$(MAKE) -C src/backend/distributed/ install-all
|
$(MAKE) -C src/backend/distributed/ install-all
|
||||||
|
|
||||||
# build citus_send_cancellation binary
|
|
||||||
pg_send_cancellation:
|
|
||||||
$(MAKE) -C src/bin/pg_send_cancellation/ all
|
|
||||||
install-pg_send_cancellation: pg_send_cancellation
|
|
||||||
$(MAKE) -C src/bin/pg_send_cancellation/ install
|
|
||||||
clean-pg_send_cancellation:
|
|
||||||
$(MAKE) -C src/bin/pg_send_cancellation/ clean
|
|
||||||
.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation
|
|
||||||
|
|
||||||
# Add to generic targets
|
# Add to generic targets
|
||||||
install: install-extension install-headers install-pg_send_cancellation
|
install: install-extension install-headers
|
||||||
clean: clean-extension clean-pg_send_cancellation
|
clean: clean-extension
|
||||||
|
|
||||||
# apply or check style
|
# apply or check style
|
||||||
reindent:
|
reindent:
|
||||||
|
|
|
@ -15,9 +15,6 @@ PG_MAJOR=${PG_MAJOR:?please provide the postgres major version}
|
||||||
codename=${VERSION#*(}
|
codename=${VERSION#*(}
|
||||||
codename=${codename%)*}
|
codename=${codename%)*}
|
||||||
|
|
||||||
# get project from argument
|
|
||||||
project="${CIRCLE_PROJECT_REPONAME}"
|
|
||||||
|
|
||||||
# we'll do everything with absolute paths
|
# we'll do everything with absolute paths
|
||||||
basedir="$(pwd)"
|
basedir="$(pwd)"
|
||||||
|
|
||||||
|
@ -28,7 +25,7 @@ build_ext() {
|
||||||
pg_major="$1"
|
pg_major="$1"
|
||||||
|
|
||||||
builddir="${basedir}/build-${pg_major}"
|
builddir="${basedir}/build-${pg_major}"
|
||||||
echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
|
echo "Beginning build for PostgreSQL ${pg_major}..." >&2
|
||||||
|
|
||||||
# do everything in a subdirectory to avoid clutter in current directory
|
# do everything in a subdirectory to avoid clutter in current directory
|
||||||
mkdir -p "${builddir}" && cd "${builddir}"
|
mkdir -p "${builddir}" && cd "${builddir}"
|
||||||
|
|
|
@ -14,8 +14,8 @@ ci_scripts=$(
|
||||||
grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
|
grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
|
||||||
)
|
)
|
||||||
for script in $ci_scripts; do
|
for script in $ci_scripts; do
|
||||||
if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
|
if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
|
||||||
echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
|
echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then
|
if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then
|
||||||
|
|
|
@ -1,96 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Testing this script locally requires you to set the following environment
|
|
||||||
# variables:
|
|
||||||
# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN
|
|
||||||
|
|
||||||
# fail if trying to reference a variable that is not set.
|
|
||||||
set -u
|
|
||||||
# exit immediately if a command fails
|
|
||||||
set -e
|
|
||||||
# Fail on pipe failures
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
PR_BRANCH="${CIRCLE_BRANCH}"
|
|
||||||
ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
source ci/ci_helpers.sh
|
|
||||||
|
|
||||||
# List executed commands. This is done so debugging this script is easier when
|
|
||||||
# it fails. It's explicitly done after git remote add so username and password
|
|
||||||
# are not shown in CI output (even though it's also filtered out by CircleCI)
|
|
||||||
set -x
|
|
||||||
|
|
||||||
check_compile () {
|
|
||||||
echo "INFO: checking if merged code can be compiled"
|
|
||||||
./configure --without-libcurl
|
|
||||||
make -j10
|
|
||||||
}
|
|
||||||
|
|
||||||
# Clone current git repo (which should be community) to a temporary working
|
|
||||||
# directory and go there
|
|
||||||
GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
|
|
||||||
TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
|
|
||||||
git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
|
|
||||||
cd "$TMP_GIT_DIR"
|
|
||||||
|
|
||||||
# Fails in CI without this
|
|
||||||
git config user.email "citus-bot@microsoft.com"
|
|
||||||
git config user.name "citus bot"
|
|
||||||
|
|
||||||
# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
|
|
||||||
{ set +x ; } 2> /dev/null
|
|
||||||
git remote add enterprise "$ENTERPRISE_REMOTE"
|
|
||||||
set -x
|
|
||||||
|
|
||||||
git remote set-url --push enterprise no-pushing
|
|
||||||
|
|
||||||
# Fetch enterprise-master
|
|
||||||
git fetch enterprise enterprise-master
|
|
||||||
|
|
||||||
|
|
||||||
git checkout "enterprise/enterprise-master"
|
|
||||||
|
|
||||||
if git merge --no-commit "origin/$PR_BRANCH"; then
|
|
||||||
echo "INFO: community PR branch could be merged into enterprise-master"
|
|
||||||
# check that we can compile after the merge
|
|
||||||
if check_compile; then
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "WARN: Failed to compile after community PR branch was merged into enterprise"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# undo partial merge
|
|
||||||
git merge --abort
|
|
||||||
|
|
||||||
# If we have a conflict on enterprise merge on the master branch, we have a problem.
|
|
||||||
# Provide an error message to indicate that enterprise merge is needed to fix this check.
|
|
||||||
if [[ $PR_BRANCH = master ]]; then
|
|
||||||
echo "ERROR: Master branch has merge conflicts with enterprise-master."
|
|
||||||
echo "Try re-running this CI job after merging your changes into enterprise-master."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! git fetch enterprise "$PR_BRANCH" ; then
|
|
||||||
echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Show the top commit of the enterprise PR branch to make debugging easier
|
|
||||||
git log -n 1 "enterprise/$PR_BRANCH"
|
|
||||||
|
|
||||||
# Check that this branch contains the top commit of the current community PR
|
|
||||||
# branch. If it does not it means it's not up to date with the current PR, so
|
|
||||||
# the enterprise branch should be updated.
|
|
||||||
if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
|
|
||||||
echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Now check if we can merge the enterprise PR into enterprise-master without
|
|
||||||
# issues.
|
|
||||||
git merge --no-commit "enterprise/$PR_BRANCH"
|
|
||||||
# check that we can compile after the merge
|
|
||||||
check_compile
|
|
|
@ -1,6 +1,6 @@
|
||||||
#! /bin/sh
|
#! /bin/sh
|
||||||
# Guess values for system-dependent variables and create Makefiles.
|
# Guess values for system-dependent variables and create Makefiles.
|
||||||
# Generated by GNU Autoconf 2.69 for Citus 11.3devel.
|
# Generated by GNU Autoconf 2.69 for Citus 11.3.1.
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
||||||
|
@ -579,8 +579,8 @@ MAKEFLAGS=
|
||||||
# Identity of this package.
|
# Identity of this package.
|
||||||
PACKAGE_NAME='Citus'
|
PACKAGE_NAME='Citus'
|
||||||
PACKAGE_TARNAME='citus'
|
PACKAGE_TARNAME='citus'
|
||||||
PACKAGE_VERSION='11.3devel'
|
PACKAGE_VERSION='11.3.1'
|
||||||
PACKAGE_STRING='Citus 11.3devel'
|
PACKAGE_STRING='Citus 11.3.1'
|
||||||
PACKAGE_BUGREPORT=''
|
PACKAGE_BUGREPORT=''
|
||||||
PACKAGE_URL=''
|
PACKAGE_URL=''
|
||||||
|
|
||||||
|
@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
|
||||||
# Omit some internal or obsolete options to make the list less imposing.
|
# Omit some internal or obsolete options to make the list less imposing.
|
||||||
# This message is too long to be a string in the A/UX 3.1 sh.
|
# This message is too long to be a string in the A/UX 3.1 sh.
|
||||||
cat <<_ACEOF
|
cat <<_ACEOF
|
||||||
\`configure' configures Citus 11.3devel to adapt to many kinds of systems.
|
\`configure' configures Citus 11.3.1 to adapt to many kinds of systems.
|
||||||
|
|
||||||
Usage: $0 [OPTION]... [VAR=VALUE]...
|
Usage: $0 [OPTION]... [VAR=VALUE]...
|
||||||
|
|
||||||
|
@ -1324,7 +1324,7 @@ fi
|
||||||
|
|
||||||
if test -n "$ac_init_help"; then
|
if test -n "$ac_init_help"; then
|
||||||
case $ac_init_help in
|
case $ac_init_help in
|
||||||
short | recursive ) echo "Configuration of Citus 11.3devel:";;
|
short | recursive ) echo "Configuration of Citus 11.3.1:";;
|
||||||
esac
|
esac
|
||||||
cat <<\_ACEOF
|
cat <<\_ACEOF
|
||||||
|
|
||||||
|
@ -1429,7 +1429,7 @@ fi
|
||||||
test -n "$ac_init_help" && exit $ac_status
|
test -n "$ac_init_help" && exit $ac_status
|
||||||
if $ac_init_version; then
|
if $ac_init_version; then
|
||||||
cat <<\_ACEOF
|
cat <<\_ACEOF
|
||||||
Citus configure 11.3devel
|
Citus configure 11.3.1
|
||||||
generated by GNU Autoconf 2.69
|
generated by GNU Autoconf 2.69
|
||||||
|
|
||||||
Copyright (C) 2012 Free Software Foundation, Inc.
|
Copyright (C) 2012 Free Software Foundation, Inc.
|
||||||
|
@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
|
||||||
This file contains any messages produced by compilers while
|
This file contains any messages produced by compilers while
|
||||||
running configure, to aid debugging if configure makes a mistake.
|
running configure, to aid debugging if configure makes a mistake.
|
||||||
|
|
||||||
It was created by Citus $as_me 11.3devel, which was
|
It was created by Citus $as_me 11.3.1, which was
|
||||||
generated by GNU Autoconf 2.69. Invocation command line was
|
generated by GNU Autoconf 2.69. Invocation command line was
|
||||||
|
|
||||||
$ $0 $@
|
$ $0 $@
|
||||||
|
@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
|
||||||
# report actual input values of CONFIG_FILES etc. instead of their
|
# report actual input values of CONFIG_FILES etc. instead of their
|
||||||
# values after options handling.
|
# values after options handling.
|
||||||
ac_log="
|
ac_log="
|
||||||
This file was extended by Citus $as_me 11.3devel, which was
|
This file was extended by Citus $as_me 11.3.1, which was
|
||||||
generated by GNU Autoconf 2.69. Invocation command line was
|
generated by GNU Autoconf 2.69. Invocation command line was
|
||||||
|
|
||||||
CONFIG_FILES = $CONFIG_FILES
|
CONFIG_FILES = $CONFIG_FILES
|
||||||
|
@ -5455,7 +5455,7 @@ _ACEOF
|
||||||
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
||||||
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
||||||
ac_cs_version="\\
|
ac_cs_version="\\
|
||||||
Citus config.status 11.3devel
|
Citus config.status 11.3.1
|
||||||
configured by $0, generated by GNU Autoconf 2.69,
|
configured by $0, generated by GNU Autoconf 2.69,
|
||||||
with options \\"\$ac_cs_config\\"
|
with options \\"\$ac_cs_config\\"
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
# everyone needing autoconf installed, the resulting files are checked
|
# everyone needing autoconf installed, the resulting files are checked
|
||||||
# into the SCM.
|
# into the SCM.
|
||||||
|
|
||||||
AC_INIT([Citus], [11.3devel])
|
AC_INIT([Citus], [11.3.1])
|
||||||
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
|
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
|
||||||
|
|
||||||
# we'll need sed and awk for some of the version commands
|
# we'll need sed and awk for some of the version commands
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# Columnar extension
|
# Columnar extension
|
||||||
comment = 'Citus Columnar extension'
|
comment = 'Citus Columnar extension'
|
||||||
default_version = '11.2-1'
|
default_version = '11.3-1'
|
||||||
module_pathname = '$libdir/citus_columnar'
|
module_pathname = '$libdir/citus_columnar'
|
||||||
relocatable = false
|
relocatable = false
|
||||||
schema = pg_catalog
|
schema = pg_catalog
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
-- citus_columnar--11.2-1--11.3-1
|
|
@ -0,0 +1 @@
|
||||||
|
-- citus_columnar--11.3-1--11.2-1
|
|
@ -1,6 +1,6 @@
|
||||||
# Citus extension
|
# Citus extension
|
||||||
comment = 'Citus distributed database'
|
comment = 'Citus distributed database'
|
||||||
default_version = '11.3-1'
|
default_version = '11.3-2'
|
||||||
module_pathname = '$libdir/citus'
|
module_pathname = '$libdir/citus'
|
||||||
relocatable = false
|
relocatable = false
|
||||||
schema = pg_catalog
|
schema = pg_catalog
|
||||||
|
|
|
@@ -1710,20 +1710,13 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
 	}
 	else if (ShouldSyncTableMetadata(sourceId))
 	{
-		char *qualifiedTableName = quote_qualified_identifier(schemaName, sourceName);
-
 		/*
 		 * We are converting a citus local table to a distributed/reference table,
 		 * so we should prevent dropping the sequence on the table. Otherwise, we'd
 		 * lose track of the previous changes in the sequence.
 		 */
-		StringInfo command = makeStringInfo();
-
-		appendStringInfo(command,
-						 "SELECT pg_catalog.worker_drop_sequence_dependency(%s);",
-						 quote_literal_cstr(qualifiedTableName));
-
-		SendCommandToWorkersWithMetadata(command->data);
+		char *command = WorkerDropSequenceDependencyCommand(sourceId);
+		SendCommandToWorkersWithMetadata(command);
 	}
 }
@@ -415,6 +415,19 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
 	if (!IsColocateWithDefault(colocateWithTableName) && !IsColocateWithNone(
 			colocateWithTableName))
 	{
+		if (replicationModel != REPLICATION_MODEL_STREAMING)
+		{
+			ereport(ERROR, (errmsg("cannot create distributed table "
+								   "concurrently because Citus allows "
+								   "concurrent table distribution only when "
+								   "citus.shard_replication_factor = 1"),
+							errhint("table %s is requested to be colocated "
+									"with %s which has "
+									"citus.shard_replication_factor > 1",
+									get_rel_name(relationId),
+									colocateWithTableName)));
+		}
+
 		EnsureColocateWithTableIsValid(relationId, distributionMethod,
 									   distributionColumnName,
 									   colocateWithTableName);
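A hedged usage sketch of what the hunk above guards: concurrent distribution with an explicit colocation target is only accepted when the colocated table uses streaming replication (citus.shard_replication_factor = 1). The table and column names here are made up.

-- 'events' and 'companies' are hypothetical relations.
SELECT create_distributed_table_concurrently('events', 'tenant_id',
                                              colocate_with => 'companies');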
@@ -393,9 +393,17 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 								tableDDLCommand));
 			}

-			/* we need to drop table, if exists, first to make table creation idempotent */
+			/*
+			 * We need to drop table, if exists, first to make table creation
+			 * idempotent. Before dropping the table, we should also break
+			 * dependencies with sequences since `drop cascade table` would also
+			 * drop depended sequences. This is safe as we still record dependency
+			 * with the sequence during table creation.
+			 */
 			commandList = lcons(DropTableIfExistsCommand(relationId),
 								commandList);
+			commandList = lcons(WorkerDropSequenceDependencyCommand(relationId),
+								commandList);
 		}

 		return commandList;
@@ -187,7 +187,9 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 		IsA(parsetree, ExecuteStmt) ||
 		IsA(parsetree, PrepareStmt) ||
 		IsA(parsetree, DiscardStmt) ||
-		IsA(parsetree, DeallocateStmt))
+		IsA(parsetree, DeallocateStmt) ||
+		IsA(parsetree, DeclareCursorStmt) ||
+		IsA(parsetree, FetchStmt))
 	{
 		/*
 		 * Skip additional checks for common commands that do not have any
@@ -716,14 +716,14 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
 	Assert(PQisnonblocking(pgConn));

 	int copyState = PQputCopyData(pgConn, buffer, nbytes);
-	if (copyState == -1)
+	if (copyState <= 0)
 	{
 		return false;
 	}

 	/*
 	 * PQputCopyData may have queued up part of the data even if it managed
-	 * to send some of it succesfully. We provide back pressure by waiting
+	 * to send some of it successfully. We provide back pressure by waiting
 	 * until the socket is writable to prevent the internal libpq buffers
 	 * from growing excessively.
 	 *
@@ -1406,8 +1406,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
 		i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif

@@ -1529,8 +1529,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
 		i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif

@@ -1566,8 +1566,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
 		i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif
@@ -780,7 +780,19 @@ CitusEndScan(CustomScanState *node)
  */
 static void
 CitusReScan(CustomScanState *node)
-{ }
+{
+	if (node->ss.ps.ps_ResultTupleSlot)
+	{
+		ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
+	}
+	ExecScanReScan(&node->ss);
+
+	CitusScanState *scanState = (CitusScanState *) node;
+	if (scanState->tuplestorestate)
+	{
+		tuplestore_rescan(scanState->tuplestorestate);
+	}
+}


 /*
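Taken together with the earlier DeclareCursorStmt/FetchStmt whitelist in multi_ProcessUtility, this hunk appears to target cursor-style access to distributed query results. A minimal, hypothetical session of the kind involved (the table name is made up; this is an illustration, not a test from the diff):

BEGIN;
DECLARE tenant_cursor CURSOR FOR SELECT * FROM events WHERE tenant_id = 42;
FETCH 100 FROM tenant_cursor;  -- FetchStmt now takes the utility fast path
FETCH 100 FROM tenant_cursor;
CLOSE tenant_cursor;
COMMIT;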
@@ -129,6 +129,8 @@ static void LogLocalCommand(Task *task);
 static uint64 LocallyPlanAndExecuteMultipleQueries(List *queryStrings,
 												   TupleDestination *tupleDest,
 												   Task *task);
+static void SetColocationIdAndPartitionKeyValueForTasks(List *taskList,
+														 Job *distributedPlan);
 static void LocallyExecuteUtilityTask(Task *task);
 static void ExecuteUdfTaskQuery(Query *localUdfCommandQuery);
 static void EnsureTransitionPossible(LocalExecutionStatus from,

@@ -228,6 +230,17 @@ ExecuteLocalTaskListExtended(List *taskList,
 		EnsureTaskExecutionAllowed(isRemote);
 	}

+	/*
+	 * If workerJob has a partitionKeyValue, we need to set the colocation id
+	 * and partition key value for each task before we start executing them
+	 * because tenant stats are collected based on these values of a task.
+	 */
+	if (distributedPlan != NULL && distributedPlan->workerJob != NULL && taskList != NIL)
+	{
+		SetJobColocationId(distributedPlan->workerJob);
+		SetColocationIdAndPartitionKeyValueForTasks(taskList, distributedPlan->workerJob);
+	}
+
 	/*
 	 * Use a new memory context that gets reset after every task to free
 	 * the deparsed query string and query plan.

@@ -367,6 +380,26 @@ ExecuteLocalTaskListExtended(List *taskList,
 }


+/*
+ * SetColocationIdAndPartitionKeyValueForTasks sets colocationId and partitionKeyValue
+ * for the tasks in the taskList if workerJob has a colocationId and partitionKeyValue.
+ */
+static void
+SetColocationIdAndPartitionKeyValueForTasks(List *taskList, Job *workerJob)
+{
+	if (workerJob->colocationId != 0 &&
+		workerJob->partitionKeyValue != NULL)
+	{
+		Task *task = NULL;
+		foreach_ptr(task, taskList)
+		{
+			task->colocationId = workerJob->colocationId;
+			task->partitionKeyValue = workerJob->partitionKeyValue;
+		}
+	}
+}
+
+
 /*
  * LocallyPlanAndExecuteMultipleQueries plans and executes the given query strings
  * one by one.
@@ -686,7 +686,7 @@ DropMetadataSnapshotOnNode(WorkerNode *workerNode)
 	bool singleTransaction = true;
 	List *dropMetadataCommandList = DetachPartitionCommandList();
 	dropMetadataCommandList = lappend(dropMetadataCommandList,
-									  BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND);
+									  BREAK_ALL_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND);
 	dropMetadataCommandList = lappend(dropMetadataCommandList,
 									  WorkerDropAllShellTablesCommand(singleTransaction));
 	dropMetadataCommandList = list_concat(dropMetadataCommandList,

@@ -4235,6 +4235,22 @@ WorkerDropAllShellTablesCommand(bool singleTransaction)
 }


+/*
+ * WorkerDropSequenceDependencyCommand returns command to drop sequence dependencies for
+ * given table.
+ */
+char *
+WorkerDropSequenceDependencyCommand(Oid relationId)
+{
+	char *qualifiedTableName = generate_qualified_relation_name(relationId);
+	StringInfo breakSequenceDepCommand = makeStringInfo();
+	appendStringInfo(breakSequenceDepCommand,
+					 BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND,
+					 quote_literal_cstr(qualifiedTableName));
+	return breakSequenceDepCommand->data;
+}
+
+
 /*
  * PropagateNodeWideObjectsCommandList is called during node activation to
  * propagate any object that should be propagated for every node. These are

@@ -4352,8 +4368,8 @@ SendNodeWideObjectsSyncCommands(MetadataSyncContext *context)
 void
 SendShellTableDeletionCommands(MetadataSyncContext *context)
 {
-	/* break all sequence deps for citus tables and remove all shell tables */
-	char *breakSeqDepsCommand = BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND;
+	/* break all sequence deps for citus tables */
+	char *breakSeqDepsCommand = BREAK_ALL_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND;
 	SendOrCollectCommandListToActivatedNodes(context, list_make1(breakSeqDepsCommand));

 	/* remove shell tables */
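For reference, the command the new per-table helper renders is the same one the old inline code in ReplaceTable built by hand; the qualified table name below is only a placeholder.

-- 'public.events' stands in for the qualified relation name passed to the helper.
SELECT pg_catalog.worker_drop_sequence_dependency('public.events');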
@@ -91,7 +91,8 @@ static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
 											SizeQueryType sizeQueryType, bool failOnError,
 											uint64 *tableSize);
 static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId);
-static char * GenerateShardStatisticsQueryForShardList(List *shardIntervalList);
+static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList,
+													bool firstValue);
 static char * GenerateSizeQueryForRelationNameList(List *quotedShardNames,
 												   char *sizeFunction);
 static char * GetWorkerPartitionedSizeUDFNameBySizeQueryType(SizeQueryType sizeQueryType);

@@ -101,10 +102,10 @@ static char * GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode,
 static List * GenerateShardStatisticsQueryList(List *workerNodeList, List *citusTableIds);
 static void ErrorIfNotSuitableToGetSize(Oid relationId);
 static List * OpenConnectionToNodes(List *workerNodeList);
-static void ReceiveShardNameAndSizeResults(List *connectionList,
+static void ReceiveShardIdAndSizeResults(List *connectionList,
 										   Tuplestorestate *tupleStore,
 										   TupleDesc tupleDescriptor);
-static void AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval);
+static void AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval);

 static HeapTuple CreateDiskSpaceTuple(TupleDesc tupleDesc, uint64 availableBytes,
 									  uint64 totalBytes);
@@ -253,7 +254,7 @@ GetNodeDiskSpaceStatsForConnection(MultiConnection *connection, uint64 *availabl


 /*
- * citus_shard_sizes returns all shard names and their sizes.
+ * citus_shard_sizes returns all shard ids and their sizes.
  */
 Datum
 citus_shard_sizes(PG_FUNCTION_ARGS)

@@ -271,7 +272,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS)
 	TupleDesc tupleDescriptor = NULL;
 	Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);

-	ReceiveShardNameAndSizeResults(connectionList, tupleStore, tupleDescriptor);
+	ReceiveShardIdAndSizeResults(connectionList, tupleStore, tupleDescriptor);

 	PG_RETURN_VOID();
 }

@@ -446,11 +447,11 @@ GenerateShardStatisticsQueryList(List *workerNodeList, List *citusTableIds)


 /*
- * ReceiveShardNameAndSizeResults receives shard name and size results from the given
+ * ReceiveShardIdAndSizeResults receives shard id and size results from the given
  * connection list.
  */
 static void
-ReceiveShardNameAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
+ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
 							   TupleDesc tupleDescriptor)
 {
 	MultiConnection *connection = NULL;
@@ -488,13 +489,9 @@ ReceiveShardNameAndSizeResults(List *connectionList, Tuplestorestate *tupleStore
 			memset(values, 0, sizeof(values));
 			memset(isNulls, false, sizeof(isNulls));

-			/* format is [0] shard id, [1] shard name, [2] size */
-			char *tableName = PQgetvalue(result, rowIndex, 1);
-			Datum resultStringDatum = CStringGetDatum(tableName);
-			Datum textDatum = DirectFunctionCall1(textin, resultStringDatum);
-
-			values[0] = textDatum;
-			values[1] = ParseIntField(result, rowIndex, 2);
+			/* format is [0] shard id, [1] size */
+			values[0] = ParseIntField(result, rowIndex, 0);
+			values[1] = ParseIntField(result, rowIndex, 1);

 			tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls);
 		}

@@ -920,6 +917,12 @@ static char *
 GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableIds)
 {
 	StringInfo allShardStatisticsQuery = makeStringInfo();
+	bool insertedValues = false;
+
+	appendStringInfoString(allShardStatisticsQuery, "SELECT shard_id, ");
+	appendStringInfo(allShardStatisticsQuery, PG_TOTAL_RELATION_SIZE_FUNCTION,
+					 "table_name");
+	appendStringInfoString(allShardStatisticsQuery, " FROM (VALUES ");

 	Oid relationId = InvalidOid;
 	foreach_oid(relationId, citusTableIds)

@@ -934,34 +937,49 @@ GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableI
 		{
 			List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode,
 																	 relationId);
-			char *shardStatisticsQuery =
-				GenerateShardStatisticsQueryForShardList(shardIntervalsOnNode);
-			appendStringInfoString(allShardStatisticsQuery, shardStatisticsQuery);
+			if (list_length(shardIntervalsOnNode) == 0)
+			{
+				relation_close(relation, AccessShareLock);
+				continue;
+			}
+			char *shardIdNameValues =
+				GenerateShardIdNameValuesForShardList(shardIntervalsOnNode,
+													  !insertedValues);
+			insertedValues = true;
+			appendStringInfoString(allShardStatisticsQuery, shardIdNameValues);
 			relation_close(relation, AccessShareLock);
 		}
 	}

-	/* Add a dummy entry so that UNION ALL doesn't complain */
-	appendStringInfo(allShardStatisticsQuery, "SELECT 0::bigint, NULL::text, 0::bigint;");
+	if (!insertedValues)
+	{
+		return "SELECT 0 AS shard_id, '' AS table_name LIMIT 0";
+	}
+
+	appendStringInfoString(allShardStatisticsQuery, ") t(shard_id, table_name) "
+													"WHERE to_regclass(table_name) IS NOT NULL");
 	return allShardStatisticsQuery->data;
 }


 /*
- * GenerateShardStatisticsQueryForShardList generates a query that returns:
- * SELECT shard_id, shard_name, shard_size for all shards in the list
+ * GenerateShardIdNameValuesForShardList generates a list of (shard_id, shard_name) values
+ * for all shards in the list
  */
 static char *
-GenerateShardStatisticsQueryForShardList(List *shardIntervalList)
+GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue)
 {
 	StringInfo selectQuery = makeStringInfo();

 	ShardInterval *shardInterval = NULL;
 	foreach_ptr(shardInterval, shardIntervalList)
 	{
-		AppendShardSizeQuery(selectQuery, shardInterval);
-		appendStringInfo(selectQuery, " UNION ALL ");
+		if (!firstValue)
+		{
+			appendStringInfoString(selectQuery, ", ");
+		}
+		firstValue = false;
+		AppendShardIdNameValues(selectQuery, shardInterval);
 	}

 	return selectQuery->data;
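For orientation, the per-node statistics query assembled above now has roughly this shape; the shard ids and names are made up, and PG_TOTAL_RELATION_SIZE_FUNCTION is assumed to expand to pg_total_relation_size(%s).

-- Sketch of the generated query, with a hypothetical two-shard VALUES list.
SELECT shard_id, pg_total_relation_size(table_name)
FROM (VALUES (102008, 'public.events_102008'),
             (102009, 'public.events_102009')) t(shard_id, table_name)
WHERE to_regclass(table_name) IS NOT NULL;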
@@ -969,11 +987,10 @@ GenerateShardStatisticsQueryForShardList(List *shardIntervalList)


 /*
- * AppendShardSizeQuery appends a query in the following form to selectQuery
- * SELECT shard_id, shard_name, shard_size
+ * AppendShardIdNameValues appends (shard_id, shard_name) for shard
  */
 static void
-AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval)
+AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval)
 {
 	uint64 shardId = shardInterval->shardId;
 	Oid schemaId = get_rel_namespace(shardInterval->relationId);

@@ -985,9 +1002,7 @@ AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval)
 	char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
 	char *quotedShardName = quote_literal_cstr(shardQualifiedName);

-	appendStringInfo(selectQuery, "SELECT " UINT64_FORMAT " AS shard_id, ", shardId);
-	appendStringInfo(selectQuery, "%s AS shard_name, ", quotedShardName);
-	appendStringInfo(selectQuery, PG_TOTAL_RELATION_SIZE_FUNCTION, quotedShardName);
+	appendStringInfo(selectQuery, "(" UINT64_FORMAT ", %s)", shardId, quotedShardName);
 }

@@ -108,7 +108,8 @@ static void BlockDistributedQueriesOnMetadataNodes(void);
 static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
 static bool NodeIsLocal(WorkerNode *worker);
 static void SetLockTimeoutLocally(int32 lock_cooldown);
-static void UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort);
+static void UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort,
+							   bool localOnly);
 static bool UnsetMetadataSyncedForAllWorkers(void);
 static char * GetMetadataSyncCommandToSetNodeColumn(WorkerNode *workerNode,
 													int columnIndex,

@@ -231,8 +232,8 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)
 	 * do not need to worry about concurrent changes (e.g. deletion) and
 	 * can proceed to update immediately.
 	 */
-	UpdateNodeLocation(coordinatorNode->nodeId, nodeNameString, nodePort);
+	bool localOnly = false;
+	UpdateNodeLocation(coordinatorNode->nodeId, nodeNameString, nodePort, localOnly);

 	/* clear cached plans that have the old host/port */
 	ResetPlanCache();

@@ -1290,7 +1291,8 @@ citus_update_node(PG_FUNCTION_ARGS)
 	 */
 	ResetPlanCache();

-	UpdateNodeLocation(nodeId, newNodeNameString, newNodePort);
+	bool localOnly = true;
+	UpdateNodeLocation(nodeId, newNodeNameString, newNodePort, localOnly);

 	/* we should be able to find the new node from the metadata */
 	workerNode = FindWorkerNodeAnyCluster(newNodeNameString, newNodePort);

@@ -1352,7 +1354,7 @@ SetLockTimeoutLocally(int32 lockCooldown)


 static void
-UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort)
+UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort, bool localOnly)
 {
 	const bool indexOK = true;

@@ -1396,6 +1398,20 @@ UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort)

 	CommandCounterIncrement();

+	if (!localOnly && EnableMetadataSync)
+	{
+		WorkerNode *updatedNode = FindWorkerNodeAnyCluster(newNodeName, newNodePort);
+		Assert(updatedNode->nodeId == nodeId);
+
+		/* send the delete command to all primary nodes with metadata */
+		char *nodeDeleteCommand = NodeDeleteCommand(updatedNode->nodeId);
+		SendCommandToWorkersWithMetadata(nodeDeleteCommand);
+
+		/* send the insert command to all primary nodes with metadata */
+		char *nodeInsertCommand = NodeListInsertCommand(list_make1(updatedNode));
+		SendCommandToWorkersWithMetadata(nodeInsertCommand);
+	}
+
 	systable_endscan(scanDescriptor);
 	table_close(pgDistNode, NoLock);
 }
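A hedged illustration of the call paths affected above: citus_set_coordinator_host() now propagates the updated node row to metadata workers, while citus_update_node() keeps the update local. Host names and the node id below are placeholders.

-- Placeholder host names and node id; run on the coordinator.
SELECT citus_set_coordinator_host('coord-new.example.com', 5432);
SELECT citus_update_node(2, 'worker-2-new.example.com', 5432);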
@@ -515,6 +515,16 @@ GetRebalanceSteps(RebalanceOptions *options)

 	/* sort the lists to make the function more deterministic */
 	List *activeWorkerList = SortedActiveWorkers();
+	int shardAllowedNodeCount = 0;
+	WorkerNode *workerNode = NULL;
+	foreach_ptr(workerNode, activeWorkerList)
+	{
+		if (workerNode->shouldHaveShards)
+		{
+			shardAllowedNodeCount++;
+		}
+	}
+
 	List *activeShardPlacementListList = NIL;
 	List *unbalancedShards = NIL;

@@ -532,8 +542,7 @@ GetRebalanceSteps(RebalanceOptions *options)
 												shardPlacementList, options->workerNode);
 		}

-		if (list_length(activeShardPlacementListForRelation) >= list_length(
-				activeWorkerList))
+		if (list_length(activeShardPlacementListForRelation) >= shardAllowedNodeCount)
 		{
 			activeShardPlacementListList = lappend(activeShardPlacementListList,
 												   activeShardPlacementListForRelation);
@@ -2165,7 +2174,10 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo
 										 quote_literal_cstr(shardTranferModeLabel));

 		int32 nodesInvolved[] = { 0 };
-		BackgroundTask *task = ScheduleBackgroundTask(jobId, GetUserId(), buf.data, 0,
+
+		/* replicate_reference_tables permissions require superuser */
+		Oid superUserId = CitusExtensionOwner();
+		BackgroundTask *task = ScheduleBackgroundTask(jobId, superUserId, buf.data, 0,
 													  NULL, 0, nodesInvolved);
 		replicateRefTablesTaskId = task->taskid;
 	}

@@ -2268,7 +2280,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 	if (updateType == PLACEMENT_UPDATE_MOVE)
 	{
 		appendStringInfo(placementUpdateCommand,
-						 "SELECT citus_move_shard_placement(%ld,%u,%u,%s)",
+						 "SELECT pg_catalog.citus_move_shard_placement(%ld,%u,%u,%s)",
 						 shardId,
 						 sourceNode->nodeId,
 						 targetNode->nodeId,

@@ -2277,7 +2289,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 	else if (updateType == PLACEMENT_UPDATE_COPY)
 	{
 		appendStringInfo(placementUpdateCommand,
-						 "SELECT citus_copy_shard_placement(%ld,%u,%u,%s)",
+						 "SELECT pg_catalog.citus_copy_shard_placement(%ld,%u,%u,%s)",
 						 shardId,
 						 sourceNode->nodeId,
 						 targetNode->nodeId,
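The rebalancer now schema-qualifies the command it schedules; the rendered statement looks roughly like the sketch below (shard id, node ids, and transfer mode are placeholders).

-- Placeholder arguments: shard id 102008 moved from node 1 to node 2.
SELECT pg_catalog.citus_move_shard_placement(102008, 1, 2, 'auto');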
@@ -1810,7 +1810,7 @@ CreateWorkerForPlacementSet(List *workersForPlacementList)
 	/* we don't have value field as it's a set */
 	info.entrysize = info.keysize;

-	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);

 	HTAB *workerForPlacementSet = hash_create("worker placement set", 32, &info,
 											  hashFlags);
@@ -855,7 +855,7 @@ ProcessShardStatisticsRow(PGresult *result, int64 rowIndex, uint64 *shardId,
 		return false;
 	}

-	*shardSize = ParseIntField(result, rowIndex, 2);
+	*shardSize = ParseIntField(result, rowIndex, 1);
 	return true;
 }

@@ -1885,19 +1885,38 @@ RouterJob(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionCon
 	{
 		RangeTblEntry *updateOrDeleteOrMergeRTE = ExtractResultRelationRTE(originalQuery);

-		/*
-		 * If all of the shards are pruned, we replace the relation RTE into
-		 * subquery RTE that returns no results. However, this is not useful
-		 * for UPDATE and DELETE queries. Therefore, if we detect a UPDATE or
-		 * DELETE RTE with subquery type, we just set task list to empty and return
-		 * the job.
-		 */
 		if (updateOrDeleteOrMergeRTE->rtekind == RTE_SUBQUERY)
 		{
+			/*
+			 * Not generating tasks for MERGE target relation might
+			 * result in incorrect behavior as source rows with NOT
+			 * MATCHED clause might qualify for insertion.
+			 */
+			if (IsMergeQuery(originalQuery))
+			{
+				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+								errmsg("Merge command is currently "
+									   "unsupported with filters that "
+									   "prunes down to zero shards"),
+								errhint("Avoid `WHERE false` clause or "
+										"any equivalent filters that "
+										"could prune down to zero shards")));
+			}
+			else
+			{
+				/*
+				 * If all of the shards are pruned, we replace the
+				 * relation RTE into subquery RTE that returns no
+				 * results. However, this is not useful for UPDATE
+				 * and DELETE queries. Therefore, if we detect a
+				 * UPDATE or DELETE RTE with subquery type, we just
+				 * set task list to empty and return the job.
+				 */
 				job->taskList = NIL;
 				return job;
 			}
 		}
+	}

 	if (isMultiShardModifyQuery)
 	{
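A hypothetical illustration of the statement shape the new error targets: a MERGE whose filters prune the distributed target down to zero shards now raises the error above instead of silently planning no tasks. The tables and columns here are made up.

MERGE INTO orders_target t
USING orders_source s
ON t.order_id = s.order_id AND false  -- prunes every shard away
WHEN NOT MATCHED THEN INSERT VALUES (s.order_id, s.total);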
@@ -0,0 +1,9 @@
+DROP VIEW citus_shards;
+DROP VIEW IF EXISTS pg_catalog.citus_tables;
+DROP VIEW IF EXISTS public.citus_tables;
+DROP FUNCTION citus_shard_sizes;
+
+#include "udfs/citus_shard_sizes/11.3-2.sql"
+
+#include "udfs/citus_shards/11.3-2.sql"
+#include "udfs/citus_tables/11.3-2.sql"
@@ -0,0 +1,13 @@
+DROP VIEW IF EXISTS public.citus_tables;
+DROP VIEW IF EXISTS pg_catalog.citus_tables;
+
+DROP VIEW pg_catalog.citus_shards;
+DROP FUNCTION pg_catalog.citus_shard_sizes;
+#include "../udfs/citus_shard_sizes/10.0-1.sql"
+-- citus_shards/11.1-1.sql tries to create citus_shards in pg_catalog but it is not allowed.
+-- Here we use citus_shards/10.0-1.sql to properly create the view in citus schema and
+-- then alter it to pg_catalog, so citus_shards/11.1-1.sql can REPLACE it without any errors.
+#include "../udfs/citus_shards/10.0-1.sql"
+
+#include "../udfs/citus_tables/11.1-1.sql"
+#include "../udfs/citus_shards/11.1-1.sql"
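On an existing installation, these migration and downgrade scripts are applied through the standard extension upgrade path; the version string matches the default_version bump in citus.control above.

ALTER EXTENSION citus UPDATE TO '11.3-2';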
@@ -0,0 +1,6 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_sizes(OUT shard_id int, OUT size bigint)
+    RETURNS SETOF RECORD
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_shard_sizes$$;
+COMMENT ON FUNCTION pg_catalog.citus_shard_sizes(OUT shard_id int, OUT size bigint)
+    IS 'returns shards sizes across citus cluster';
@@ -1,6 +1,6 @@
-CREATE FUNCTION pg_catalog.citus_shard_sizes(OUT table_name text, OUT size bigint)
+CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_sizes(OUT shard_id int, OUT size bigint)
     RETURNS SETOF RECORD
     LANGUAGE C STRICT
     AS 'MODULE_PATHNAME', $$citus_shard_sizes$$;
-COMMENT ON FUNCTION pg_catalog.citus_shard_sizes(OUT table_name text, OUT size bigint)
+COMMENT ON FUNCTION pg_catalog.citus_shard_sizes(OUT shard_id int, OUT size bigint)
     IS 'returns shards sizes across citus cluster';
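With the function now returning (shard_id, size) instead of (table_name, size), callers join on pg_dist_shard rather than parsing shard names. A small sketch mirroring the join the citus_tables view adopts later in this diff:

-- Per-table totals from the new (shard_id, size) output.
SELECT ds.logicalrelid::regclass AS table_name, sum(css.size) AS total_bytes
FROM citus_shard_sizes() css
JOIN pg_dist_shard ds ON ds.shardid = css.shard_id
GROUP BY ds.logicalrelid;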
@@ -0,0 +1,46 @@
+CREATE OR REPLACE VIEW citus.citus_shards AS
+SELECT
+     pg_dist_shard.logicalrelid AS table_name,
+     pg_dist_shard.shardid,
+     shard_name(pg_dist_shard.logicalrelid, pg_dist_shard.shardid) as shard_name,
+     CASE WHEN partkey IS NOT NULL THEN 'distributed' WHEN repmodel = 't' THEN 'reference' ELSE 'local' END AS citus_table_type,
+     colocationid AS colocation_id,
+     pg_dist_node.nodename,
+     pg_dist_node.nodeport,
+     size as shard_size
+FROM
+   pg_dist_shard
+JOIN
+   pg_dist_placement
+ON
+   pg_dist_shard.shardid = pg_dist_placement.shardid
+JOIN
+   pg_dist_node
+ON
+   pg_dist_placement.groupid = pg_dist_node.groupid
+JOIN
+   pg_dist_partition
+ON
+   pg_dist_partition.logicalrelid = pg_dist_shard.logicalrelid
+LEFT JOIN
+   (SELECT shard_id, max(size) as size from citus_shard_sizes() GROUP BY shard_id) as shard_sizes
+ON
+    pg_dist_shard.shardid = shard_sizes.shard_id
+WHERE
+   pg_dist_placement.shardstate = 1
+AND
+   -- filter out tables owned by extensions
+   pg_dist_partition.logicalrelid NOT IN (
+      SELECT
+         objid
+      FROM
+         pg_depend
+      WHERE
+         classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
+   )
+ORDER BY
+   pg_dist_shard.logicalrelid::text, shardid
+;
+
+ALTER VIEW citus.citus_shards SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_shards TO public;
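An illustrative query against the view defined above; the table name 'events' is a placeholder.

SELECT shardid, nodename, nodeport, pg_size_pretty(shard_size)
FROM citus_shards
WHERE table_name = 'events'::regclass;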
@@ -1,4 +1,4 @@
-CREATE OR REPLACE VIEW pg_catalog.citus_shards AS
+CREATE OR REPLACE VIEW citus.citus_shards AS
 SELECT
      pg_dist_shard.logicalrelid AS table_name,
      pg_dist_shard.shardid,

@@ -23,7 +23,7 @@ JOIN
 ON
    pg_dist_partition.logicalrelid = pg_dist_shard.logicalrelid
 LEFT JOIN
-   (SELECT (regexp_matches(table_name,'_(\d+)$'))[1]::int as shard_id, max(size) as size from citus_shard_sizes() GROUP BY shard_id) as shard_sizes
+   (SELECT shard_id, max(size) as size from citus_shard_sizes() GROUP BY shard_id) as shard_sizes
 ON
     pg_dist_shard.shardid = shard_sizes.shard_id
 WHERE

@@ -42,4 +42,5 @@ ORDER BY
    pg_dist_shard.logicalrelid::text, shardid
 ;

+ALTER VIEW citus.citus_shards SET SCHEMA pg_catalog;
 GRANT SELECT ON pg_catalog.citus_shards TO public;
@@ -8,6 +8,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_stat_tenants (
     OUT read_count_in_last_period INT,
     OUT query_count_in_this_period INT,
     OUT query_count_in_last_period INT,
+    OUT cpu_usage_in_this_period DOUBLE PRECISION,
+    OUT cpu_usage_in_last_period DOUBLE PRECISION,
     OUT score BIGINT
 )
     RETURNS SETOF record

@@ -51,6 +53,8 @@ AS (
         read_count_in_last_period INT,
         query_count_in_this_period INT,
         query_count_in_last_period INT,
+        cpu_usage_in_this_period DOUBLE PRECISION,
+        cpu_usage_in_last_period DOUBLE PRECISION,
         score BIGINT
     )
     ORDER BY score DESC

@@ -66,7 +70,9 @@ SELECT
     read_count_in_this_period,
     read_count_in_last_period,
     query_count_in_this_period,
-    query_count_in_last_period
+    query_count_in_last_period,
+    cpu_usage_in_this_period,
+    cpu_usage_in_last_period
 FROM pg_catalog.citus_stat_tenants(FALSE);

 ALTER VIEW citus.citus_stat_tenants SET SCHEMA pg_catalog;

@@ -8,6 +8,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_stat_tenants (
     OUT read_count_in_last_period INT,
     OUT query_count_in_this_period INT,
     OUT query_count_in_last_period INT,
+    OUT cpu_usage_in_this_period DOUBLE PRECISION,
+    OUT cpu_usage_in_last_period DOUBLE PRECISION,
     OUT score BIGINT
 )
     RETURNS SETOF record

@@ -51,6 +53,8 @@ AS (
         read_count_in_last_period INT,
         query_count_in_this_period INT,
         query_count_in_last_period INT,
+        cpu_usage_in_this_period DOUBLE PRECISION,
+        cpu_usage_in_last_period DOUBLE PRECISION,
         score BIGINT
     )
     ORDER BY score DESC

@@ -66,7 +70,9 @@ SELECT
     read_count_in_this_period,
     read_count_in_last_period,
     query_count_in_this_period,
-    query_count_in_last_period
+    query_count_in_last_period,
+    cpu_usage_in_this_period,
+    cpu_usage_in_last_period
 FROM pg_catalog.citus_stat_tenants(FALSE);

 ALTER VIEW citus.citus_stat_tenants SET SCHEMA pg_catalog;

@@ -6,6 +6,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_stat_tenants_local(
     OUT read_count_in_last_period INT,
     OUT query_count_in_this_period INT,
     OUT query_count_in_last_period INT,
+    OUT cpu_usage_in_this_period DOUBLE PRECISION,
+    OUT cpu_usage_in_last_period DOUBLE PRECISION,
     OUT score BIGINT)
 RETURNS SETOF RECORD
 LANGUAGE C

@@ -19,7 +21,9 @@ SELECT
     read_count_in_this_period,
     read_count_in_last_period,
     query_count_in_this_period,
-    query_count_in_last_period
+    query_count_in_last_period,
+    cpu_usage_in_this_period,
+    cpu_usage_in_last_period
 FROM pg_catalog.citus_stat_tenants_local()
 ORDER BY score DESC;

@@ -6,6 +6,8 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_stat_tenants_local(
     OUT read_count_in_last_period INT,
     OUT query_count_in_this_period INT,
     OUT query_count_in_last_period INT,
+    OUT cpu_usage_in_this_period DOUBLE PRECISION,
+    OUT cpu_usage_in_last_period DOUBLE PRECISION,
     OUT score BIGINT)
 RETURNS SETOF RECORD
 LANGUAGE C

@@ -19,7 +21,9 @@ SELECT
     read_count_in_this_period,
     read_count_in_last_period,
     query_count_in_this_period,
-    query_count_in_last_period
+    query_count_in_last_period,
+    cpu_usage_in_this_period,
+    cpu_usage_in_last_period
 FROM pg_catalog.citus_stat_tenants_local()
 ORDER BY score DESC;
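A sketch of how the two new columns surface to users; the tenant_attribute column is assumed from the existing view definition (it is not shown in these hunks), the cpu_usage_* columns come from the migration above.

SELECT tenant_attribute, query_count_in_this_period,
       cpu_usage_in_this_period, cpu_usage_in_last_period
FROM citus_stat_tenants
ORDER BY cpu_usage_in_this_period DESC;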
@@ -0,0 +1,55 @@
+DO $$
+declare
+citus_tables_create_query text;
+BEGIN
+citus_tables_create_query=$CTCQ$
+    CREATE OR REPLACE VIEW %I.citus_tables AS
+    SELECT
+        logicalrelid AS table_name,
+        CASE WHEN partkey IS NOT NULL THEN 'distributed' ELSE
+            CASE when repmodel = 't' THEN 'reference' ELSE 'local' END
+        END AS citus_table_type,
+        coalesce(column_to_column_name(logicalrelid, partkey), '<none>') AS distribution_column,
+        colocationid AS colocation_id,
+        pg_size_pretty(table_sizes.table_size) AS table_size,
+        (select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count,
+        pg_get_userbyid(relowner) AS table_owner,
+        amname AS access_method
+    FROM
+        pg_dist_partition p
+    JOIN
+        pg_class c ON (p.logicalrelid = c.oid)
+    LEFT JOIN
+        pg_am a ON (a.oid = c.relam)
+    JOIN
+        (
+            SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size
+            FROM citus_shard_sizes() css, pg_dist_shard ds
+            WHERE css.shard_id = ds.shardid
+            GROUP BY ds.logicalrelid
+        ) table_sizes ON (table_sizes.table_id = p.logicalrelid)
+    WHERE
+        -- filter out tables owned by extensions
+        logicalrelid NOT IN (
+            SELECT
+                objid
+            FROM
+                pg_depend
+            WHERE
+                classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
+        )
+    ORDER BY
+        logicalrelid::text;
+$CTCQ$;
+
+IF EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'public') THEN
+    EXECUTE format(citus_tables_create_query, 'public');
+    GRANT SELECT ON public.citus_tables TO public;
+ELSE
+    EXECUTE format(citus_tables_create_query, 'citus');
+    ALTER VIEW citus.citus_tables SET SCHEMA pg_catalog;
+    GRANT SELECT ON pg_catalog.citus_tables TO public;
+END IF;
+
+END;
+$$;
@@ -11,7 +11,7 @@ citus_tables_create_query=$CTCQ$
         END AS citus_table_type,
         coalesce(column_to_column_name(logicalrelid, partkey), '<none>') AS distribution_column,
         colocationid AS colocation_id,
-        pg_size_pretty(citus_total_relation_size(logicalrelid, fail_on_error := false)) AS table_size,
+        pg_size_pretty(table_sizes.table_size) AS table_size,
         (select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count,
         pg_get_userbyid(relowner) AS table_owner,
         amname AS access_method

@@ -21,6 +21,13 @@ citus_tables_create_query=$CTCQ$
         pg_class c ON (p.logicalrelid = c.oid)
     LEFT JOIN
        pg_am a ON (a.oid = c.relam)
+    JOIN
+        (
+            SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size
+            FROM citus_shard_sizes() css, pg_dist_shard ds
+            WHERE css.shard_id = ds.shardid
+            GROUP BY ds.logicalrelid
+        ) table_sizes ON (table_sizes.table_id = p.logicalrelid)
     WHERE
         -- filter out tables owned by extensions
        logicalrelid NOT IN (
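The columns used below all appear in the view definition above; the only change for users is that table_size is now computed from citus_shard_sizes() instead of citus_total_relation_size().

SELECT table_name, citus_table_type, shard_count, table_size
FROM citus_tables
ORDER BY table_name;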
@@ -1,70 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * pg_send_cancellation.c
- *
- * This file contains functions to test setting pg_send_cancellation.
- *
- * Copyright (c) Citus Data, Inc.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-#include "miscadmin.h"
-#include "fmgr.h"
-#include "port.h"
-
-#include "postmaster/postmaster.h"
-
-
-#define PG_SEND_CANCELLATION_VERSION \
-	"pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"
-
-
-/* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(get_cancellation_key);
-PG_FUNCTION_INFO_V1(run_pg_send_cancellation);
-
-
-/*
- * get_cancellation_key returns the cancellation key of the current process
- * as an integer.
- */
-Datum
-get_cancellation_key(PG_FUNCTION_ARGS)
-{
-	PG_RETURN_INT32(MyCancelKey);
-}
-
-
-/*
- * run_pg_send_cancellation runs the pg_send_cancellation program with
- * the specified arguments
- */
-Datum
-run_pg_send_cancellation(PG_FUNCTION_ARGS)
-{
-	int pid = PG_GETARG_INT32(0);
-	int cancelKey = PG_GETARG_INT32(1);
-
-	char sendCancellationPath[MAXPGPATH];
-	char command[1024];
-
-	/* Locate executable backend before we change working directory */
-	if (find_other_exec(my_exec_path, "pg_send_cancellation",
-						PG_SEND_CANCELLATION_VERSION,
-						sendCancellationPath) < 0)
-	{
-		ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
-	}
-
-	pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
-				sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);
-
-	if (system(command) != 0)
-	{
-		ereport(ERROR, (errmsg("failed to run command: %s", command)));
-	}
-
-	PG_RETURN_VOID();
-}
@@ -462,21 +462,25 @@ HasDropCommandViolatesOwnership(Node *node)
 static bool
 AnyObjectViolatesOwnership(DropStmt *dropStmt)
 {
+	bool hasOwnershipViolation = false;
 	volatile ObjectAddress objectAddress = { 0 };
 	Relation relation = NULL;
-	bool objectViolatesOwnership = false;
 	ObjectType objectType = dropStmt->removeType;
 	bool missingOk = dropStmt->missing_ok;

+	MemoryContext savedContext = CurrentMemoryContext;
+	ResourceOwner savedOwner = CurrentResourceOwner;
+	BeginInternalSubTransaction(NULL);
+	MemoryContextSwitchTo(savedContext);
+
+	PG_TRY();
+	{
 	Node *object = NULL;
 	foreach_ptr(object, dropStmt->objects)
-	{
-		PG_TRY();
 		{
 			objectAddress = get_object_address(objectType, object,
 											   &relation, AccessShareLock, missingOk);


 			if (OidIsValid(objectAddress.objectId))
 			{
 				/*

@@ -487,29 +491,39 @@ AnyObjectViolatesOwnership(DropStmt *dropStmt)
 												objectAddress,
 												object, relation);
 			}
-		}
-		PG_CATCH();
-		{
-			if (OidIsValid(objectAddress.objectId))
-			{
-				/* ownership violation */
-				objectViolatesOwnership = true;
-			}
-		}
-		PG_END_TRY();

 			if (relation != NULL)
 			{
-				relation_close(relation, AccessShareLock);
+				relation_close(relation, NoLock);
 				relation = NULL;
 			}
+		}

-		/* we found ownership violation, so can return here */
-		if (objectViolatesOwnership)
-		{
-			return true;
-		}
-	}
+		ReleaseCurrentSubTransaction();
+		MemoryContextSwitchTo(savedContext);
+		CurrentResourceOwner = savedOwner;
+	}
+	PG_CATCH();
+	{
+		MemoryContextSwitchTo(savedContext);
+		ErrorData *edata = CopyErrorData();
+		FlushErrorState();

-	return false;
+		hasOwnershipViolation = true;
+		if (relation != NULL)
+		{
+			relation_close(relation, NoLock);
+			relation = NULL;
+		}
+		RollbackAndReleaseCurrentSubTransaction();
+		MemoryContextSwitchTo(savedContext);
+		CurrentResourceOwner = savedOwner;
+
+		/* Rethrow error with LOG_SERVER_ONLY to prevent log to be sent to client */
+		edata->elevel = LOG_SERVER_ONLY;
+		ThrowErrorData(edata);
+	}
+	PG_END_TRY();
+
+	return hasOwnershipViolation;
 }
@@ -141,7 +141,17 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSch
 	fauxFunction->funcexpr = (Node *) fauxFuncExpr;

 	/* set the column count to pass ruleutils checks, not used elsewhere */
+	if (rte->relid != 0)
+	{
+		Relation rel = RelationIdGetRelation(rte->relid);
+		fauxFunction->funccolcount = RelationGetNumberOfAttributes(rel);
+		RelationClose(rel);
+	}
+	else
+	{
 	fauxFunction->funccolcount = list_length(rte->eref->colnames);
+	}
+
 	fauxFunction->funccolnames = funcColumnNames;
 	fauxFunction->funccoltypes = funcColumnTypes;
 	fauxFunction->funccoltypmods = funcColumnTypeMods;
@ -12,13 +12,14 @@
|
||||||
#include "unistd.h"
|
#include "unistd.h"
|
||||||
|
|
||||||
#include "distributed/citus_safe_lib.h"
|
#include "distributed/citus_safe_lib.h"
|
||||||
|
#include "distributed/colocation_utils.h"
|
||||||
|
#include "distributed/distributed_planner.h"
|
||||||
|
#include "distributed/jsonbutils.h"
|
||||||
#include "distributed/log_utils.h"
|
#include "distributed/log_utils.h"
|
||||||
#include "distributed/listutils.h"
|
#include "distributed/listutils.h"
|
||||||
#include "distributed/metadata_cache.h"
|
#include "distributed/metadata_cache.h"
|
||||||
#include "distributed/jsonbutils.h"
|
#include "distributed/multi_executor.h"
|
||||||
#include "distributed/colocation_utils.h"
|
|
||||||
#include "distributed/tuplestore.h"
|
#include "distributed/tuplestore.h"
|
||||||
#include "distributed/colocation_utils.h"
|
|
||||||
#include "distributed/utils/citus_stat_tenants.h"
|
#include "distributed/utils/citus_stat_tenants.h"
|
||||||
#include "executor/execdesc.h"
|
#include "executor/execdesc.h"
|
||||||
#include "storage/ipc.h"
|
#include "storage/ipc.h"
|
||||||
|
@@ -38,12 +39,14 @@ ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
 #define ATTRIBUTE_PREFIX "/*{\"tId\":"
 #define ATTRIBUTE_STRING_FORMAT "/*{\"tId\":%s,\"cId\":%d}*/"
-#define STAT_TENANTS_COLUMNS 7
+#define STAT_TENANTS_COLUMNS 9
 #define ONE_QUERY_SCORE 1000000000
 
 static char AttributeToTenant[MAX_TENANT_ATTRIBUTE_LENGTH] = "";
 static CmdType AttributeToCommandType = CMD_UNKNOWN;
 static int AttributeToColocationGroupId = INVALID_COLOCATION_ID;
+static clock_t QueryStartClock = { 0 };
+static clock_t QueryEndClock = { 0 };
 
 static const char *SharedMemoryNameForMultiTenantMonitor =
 	"Shared memory for multi tenant monitor";
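For context on the defines above: ATTRIBUTE_STRING_FORMAT is the comment that tenant statistics tracking prepends to queries so that workers can attribute them to a tenant id and colocation group. A minimal standalone sketch of what such an annotation looks like follows; the tenant id and colocation id values are hypothetical and this is not the code path Citus itself uses to build the comment.

```c
#include <stdio.h>

#define ATTRIBUTE_STRING_FORMAT "/*{\"tId\":%s,\"cId\":%d}*/"

int
main(void)
{
	char annotation[128];

	/* hypothetical tenant id (already JSON-quoted) and colocation id */
	snprintf(annotation, sizeof(annotation), ATTRIBUTE_STRING_FORMAT,
			 "\"tenant1\"", 42);

	/* the annotation comment is placed in front of the query text */
	printf("%s SELECT count(*) FROM dist_table;\n", annotation);
	return 0;
}
```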
@@ -56,7 +59,7 @@ static int CompareTenantScore(const void *leftElement, const void *rightElement)
 static void UpdatePeriodsIfNecessary(TenantStats *tenantStats, TimestampTz queryTime);
 static void ReduceScoreIfNecessary(TenantStats *tenantStats, TimestampTz queryTime);
 static void EvictTenantsIfNecessary(TimestampTz queryTime);
-static void RecordTenantStats(TenantStats *tenantStats);
+static void RecordTenantStats(TenantStats *tenantStats, TimestampTz queryTime);
 static void CreateMultiTenantMonitor(void);
 static MultiTenantMonitor * CreateSharedMemoryForMultiTenantMonitor(void);
 static MultiTenantMonitor * GetMultiTenantMonitor(void);
@@ -142,7 +145,9 @@ citus_stat_tenants_local(PG_FUNCTION_ARGS)
 								  tenantStats->writesInThisPeriod);
 		values[5] = Int32GetDatum(tenantStats->readsInLastPeriod +
 								  tenantStats->writesInLastPeriod);
-		values[6] = Int64GetDatum(tenantStats->score);
+		values[6] = Float8GetDatum(tenantStats->cpuUsageInThisPeriod);
+		values[7] = Float8GetDatum(tenantStats->cpuUsageInLastPeriod);
+		values[8] = Int64GetDatum(tenantStats->score);
 
 		tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls);
 	}
@@ -225,6 +230,7 @@ AttributeTask(char *tenantId, int colocationId, CmdType commandType)
 	strncpy_s(AttributeToTenant, MAX_TENANT_ATTRIBUTE_LENGTH, tenantId,
 			  MAX_TENANT_ATTRIBUTE_LENGTH - 1);
 	AttributeToCommandType = commandType;
+	QueryStartClock = clock();
 }
 
 
@@ -316,6 +322,17 @@ AttributeMetricsIfApplicable()
 		return;
 	}
 
+	/*
+	 * return if we are not in the top level to make sure we are not
+	 * stopping counting time for a sub-level execution
+	 */
+	if (ExecutorLevel != 0 || PlannerLevel != 0)
+	{
+		return;
+	}
+
+	QueryEndClock = clock();
+
 	TimestampTz queryTime = GetCurrentTimestamp();
 
 	MultiTenantMonitor *monitor = GetMultiTenantMonitor();
@@ -345,7 +362,7 @@ AttributeMetricsIfApplicable()
 
 			UpdatePeriodsIfNecessary(tenantStats, queryTime);
 			ReduceScoreIfNecessary(tenantStats, queryTime);
-			RecordTenantStats(tenantStats);
+			RecordTenantStats(tenantStats, queryTime);
 
 			LWLockRelease(&tenantStats->lock);
 		}
@@ -372,7 +389,7 @@ AttributeMetricsIfApplicable()
 
 			UpdatePeriodsIfNecessary(tenantStats, queryTime);
 			ReduceScoreIfNecessary(tenantStats, queryTime);
-			RecordTenantStats(tenantStats);
+			RecordTenantStats(tenantStats, queryTime);
 
 			LWLockRelease(&tenantStats->lock);
 		}
@@ -396,6 +413,7 @@ static void
 UpdatePeriodsIfNecessary(TenantStats *tenantStats, TimestampTz queryTime)
 {
 	long long int periodInMicroSeconds = StatTenantsPeriod * USECS_PER_SEC;
+	long long int periodInMilliSeconds = StatTenantsPeriod * 1000;
 	TimestampTz periodStart = queryTime - (queryTime % periodInMicroSeconds);
 
 	/*
@@ -410,20 +428,23 @@ UpdatePeriodsIfNecessary(TenantStats *tenantStats, TimestampTz queryTime)
 
 		tenantStats->readsInLastPeriod = tenantStats->readsInThisPeriod;
 		tenantStats->readsInThisPeriod = 0;
 
+		tenantStats->cpuUsageInLastPeriod = tenantStats->cpuUsageInThisPeriod;
+		tenantStats->cpuUsageInThisPeriod = 0;
 	}
 
 	/*
 	 * If the last query is more than two periods ago, we clean the last period counts too.
 	 */
 	if (TimestampDifferenceExceeds(tenantStats->lastQueryTime, periodStart,
-								   periodInMicroSeconds))
+								   periodInMilliSeconds))
 	{
 		tenantStats->writesInLastPeriod = 0;
 
 		tenantStats->readsInLastPeriod = 0;
-	}
 
-	tenantStats->lastQueryTime = queryTime;
+		tenantStats->cpuUsageInLastPeriod = 0;
+	}
 }
 
 
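The switch from periodInMicroSeconds to periodInMilliSeconds above matters because PostgreSQL's TimestampDifferenceExceeds() takes its threshold in milliseconds, while TimestampTz values (and USECS_PER_SEC) are microsecond-based. A minimal sketch of the two units, assuming a hypothetical period of 60 seconds:

```c
#include <stdio.h>

#define USECS_PER_SEC 1000000LL

int
main(void)
{
	/* hypothetical citus.stat_tenants_period of 60 seconds, for illustration only */
	int statTenantsPeriod = 60;

	/* bucket boundaries on TimestampTz values are computed in microseconds ... */
	long long periodInMicroSeconds = statTenantsPeriod * USECS_PER_SEC;

	/* ... while TimestampDifferenceExceeds() expects a millisecond threshold */
	long long periodInMilliSeconds = statTenantsPeriod * 1000LL;

	printf("period = %lld us = %lld ms\n", periodInMicroSeconds, periodInMilliSeconds);
	return 0;
}
```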
@@ -503,7 +524,7 @@ EvictTenantsIfNecessary(TimestampTz queryTime)
  * RecordTenantStats records the query statistics for the tenant.
  */
 static void
-RecordTenantStats(TenantStats *tenantStats)
+RecordTenantStats(TenantStats *tenantStats, TimestampTz queryTime)
 {
 	if (tenantStats->score < LLONG_MAX - ONE_QUERY_SCORE)
 	{
@@ -524,6 +545,11 @@ RecordTenantStats(TenantStats *tenantStats)
 	{
 		tenantStats->writesInThisPeriod++;
 	}
 
+	double queryCpuTime = ((double) (QueryEndClock - QueryStartClock)) / CLOCKS_PER_SEC;
+	tenantStats->cpuUsageInThisPeriod += queryCpuTime;
+
+	tenantStats->lastQueryTime = queryTime;
 }
 
 
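QueryStartClock and QueryEndClock above rely on the standard C clock() function, which reports CPU time consumed by the process rather than wall-clock time; dividing the delta by CLOCKS_PER_SEC yields seconds. A minimal standalone sketch of that pattern (the busy loop is only a stand-in for query execution):

```c
#include <stdio.h>
#include <time.h>

int
main(void)
{
	clock_t start = clock();

	/* stand-in for executing a query: burn a little CPU */
	volatile long sum = 0;
	for (long i = 0; i < 10 * 1000 * 1000; i++)
	{
		sum += i;
	}

	clock_t end = clock();

	/* CPU seconds used between the two samples */
	double cpuSeconds = ((double) (end - start)) / CLOCKS_PER_SEC;
	printf("used %.6f CPU seconds (sum=%ld)\n", cpuSeconds, (long) sum);
	return 0;
}
```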
@@ -170,12 +170,11 @@ BreakColocation(Oid sourceRelationId)
 	 */
 	Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
 
-	uint32 newColocationId = GetNextColocationId();
-	bool localOnly = false;
-	UpdateRelationColocationGroup(sourceRelationId, newColocationId, localOnly);
+	uint32 oldColocationId = TableColocationId(sourceRelationId);
+	CreateColocationGroupForRelation(sourceRelationId);
 
-	/* if there is not any remaining table in the colocation group, delete it */
-	DeleteColocationGroupIfNoTablesBelong(sourceRelationId);
+	/* if there is not any remaining table in the old colocation group, delete it */
+	DeleteColocationGroupIfNoTablesBelong(oldColocationId);
 
 	table_close(pgDistColocation, NoLock);
 }
@@ -28,6 +28,7 @@
 #include "distributed/version_compat.h"
 #include "nodes/pg_list.h"
 #include "storage/lockdefs.h"
+#include "utils/catcache.h"
 #include "utils/fmgroids.h"
 #include "utils/hsearch.h"
 #include "common/hashfn.h"
@@ -96,6 +97,8 @@ static List * GetConnectedListHelper(ForeignConstraintRelationshipNode *node,
 									 bool isReferencing);
 static List * GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing);
 
+MemoryContext ForeignConstraintRelationshipMemoryContext = NULL;
+
 
 /*
  * GetForeignKeyConnectedRelationIdList returns a list of relation id's for
@@ -321,17 +324,36 @@ CreateForeignConstraintRelationshipGraph()
 		return;
 	}
 
-	ClearForeignConstraintRelationshipGraphContext();
+	/*
+	 * Lazily create our memory context once and reset on every reuse.
+	 * Since we have cleared and invalidated the fConstraintRelationshipGraph, right
+	 * before we can simply reset the context if it was already existing.
+	 */
+	if (ForeignConstraintRelationshipMemoryContext == NULL)
+	{
+		/* make sure we've initialized CacheMemoryContext */
+		if (CacheMemoryContext == NULL)
+		{
+			CreateCacheMemoryContext();
+		}
 
-	MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateInternal(
+		ForeignConstraintRelationshipMemoryContext = AllocSetContextCreate(
 		CacheMemoryContext,
-		"Forign Constraint Relationship Graph Context",
+		"Foreign Constraint Relationship Graph Context",
 		ALLOCSET_DEFAULT_MINSIZE,
 		ALLOCSET_DEFAULT_INITSIZE,
 		ALLOCSET_DEFAULT_MAXSIZE);
+	}
+	else
+	{
+		fConstraintRelationshipGraph = NULL;
+		MemoryContextReset(ForeignConstraintRelationshipMemoryContext);
+	}
+
+	Assert(fConstraintRelationshipGraph == NULL);
 
 	MemoryContext oldContext = MemoryContextSwitchTo(
-		fConstraintRelationshipMemoryContext);
+		ForeignConstraintRelationshipMemoryContext);
 
 	fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc(
 		sizeof(ForeignConstraintRelationshipGraph));
@@ -631,22 +653,3 @@ CreateOrFindNode(HTAB *adjacencyLists, Oid relid)
 
 	return node;
 }
-
-
-/*
- * ClearForeignConstraintRelationshipGraphContext clear all the allocated memory obtained
- * for foreign constraint relationship graph. Since all the variables of relationship
- * graph was obtained within the same context, destroying hash map is enough as
- * it deletes the context.
- */
-void
-ClearForeignConstraintRelationshipGraphContext()
-{
-	if (fConstraintRelationshipGraph == NULL)
-	{
-		return;
-	}
-
-	hash_destroy(fConstraintRelationshipGraph->nodeMap);
-	fConstraintRelationshipGraph = NULL;
-}
@@ -420,7 +420,7 @@ CopyShardPlacementToWorkerNodeQuery(ShardPlacement *sourceShardPlacement,
 		"auto";
 
 	appendStringInfo(queryString,
-					 "SELECT citus_copy_shard_placement("
+					 "SELECT pg_catalog.citus_copy_shard_placement("
 					 UINT64_FORMAT ", %d, %d, "
 					 "transfer_mode := %s)",
 					 sourceShardPlacement->shardId,
@@ -1 +0,0 @@
-pg_send_cancellation
@@ -1,24 +0,0 @@
-citus_top_builddir = ../../..
-
-PROGRAM = pg_send_cancellation
-PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
-OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
-PG_CPPFLAGS = -I$(libpq_srcdir)
-PG_LIBS_INTERNAL = $(libpq_pgport)
-PG_LDFLAGS += $(LDFLAGS)
-
-include $(citus_top_builddir)/Makefile.global
-
-# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
-# We sometimes build Citus with a newer version of gcc than Postgres was built
-# with and this breaks LTO (link-time optimization). Even if disabling it can
-# have some perf impact this is ok because pg_send_cancellation is only used
-# for tests anyway.
-override CFLAGS := $(filter-out -shared, $(CFLAGS)) -fno-lto
-
-# Filter out unneeded dependencies
-override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))
-
-clean: clean-pg_send_cancellation
-clean-pg_send_cancellation:
-	rm -f $(PROGRAM) $(OBJS)
@@ -1,47 +0,0 @@
-# pg_send_cancellation
-
-pg_send_cancellation is a program for manually sending a cancellation
-to a Postgres endpoint. It is effectively a command-line version of
-PQcancel in libpq, but it can use any PID or cancellation key.
-
-We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
-behind a load balancer. Since the cancellation protocol involves
-opening a new connection, the new connection may go to a different
-node that does not recognize the cancellation key. To handle that
-scenario, we modified pgbouncer to pass unrecognized cancellation
-keys to a shell command.
-
-Users can configure the cancellation_command, which will be run with:
-```
-<cancellation_command> <client ip> <client port> <pid> <cancel key>
-```
-
-Note that pgbouncer does not use actual PIDs. Instead, it generates PID and cancellation key together a random 8-byte number. This makes the chance of collisions exceedingly small.
-
-By providing pg_send_cancellation as part of Citus, we can use a shell script that pgbouncer invokes to propagate the cancellation to all *other* worker nodes in the same cluster, for example:
-
-```bash
-#!/bin/sh
-remote_ip=$1
-remote_port=$2
-pid=$3
-cancel_key=$4
-
-postgres_path=/usr/pgsql-14/bin
-pgbouncer_port=6432
-
-nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"
-
-# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers
-$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
-```
-
-One thing we need to be careful about is that the cancellations do not get forwarded
-back-and-forth. This is handled in pgbouncer by setting the last bit of all generated
-cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded bits to 0.
-That way, when a pgbouncer receives a cancellation key with the last bit set to 0,
-it knows it is from another pgbouncer and should not forward further, and should set
-the last bit to 1 when comparing to stored cancellation keys.
-
-Another thing we need to be careful about is that the integers should be encoded
-as big endian on the wire.
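As a side note on the two caveats in the removed README above: the forwarding convention only relies on the least significant bit of the cancellation key, and libpq-style cancel packets carry the PID and key as 32-bit big-endian integers. A minimal sketch of both ideas, assuming the key is handled as a plain uint32 (illustrative only, not pgbouncer's actual code):

```c
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl: host to network (big-endian) byte order */

int
main(void)
{
	uint32_t generatedKey = 0x12345678;	/* hypothetical generated key */

	/* keys handed out to clients get their last bit set to 1 ... */
	uint32_t clientKey = generatedKey | 1u;

	/* ... keys forwarded to other pgbouncers get their last bit cleared to 0 */
	uint32_t forwardedKey = clientKey & ~1u;

	/* a key whose last bit is 0 is recognized as forwarded and not forwarded again */
	int cameFromAnotherPgbouncer = ((forwardedKey & 1u) == 0);

	/* integers travel big endian on the wire, as in the CancelRequest packet */
	uint32_t wireKey = htonl(forwardedKey);

	printf("client=0x%08x forwarded=0x%08x fromPeer=%d wire=0x%08x\n",
		   clientKey, forwardedKey, cameFromAnotherPgbouncer, wireKey);
	return 0;
}
```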
@@ -1,261 +0,0 @@
-/*
- * pg_send_cancellation is a program for manually sending a cancellation
- * to a Postgres endpoint. It is effectively a command-line version of
- * PQcancel in libpq, but it can use any PID or cancellation key.
- *
- * Portions Copyright (c) Citus Data, Inc.
- *
- * For the internal_cancel function:
- *
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * Permission to use, copy, modify, and distribute this software and its
- * documentation for any purpose, without fee, and without a written agreement
- * is hereby granted, provided that the above copyright notice and this
- * paragraph and the following two paragraphs appear in all copies.
- *
- * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
- * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
- * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
- * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- */
-#include "postgres_fe.h"
-
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <ctype.h>
-#include <time.h>
-#include <unistd.h>
-
-#include "common/ip.h"
-#include "common/link-canary.h"
-#include "common/scram-common.h"
-#include "common/string.h"
-#include "libpq-fe.h"
-#include "libpq-int.h"
-#include "mb/pg_wchar.h"
-#include "port/pg_bswap.h"
-
-
-#define ERROR_BUFFER_SIZE 256
-
-
-static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
-						   char *errbuf, int errbufsize);
-
-
-/*
- * main entry point into the pg_send_cancellation program.
- */
-int
-main(int argc, char *argv[])
-{
-	if (argc == 2 && strcmp(argv[1], "-V") == 0)
-	{
-		pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
-		return 0;
-	}
-
-	if (argc < 4 || argc > 5)
-	{
-		char *program = argv[0];
-		pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
-		pg_fprintf(stderr, "Usage: %s <pid> <cancel key> <hostname> [port]\n", program);
-		return 1;
-	}
-
-	char *pidString = argv[1];
-	char *cancelKeyString = argv[2];
-	char *host = argv[3];
-	char *portString = "5432";
-
-	if (argc >= 5)
-	{
-		portString = argv[4];
-	}
-
-	/* parse the PID and cancellation key */
-	int pid = strtol(pidString, NULL, 10);
-	int cancelAuthCode = strtol(cancelKeyString, NULL, 10);
-
-	char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };
-
-	struct addrinfo *ipAddressList;
-	struct addrinfo hint;
-	int ipAddressListFamily = AF_UNSPEC;
-	SockAddr socketAddress;
-
-	memset(&hint, 0, sizeof(hint));
-	hint.ai_socktype = SOCK_STREAM;
-	hint.ai_family = ipAddressListFamily;
-
-	/* resolve the hostname to an IP */
-	int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
-	if (ret || !ipAddressList)
-	{
-		pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
-				   host, gai_strerror(ret));
-		return 1;
-	}
-
-	if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
-	{
-		pg_fprintf(stderr, "invalid address length");
-		return 1;
-	}
-
-	/*
-	 * Explanation of IGNORE-BANNED:
-	 * This is a common pattern when using getaddrinfo. The system guarantees
-	 * that ai_addrlen < sizeof(socketAddress.addr). Out of an abundance of
-	 * caution. We also check it above.
-	 */
-	memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
-	socketAddress.salen = ipAddressList->ai_addrlen;
-
-	/* send the cancellation */
-	bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
-										   errorBuffer, sizeof(errorBuffer));
-	if (!cancelSucceeded)
-	{
-		pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
-				   host, portString, errorBuffer);
-		return 1;
-	}
-
-	pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);
-
-	return 0;
-}
-
-
-/* *INDENT-OFF* */
-
-/*
- * internal_cancel is copied from fe-connect.c
- *
- * The return value is true if the cancel request was successfully
- * dispatched, false if not (in which case an error message is available).
- * Note: successful dispatch is no guarantee that there will be any effect at
- * the backend. The application must read the operation result as usual.
- *
- * CAUTION: we want this routine to be safely callable from a signal handler
- * (for example, an application might want to call it in a SIGINT handler).
- * This means we cannot use any C library routine that might be non-reentrant.
- * malloc/free are often non-reentrant, and anything that might call them is
- * just as dangerous. We avoid sprintf here for that reason. Building up
- * error messages with strcpy/strcat is tedious but should be quite safe.
- * We also save/restore errno in case the signal handler support doesn't.
- *
- * internal_cancel() is an internal helper function to make code-sharing
- * between the two versions of the cancel function possible.
- */
-static int
-internal_cancel(SockAddr *raddr, int be_pid, int be_key,
-				char *errbuf, int errbufsize)
-{
-	int save_errno = SOCK_ERRNO;
-	pgsocket tmpsock = PGINVALID_SOCKET;
-	char sebuf[PG_STRERROR_R_BUFLEN];
-	int maxlen;
-	struct
-	{
-		uint32 packetlen;
-		CancelRequestPacket cp;
-	} crp;
-
-	/*
-	 * We need to open a temporary connection to the postmaster. Do this with
-	 * only kernel calls.
-	 */
-	if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
-	{
-		strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
-		goto cancel_errReturn;
-	}
-retry3:
-	if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
-	{
-		if (SOCK_ERRNO == EINTR)
-			/* Interrupted system call - we'll just try again */
-			goto retry3;
-		strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
-		goto cancel_errReturn;
-	}
-
-	/*
-	 * We needn't set nonblocking I/O or NODELAY options here.
-	 */
-
-	/* Create and send the cancel request packet. */
-
-	crp.packetlen = pg_hton32((uint32) sizeof(crp));
-	crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
-	crp.cp.backendPID = pg_hton32(be_pid);
-	crp.cp.cancelAuthCode = pg_hton32(be_key);
-
-retry4:
-	if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
-	{
-		if (SOCK_ERRNO == EINTR)
-			/* Interrupted system call - we'll just try again */
-			goto retry4;
-		strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
-		goto cancel_errReturn;
-	}
-
-	/*
-	 * Wait for the postmaster to close the connection, which indicates that
-	 * it's processed the request. Without this delay, we might issue another
-	 * command only to find that our cancel zaps that command instead of the
-	 * one we thought we were canceling. Note we don't actually expect this
-	 * read to obtain any data, we are just waiting for EOF to be signaled.
-	 */
-retry5:
-	if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
-	{
-		if (SOCK_ERRNO == EINTR)
-			/* Interrupted system call - we'll just try again */
-			goto retry5;
-		/* we ignore other error conditions */
-	}
-
-	/* All done */
-	closesocket(tmpsock);
-	SOCK_ERRNO_SET(save_errno);
-	return true;
-
-cancel_errReturn:
-
-	/*
-	 * Make sure we don't overflow the error buffer. Leave space for the \n at
-	 * the end, and for the terminating zero.
-	 */
-	maxlen = errbufsize - strlen(errbuf) - 2;
-	if (maxlen >= 0)
-	{
-		/*
-		 * Explanation of IGNORE-BANNED:
-		 * This is well-tested libpq code that we would like to preserve in its
-		 * original form. The appropriate length calculation is done above.
-		 */
-		strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
-				maxlen);
-		strcat(errbuf, "\n"); /* IGNORE-BANNED */
-	}
-	if (tmpsock != PGINVALID_SOCKET)
-		closesocket(tmpsock);
-	SOCK_ERRNO_SET(save_errno);
-	return false;
-}
-
-/* *INDENT-ON* */
@@ -20,7 +20,6 @@ extern bool ShouldUndistributeCitusLocalTable(Oid relationId);
 extern List * ReferencedRelationIdList(Oid relationId);
 extern List * ReferencingRelationIdList(Oid relationId);
 extern void SetForeignConstraintRelationshipGraphInvalid(void);
-extern void ClearForeignConstraintRelationshipGraphContext(void);
 extern bool OidVisited(HTAB *oidVisitedMap, Oid oid);
 extern void VisitOid(HTAB *oidVisitedMap, Oid oid);
 
@@ -156,6 +156,7 @@ extern void SendOrCollectCommandListToSingleNode(MetadataSyncContext *context,
 extern void ActivateNodeList(MetadataSyncContext *context);
 
 extern char * WorkerDropAllShellTablesCommand(bool singleTransaction);
+extern char * WorkerDropSequenceDependencyCommand(Oid relationId);
 
 extern void SyncDistributedObjects(MetadataSyncContext *context);
 extern void SendNodeWideObjectsSyncCommands(MetadataSyncContext *context);
@@ -180,8 +181,10 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
 
 #define REMOVE_ALL_CITUS_TABLES_COMMAND \
 	"SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
-#define BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
+#define BREAK_ALL_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
 	"SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition"
+#define BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
+	"SELECT pg_catalog.worker_drop_sequence_dependency(%s);"
 
 #define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'"
 #define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'"
@@ -41,7 +41,7 @@
 #define WORKER_PARTITIONED_RELATION_TOTAL_SIZE_FUNCTION \
 	"worker_partitioned_relation_total_size(%s)"
 
-#define SHARD_SIZES_COLUMN_COUNT (3)
+#define SHARD_SIZES_COLUMN_COUNT (2)
 
 /*
  * Flag to keep track of whether the process is currently in a function converting the
@@ -42,6 +42,13 @@ typedef struct TenantStats
 	int writesInLastPeriod;
 	int writesInThisPeriod;
 
+
+	/*
+	 * CPU time usage of this tenant in this and last periods.
+	 */
+	double cpuUsageInLastPeriod;
+	double cpuUsageInThisPeriod;
+
 	/*
 	 * The latest time this tenant ran a query. This value is used to update the score later.
 	 */
@@ -257,7 +257,7 @@ s/pg_cancel_backend\('[0-9]+'::bigint\)/pg_cancel_backend('xxxxx'::bigint)/g
 s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_backend(xxxxx::integer)/g
 
 # shard_rebalancer output for flaky nodeIds
-s/issuing SELECT citus_copy_shard_placement\(43[0-9]+,[0-9]+,[0-9]+,'block_writes'\)/issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')/g
+s/issuing SELECT pg_catalog.citus_copy_shard_placement\(43[0-9]+,[0-9]+,[0-9]+,'block_writes'\)/issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')/g
 
 # node id in run_command_on_all_nodes warning
 s/Error on node with node id [0-9]+/Error on node with node id xxxxx/g
@@ -309,3 +309,6 @@ s/(NOTICE: issuing SET LOCAL application_name TO 'citus_rebalancer gpid=)[0-9]+
 s/improvement of 0.1[0-9]* is lower/improvement of 0.1xxxxx is lower/g
 # normalize tenants statistics annotations
 s/\/\*\{"tId":.*\*\///g
+
+# Notice message that contains current columnar version that makes it harder to bump versions
+s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION )"[0-9]+\.[0-9]+-[0-9]+"/\1 "x.y-z"/
@@ -112,6 +112,14 @@ DEPS = {
             "multi_mx_function_table_reference",
         ],
     ),
+    "background_rebalance": TestDeps(
+        None,
+        [
+            "multi_test_helpers",
+            "multi_cluster_management",
+        ],
+        worker_count=3,
+    ),
     "background_rebalance_parallel": TestDeps(
         None,
         [
@@ -10,7 +10,6 @@ test: isolation_move_placement_vs_modification
 test: isolation_move_placement_vs_modification_fk
 test: isolation_tenant_isolation_with_fkey_to_reference
 test: isolation_ref2ref_foreign_keys_enterprise
-test: isolation_pg_send_cancellation
 test: isolation_shard_move_vs_start_metadata_sync
 test: isolation_tenant_isolation
 test: isolation_tenant_isolation_nonblocking
@@ -310,6 +310,61 @@ SELECT public.wait_until_metadata_sync(30000);
 
 (1 row)
 
+-- make sure a non-super user can rebalance when there are reference tables to replicate
+CREATE TABLE ref_table(a int primary key);
+SELECT create_reference_table('ref_table');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- add a new node to trigger replicate_reference_tables task
+SELECT 1 FROM citus_add_node('localhost', :worker_3_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SET ROLE non_super_user_rebalance;
+SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
+NOTICE:  Scheduled 1 moves as job xxx
+DETAIL:  Rebalance scheduled as background job
+HINT:  To monitor progress, run: SELECT * FROM citus_rebalance_status();
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+-- wait for success
+SELECT citus_rebalance_wait();
+ citus_rebalance_wait
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT state, details from citus_rebalance_status();
+  state   | details
+---------------------------------------------------------------------
+ finished | {"tasks": [], "task_state_counts": {"done": 2}}
+(1 row)
+
+RESET ROLE;
+-- reset the the number of nodes by removing the previously added node
+SELECT 1 FROM citus_drain_node('localhost', :worker_3_port);
+NOTICE:  Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+CALL citus_cleanup_orphaned_resources();
+NOTICE:  cleaned up 1 orphaned resources
+SELECT 1 FROM citus_remove_node('localhost', :worker_3_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
 DROP USER non_super_user_rebalance;
@@ -466,17 +466,27 @@ SELECT citus_rebalance_start AS job_id from citus_rebalance_start() \gset
 -- see dependent tasks to understand which tasks remain runnable because of
 -- citus.max_background_task_executors_per_node
 -- and which tasks are actually blocked from colocation group dependencies
-SELECT D.task_id,
-       (SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.task_id),
-       D.depends_on,
-       (SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.depends_on)
-FROM pg_dist_background_task_depend D WHERE job_id in (:job_id) ORDER BY D.task_id, D.depends_on ASC;
- task_id | command | depends_on | command
+SELECT (SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.task_id),
+       (SELECT T.command depends_on_command FROM pg_dist_background_task T WHERE T.task_id = D.depends_on)
+FROM pg_dist_background_task_depend D WHERE job_id in (:job_id) ORDER BY 1, 2 ASC;
+ command | depends_on_command
 ---------------------------------------------------------------------
-    1014 | SELECT pg_catalog.citus_move_shard_placement(85674026,50,57,'auto') |       1013 | SELECT pg_catalog.citus_move_shard_placement(85674025,50,56,'auto')
-    1016 | SELECT pg_catalog.citus_move_shard_placement(85674032,50,57,'auto') |       1015 | SELECT pg_catalog.citus_move_shard_placement(85674031,50,56,'auto')
-    1018 | SELECT pg_catalog.citus_move_shard_placement(85674038,50,57,'auto') |       1017 | SELECT pg_catalog.citus_move_shard_placement(85674037,50,56,'auto')
-    1020 | SELECT pg_catalog.citus_move_shard_placement(85674044,50,57,'auto') |       1019 | SELECT pg_catalog.citus_move_shard_placement(85674043,50,56,'auto')
+ SELECT pg_catalog.citus_move_shard_placement(85674026,50,57,'auto') | SELECT pg_catalog.citus_move_shard_placement(85674025,50,56,'auto')
+ SELECT pg_catalog.citus_move_shard_placement(85674032,50,57,'auto') | SELECT pg_catalog.citus_move_shard_placement(85674031,50,56,'auto')
+ SELECT pg_catalog.citus_move_shard_placement(85674038,50,57,'auto') | SELECT pg_catalog.citus_move_shard_placement(85674037,50,56,'auto')
+ SELECT pg_catalog.citus_move_shard_placement(85674044,50,57,'auto') | SELECT pg_catalog.citus_move_shard_placement(85674043,50,56,'auto')
+(4 rows)
+
+SELECT task_id, depends_on
+FROM pg_dist_background_task_depend
+WHERE job_id in (:job_id)
+ORDER BY 1, 2 ASC;
+ task_id | depends_on
+---------------------------------------------------------------------
+    1014 |       1013
+    1016 |       1015
+    1018 |       1017
+    1020 |       1019
 (4 rows)
 
 -- default citus.max_background_task_executors_per_node is 1
@@ -71,14 +71,17 @@ INSERT INTO dist_tbl VALUES (2, 'abcd');
 UPDATE dist_tbl SET b = a + 1 WHERE a = 3;
 UPDATE dist_tbl SET b = a + 1 WHERE a = 4;
 DELETE FROM dist_tbl WHERE a = 5;
-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants(true) ORDER BY tenant_attribute;
- tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+    (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants(true)
+ORDER BY tenant_attribute;
+ tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period | cpu_is_used_in_this_period | cpu_is_used_in_last_period
 ---------------------------------------------------------------------
- 1 | 0 | 0 | 1 | 0
- 2 | 0 | 0 | 1 | 0
- 3 | 0 | 0 | 1 | 0
- 4 | 0 | 0 | 1 | 0
- 5 | 0 | 0 | 1 | 0
+ 1 | 0 | 0 | 1 | 0 | t | f
+ 2 | 0 | 0 | 1 | 0 | t | f
+ 3 | 0 | 0 | 1 | 0 | t | f
+ 4 | 0 | 0 | 1 | 0 | t | f
+ 5 | 0 | 0 | 1 | 0 | t | f
 (5 rows)
 
 SELECT citus_stat_tenants_reset();
@@ -241,26 +244,48 @@ SELECT count(*)>=0 FROM dist_tbl WHERE a = 1;
 
 INSERT INTO dist_tbl VALUES (5, 'abcd');
 \c - - - :worker_1_port
-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
- tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+    (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;
+ tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period | cpu_is_used_in_this_period | cpu_is_used_in_last_period
 ---------------------------------------------------------------------
- 1 | 1 | 0 | 1 | 0
- 5 | 0 | 0 | 1 | 0
+ 1 | 1 | 0 | 1 | 0 | t | f
+ 5 | 0 | 0 | 1 | 0 | t | f
 (2 rows)
 
 -- simulate passing the period
-SET citus.stat_tenants_period TO 2;
+SET citus.stat_tenants_period TO 5;
 SELECT sleep_until_next_period();
  sleep_until_next_period
 ---------------------------------------------------------------------
 
 (1 row)
 
-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
- tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+    (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;
+ tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period | cpu_is_used_in_this_period | cpu_is_used_in_last_period
 ---------------------------------------------------------------------
- 1 | 0 | 1 | 0 | 1
- 5 | 0 | 0 | 0 | 1
+ 1 | 0 | 1 | 0 | 1 | f | t
+ 5 | 0 | 0 | 0 | 1 | f | t
+(2 rows)
+
+SELECT sleep_until_next_period();
+ sleep_until_next_period
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+    (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;
+ tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period | cpu_is_used_in_this_period | cpu_is_used_in_last_period
+---------------------------------------------------------------------
+ 1 | 0 | 0 | 0 | 0 | f | f
+ 5 | 0 | 0 | 0 | 0 | f | f
 (2 rows)
 
 \c - - - :master_port
@@ -478,13 +503,17 @@ SELECT count(*)>=0 FROM dist_tbl_text WHERE a = 'bcde*';
 t
 (1 row)
 
+DELETE FROM dist_tbl_text WHERE a = '/b*c/de';
+DELETE FROM dist_tbl_text WHERE a = '/bcde';
+DELETE FROM dist_tbl_text WHERE a = U&'\0061\0308bc';
+DELETE FROM dist_tbl_text WHERE a = 'bcde*';
 SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
 tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
 ---------------------------------------------------------------------
- /b*c/de | 1 | 0 | 1 | 0
- /bcde | 1 | 0 | 1 | 0
- äbc | 1 | 0 | 1 | 0
- bcde* | 1 | 0 | 1 | 0
+ /b*c/de | 1 | 0 | 2 | 0
+ /bcde | 1 | 0 | 2 | 0
+ äbc | 1 | 0 | 2 | 0
+ bcde* | 1 | 0 | 2 | 0
 (4 rows)
|
|
||||||
-- test local cached queries & prepared statements
|
-- test local cached queries & prepared statements
|
||||||
|
@ -564,10 +593,10 @@ EXECUTE dist_tbl_text_select_plan('bcde*');
|
||||||
SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
|
SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
|
||||||
tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
|
tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
/b*c/de | 4 | 0 | 4 | 0
|
/b*c/de | 4 | 0 | 5 | 0
|
||||||
/bcde | 4 | 0 | 4 | 0
|
/bcde | 4 | 0 | 5 | 0
|
||||||
äbc | 4 | 0 | 4 | 0
|
äbc | 4 | 0 | 5 | 0
|
||||||
bcde* | 4 | 0 | 4 | 0
|
bcde* | 4 | 0 | 5 | 0
|
||||||
(4 rows)
|
(4 rows)
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
|
@ -650,10 +679,10 @@ SET search_path TO citus_stat_tenants;
|
||||||
SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants ORDER BY tenant_attribute;
|
SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants ORDER BY tenant_attribute;
|
||||||
tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
|
tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
/b*c/de | 7 | 0 | 7 | 0
|
/b*c/de | 7 | 0 | 8 | 0
|
||||||
/bcde | 7 | 0 | 7 | 0
|
/bcde | 7 | 0 | 8 | 0
|
||||||
äbc | 7 | 0 | 7 | 0
|
äbc | 7 | 0 | 8 | 0
|
||||||
bcde* | 7 | 0 | 7 | 0
|
bcde* | 7 | 0 | 8 | 0
|
||||||
(4 rows)
|
(4 rows)
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
|
@ -716,5 +745,131 @@ SELECT count(*)>=0 FROM citus_stat_tenants_local();
|
||||||
|
|
||||||
RESET ROLE;
|
RESET ROLE;
|
||||||
DROP ROLE stats_non_superuser;
|
DROP ROLE stats_non_superuser;
|
||||||
|
-- test function push down
|
||||||
|
CREATE OR REPLACE FUNCTION
|
||||||
|
select_from_dist_tbl_text(p_keyword text)
|
||||||
|
RETURNS boolean LANGUAGE plpgsql AS $fn$
|
||||||
|
BEGIN
|
||||||
|
RETURN(SELECT count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE a = $1);
|
||||||
|
END;
|
||||||
|
$fn$;
|
||||||
|
SELECT create_distributed_function(
|
||||||
|
'select_from_dist_tbl_text(text)', 'p_keyword', colocate_with => 'dist_tbl_text'
|
||||||
|
);
|
||||||
|
create_distributed_function
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT citus_stat_tenants_reset();
|
||||||
|
citus_stat_tenants_reset
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT select_from_dist_tbl_text('/b*c/de');
|
||||||
|
select_from_dist_tbl_text
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT select_from_dist_tbl_text('/b*c/de');
|
||||||
|
select_from_dist_tbl_text
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT select_from_dist_tbl_text(U&'\0061\0308bc');
|
||||||
|
select_from_dist_tbl_text
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT select_from_dist_tbl_text(U&'\0061\0308bc');
|
||||||
|
select_from_dist_tbl_text
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
tenant_attribute | query_count_in_this_period
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
/b*c/de | 2
|
||||||
|
äbc | 2
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
CREATE OR REPLACE PROCEDURE select_from_dist_tbl_text_proc(
|
||||||
|
p_keyword text
|
||||||
|
)
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $$
|
||||||
|
BEGIN
|
||||||
|
PERFORM select_from_dist_tbl_text(p_keyword);
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE b < 0;
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text;
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE a = p_keyword;
|
||||||
|
COMMIT;
|
||||||
|
END;$$;
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(NULL);
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
tenant_attribute | query_count_in_this_period
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
/b*c/de | 8
|
||||||
|
äbc | 8
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
|
CREATE OR REPLACE VIEW
|
||||||
|
select_from_dist_tbl_text_view
|
||||||
|
AS
|
||||||
|
SELECT * FROM citus_stat_tenants.dist_tbl_text;
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
?column?
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
t
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
tenant_attribute | query_count_in_this_period
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
/b*c/de | 11
|
||||||
|
äbc | 11
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
SET client_min_messages TO ERROR;
|
SET client_min_messages TO ERROR;
|
||||||
DROP SCHEMA citus_stat_tenants CASCADE;
|
DROP SCHEMA citus_stat_tenants CASCADE;
|
||||||
|
|
|
@@ -64,11 +64,11 @@ SET citus.multi_shard_modify_mode TO sequential;
 SELECT citus_update_table_statistics('test_table_statistics_hash');
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981000, 'public.test_table_statistics_hash_981000'), (981001, 'public.test_table_statistics_hash_981001'), (981002, 'public.test_table_statistics_hash_981002'), (981003, 'public.test_table_statistics_hash_981003'), (981004, 'public.test_table_statistics_hash_981004'), (981005, 'public.test_table_statistics_hash_981005'), (981006, 'public.test_table_statistics_hash_981006'), (981007, 'public.test_table_statistics_hash_981007')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981000, 'public.test_table_statistics_hash_981000'), (981001, 'public.test_table_statistics_hash_981001'), (981002, 'public.test_table_statistics_hash_981002'), (981003, 'public.test_table_statistics_hash_981003'), (981004, 'public.test_table_statistics_hash_981004'), (981005, 'public.test_table_statistics_hash_981005'), (981006, 'public.test_table_statistics_hash_981006'), (981007, 'public.test_table_statistics_hash_981007')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -152,11 +152,11 @@ SET citus.multi_shard_modify_mode TO sequential;
 SELECT citus_update_table_statistics('test_table_statistics_append');
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981008, 'public.test_table_statistics_append_981008'), (981009, 'public.test_table_statistics_append_981009')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981008, 'public.test_table_statistics_append_981008'), (981009, 'public.test_table_statistics_append_981009')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
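For readers skimming the hunk above: the expected output now shows the shard-size probe issued as a single VALUES list filtered with to_regclass, rather than a chain of UNION ALL subqueries, presumably so that shards dropped while the statement runs are simply skipped. A minimal standalone sketch of the new query shape, using shard names copied from this test (illustrative only; the real statement is generated internally by citus_update_table_statistics):

SELECT shard_id, pg_total_relation_size(table_name)
FROM (VALUES
        (981000, 'public.test_table_statistics_hash_981000'),
        (981001, 'public.test_table_statistics_hash_981001')
     ) t(shard_id, table_name)
WHERE to_regclass(table_name) IS NOT NULL;  -- to_regclass returns NULL for relations that no longer exist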
@@ -36,6 +36,19 @@ set citus.shard_replication_factor to 2;
 select create_distributed_table_concurrently('test','key', 'hash');
 ERROR: cannot distribute a table concurrently when citus.shard_replication_factor > 1
 set citus.shard_replication_factor to 1;
+set citus.shard_replication_factor to 2;
+create table dist_1(a int);
+select create_distributed_table('dist_1', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+set citus.shard_replication_factor to 1;
+create table dist_2(a int);
+select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
+ERROR: cannot create distributed table concurrently because Citus allows concurrent table distribution only when citus.shard_replication_factor = 1
+HINT: table dist_2 is requested to be colocated with dist_1 which has citus.shard_replication_factor > 1
 begin;
 select create_distributed_table_concurrently('test','key');
 ERROR: create_distributed_table_concurrently cannot run inside a transaction block
@@ -138,27 +151,8 @@ select count(*) from test;
 rollback;
 -- verify that we can undistribute the table
 begin;
+set local client_min_messages to warning;
 select undistribute_table('test', cascade_via_foreign_keys := true);
-NOTICE: converting the partitions of create_distributed_table_concurrently.test
-NOTICE: creating a new table for create_distributed_table_concurrently.test
-NOTICE: dropping the old create_distributed_table_concurrently.test
-NOTICE: renaming the new table to create_distributed_table_concurrently.test
-NOTICE: creating a new table for create_distributed_table_concurrently.ref
-NOTICE: moving the data of create_distributed_table_concurrently.ref
-NOTICE: dropping the old create_distributed_table_concurrently.ref
-NOTICE: drop cascades to constraint test_id_fkey_1190041 on table create_distributed_table_concurrently.test_1190041
-CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
-PL/pgSQL function citus_drop_trigger() line XX at PERFORM
-SQL statement "DROP TABLE create_distributed_table_concurrently.ref CASCADE"
-NOTICE: renaming the new table to create_distributed_table_concurrently.ref
-NOTICE: creating a new table for create_distributed_table_concurrently.test_1
-NOTICE: moving the data of create_distributed_table_concurrently.test_1
-NOTICE: dropping the old create_distributed_table_concurrently.test_1
-NOTICE: renaming the new table to create_distributed_table_concurrently.test_1
-NOTICE: creating a new table for create_distributed_table_concurrently.test_2
-NOTICE: moving the data of create_distributed_table_concurrently.test_2
-NOTICE: dropping the old create_distributed_table_concurrently.test_2
-NOTICE: renaming the new table to create_distributed_table_concurrently.test_2
  undistribute_table
 ---------------------------------------------------------------------

@@ -187,6 +187,8 @@ ORDER BY placementid;
 (1 row)

 -- reset cluster to original state
+ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 2;
+ALTER SEQUENCE pg_dist_groupid_seq RESTART 2;
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
 ---------------------------------------------------------------------
@@ -196,7 +198,7 @@ SELECT citus.mitmproxy('conn.allow()');
 SELECT master_add_node('localhost', :worker_2_proxy_port);
  master_add_node
 ---------------------------------------------------------------------
-4
+2
 (1 row)

 -- verify node is added
@@ -12,6 +12,8 @@ SET citus.shard_count TO 2;
 SET citus.shard_replication_factor TO 1;
 SET citus.max_adaptive_executor_pool_size TO 1;
 SELECT pg_backend_pid() as pid \gset
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 222222;
+ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 333333;
 -- make sure coordinator is in the metadata
 SELECT citus_set_coordinator_host('localhost', 57636);
  citus_set_coordinator_host
@@ -189,8 +191,8 @@ SELECT create_distributed_table_concurrently('table_1', 'id');
 SELECT * FROM pg_dist_shard WHERE logicalrelid = 'table_1'::regclass;
  logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------
- table_1 | 1880080 | t | -2147483648 | -1
- table_1 | 1880081 | t | 0 | 2147483647
+ table_1 | 222247 | t | -2147483648 | -1
+ table_1 | 222248 | t | 0 | 2147483647
 (2 rows)

 DROP SCHEMA create_dist_tbl_con CASCADE;
@@ -201,3 +203,5 @@ SELECT citus_remove_node('localhost', 57636);

 (1 row)

+ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 3;
+ALTER SEQUENCE pg_dist_groupid_seq RESTART 3;
@@ -19,10 +19,30 @@ SET client_min_messages TO ERROR;
 -- Create roles
 CREATE ROLE foo1;
 CREATE ROLE foo2;
+-- Create collation
+CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
+-- Create type
+CREATE TYPE pair_type AS (a int, b int);
+-- Create function
+CREATE FUNCTION one_as_result() RETURNS INT LANGUAGE SQL AS
+$$
+SELECT 1;
+$$;
+-- Create text search dictionary
+CREATE TEXT SEARCH DICTIONARY my_german_dict (
+    template = snowball,
+    language = german,
+    stopwords = german
+);
+-- Create text search config
+CREATE TEXT SEARCH CONFIGURATION my_ts_config ( parser = default );
+ALTER TEXT SEARCH CONFIGURATION my_ts_config ALTER MAPPING FOR asciiword WITH my_german_dict;
 -- Create sequence
 CREATE SEQUENCE seq;
 -- Create colocated distributed tables
-CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'));
+CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'), col int default (one_as_result()), myserial serial, phone text COLLATE german_phonebook, initials pair_type);
+CREATE SEQUENCE seq_owned OWNED BY dist1.id;
+CREATE INDEX dist1_search_phone_idx ON dist1 USING gin (to_tsvector('my_ts_config'::regconfig, (COALESCE(phone, ''::text))::text));
 SELECT create_distributed_table('dist1', 'id');
  create_distributed_table
 ---------------------------------------------------------------------
@@ -52,12 +72,30 @@ CREATE TABLE loc1 (id int PRIMARY KEY);
 INSERT INTO loc1 SELECT i FROM generate_series(1,100) i;
 CREATE TABLE loc2 (id int REFERENCES loc1(id));
 INSERT INTO loc2 SELECT i FROM generate_series(1,100) i;
+-- Create publication
+CREATE PUBLICATION pub_all;
+-- citus_set_coordinator_host with wrong port
+SELECT citus_set_coordinator_host('localhost', 9999);
+ citus_set_coordinator_host
+---------------------------------------------------------------------
+
+(1 row)
+
+-- citus_set_coordinator_host with correct port
 SELECT citus_set_coordinator_host('localhost', :master_port);
  citus_set_coordinator_host
 ---------------------------------------------------------------------

 (1 row)

+-- show coordinator port is correct on all workers
+SELECT * FROM run_command_on_workers($$SELECT row(nodename,nodeport) FROM pg_dist_node WHERE groupid = 0$$);
+ nodename | nodeport | success | result
+---------------------------------------------------------------------
+ localhost | 9060 | t | (localhost,57636)
+ localhost | 57637 | t | (localhost,57636)
+(2 rows)
+
 SELECT citus_add_local_table_to_metadata('loc1', cascade_via_foreign_keys => true);
  citus_add_local_table_to_metadata
 ---------------------------------------------------------------------
@@ -152,8 +190,8 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()');

 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
--- Failure to drop sequence
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").cancel(' || :pid || ')');
+-- Failure to drop sequence dependency for all tables
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").cancel(' || :pid || ')');
  mitmproxy
 ---------------------------------------------------------------------

@@ -161,7 +199,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequen

 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").kill()');
  mitmproxy
 ---------------------------------------------------------------------

@@ -305,7 +343,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()');

 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
--- Filure to create schema
+-- Failure to create schema
 SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')');
  mitmproxy
 ---------------------------------------------------------------------
@@ -320,6 +358,108 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metad

 (1 row)

+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create collation
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create function
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create text search dictionary
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create text search config
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create type
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create publication
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- Failure to create sequence
@@ -337,6 +477,40 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command

 (1 row)

+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to drop sequence dependency for distributed table
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to drop distributed table if exists
+SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- Failure to create distributed table
@@ -354,6 +528,40 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_

 (1 row)

+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to record sequence dependency for table
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to create index for table
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- Failure to create reference table
@@ -524,6 +732,125 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_met

 (1 row)

+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark function as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark collation as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark text search dictionary as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark text search configuration as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark type as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark sequence as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+-- Failure to mark publication as distributed
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").cancel(' || :pid || ')');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_activate_node('localhost', :worker_2_proxy_port);
+ERROR: canceling statement due to user request
+SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").kill()');
+ mitmproxy
+---------------------------------------------------------------------
+
+(1 row)
+
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
 -- Failure to set isactive to true
@@ -581,8 +908,8 @@ ERROR: connection not open
 SELECT * FROM pg_dist_node ORDER BY nodeport;
  nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
- 4 | 4 | localhost | 9060 | default | f | t | primary | default | f | t
- 6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 2 | localhost | 9060 | default | f | t | primary | default | f | t
+ 3 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
  1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
 (3 rows)

@@ -610,24 +937,14 @@ UPDATE dist1 SET id = :failed_node_val WHERE id = :failed_node_val;
 -- Show that we can still delete from a shard at the node from coordinator
 DELETE FROM dist1 WHERE id = :failed_node_val;
 -- Show that DDL would still propagate to the node
-SET client_min_messages TO NOTICE;
-SET citus.log_remote_commands TO 1;
 CREATE SCHEMA dummy;
-NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
-NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
-NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
-NOTICE: issuing CREATE SCHEMA dummy
-NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
-NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
-NOTICE: issuing CREATE SCHEMA dummy
-NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
-NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['dummy']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
-NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
-NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
-NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
-NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
-SET citus.log_remote_commands TO 0;
-SET client_min_messages TO ERROR;
+SELECT * FROM run_command_on_workers($$SELECT nspname FROM pg_namespace WHERE nspname = 'dummy'$$);
+ nodename | nodeport | success | result
+---------------------------------------------------------------------
+ localhost | 9060 | t | dummy
+ localhost | 57637 | t | dummy
+(2 rows)
+
 -- Successfully activate the node after many failures
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
@@ -638,14 +955,14 @@ SELECT citus.mitmproxy('conn.allow()');
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
  citus_activate_node
 ---------------------------------------------------------------------
-4
+2
 (1 row)

 -- Activate the node once more to verify it works again with already synced metadata
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
  citus_activate_node
 ---------------------------------------------------------------------
-4
+2
 (1 row)

 -- Show node metadata info on worker2 and coordinator after success
@@ -653,8 +970,8 @@ SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 SELECT * FROM pg_dist_node ORDER BY nodeport;
  nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
- 4 | 4 | localhost | 9060 | default | t | t | primary | default | t | t
- 6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 2 | localhost | 9060 | default | t | t | primary | default | t | t
+ 3 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
  1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
 (3 rows)

@@ -662,8 +979,8 @@ SELECT * FROM pg_dist_node ORDER BY nodeport;
 SELECT * FROM pg_dist_node ORDER BY nodeport;
  nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
 ---------------------------------------------------------------------
- 4 | 4 | localhost | 9060 | default | t | t | primary | default | t | t
- 6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
+ 2 | 2 | localhost | 9060 | default | t | t | primary | default | t | t
+ 3 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
  1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
 (3 rows)

@@ -674,9 +991,10 @@ SELECT citus.mitmproxy('conn.allow()');
 (1 row)

 RESET citus.metadata_sync_mode;
+DROP PUBLICATION pub_all;
 DROP SCHEMA dummy;
 DROP SCHEMA mx_metadata_sync_multi_trans CASCADE;
-NOTICE: drop cascades to 10 other objects
+NOTICE: drop cascades to 15 other objects
 DROP ROLE foo1;
 DROP ROLE foo2;
 SELECT citus_remove_node('localhost', :master_port);
@@ -685,3 +1003,5 @@ SELECT citus_remove_node('localhost', :master_port);

 (1 row)

+ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 3;
+ALTER SEQUENCE pg_dist_groupid_seq RESTART 3;
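All of the new failure cases above reuse the same injection recipe; a minimal sketch of one round, assuming the regression suite's citus.mitmproxy() helper, the :worker_2_proxy_port psql variable, and the :pid captured earlier with SELECT pg_backend_pid() as pid \gset:

-- cancel our own backend as soon as the proxied worker sees the target statement
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);  -- expected: canceling statement due to user request
-- then drop the connection outright on the same statement
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);  -- expected: connection not open
-- finally let traffic through again so activation can succeed
SELECT citus.mitmproxy('conn.allow()');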
@@ -210,6 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x');

 create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
 create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
+create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
 insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
 ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
 WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1)
@@ -1,42 +0,0 @@
-Parsed test spec with 2 sessions
-
-starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel
-step s1-register:
- INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
-
-step s2-lock:
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-
-step s1-lock:
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
- END;
- <waiting ...>
-step s2-wrong-cancel-1:
- SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s2-wrong-cancel-2:
- SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s2-cancel:
- SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
- END;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s1-lock: <... completed>
-ERROR: canceling statement due to user request
@@ -1290,8 +1290,82 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.p
 (schema,{test_schema_for_sequence_propagation},{})
 (1 row)

+-- Bug: https://github.com/citusdata/citus/issues/7378
+-- Create a reference table
+CREATE TABLE tbl_ref_mats(row_id integer primary key);
+INSERT INTO tbl_ref_mats VALUES (1), (2);
+SELECT create_reference_table('tbl_ref_mats');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tbl_ref_mats$$)
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Create a distributed table
+CREATE TABLE tbl_dist_mats(series_id integer);
+INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
+SELECT create_distributed_table('tbl_dist_mats', 'series_id');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tbl_dist_mats$$)
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Create a view that joins the distributed table with the reference table on the distribution key.
+CREATE VIEW vw_citus_views as
+SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;
+-- The view initially works fine
+SELECT * FROM vw_citus_views ORDER BY 1;
+ series_id
+---------------------------------------------------------------------
+ 1
+ 1
+ 2
+ 2
+(4 rows)
+
+-- Now, alter the table
+ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
+SELECT * FROM vw_citus_views ORDER BY 1;
+ series_id
+---------------------------------------------------------------------
+ 1
+ 1
+ 2
+ 2
+(4 rows)
+
+ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
+SELECT * FROM vw_citus_views ORDER BY 1;
+ series_id
+---------------------------------------------------------------------
+ 1
+ 1
+ 2
+ 2
+(4 rows)
+
+ALTER TABLE tbl_ref_mats DROP COLUMN category1;
+SELECT * FROM vw_citus_views ORDER BY 1;
+ series_id
+---------------------------------------------------------------------
+ 1
+ 1
+ 2
+ 2
+(4 rows)
+
 DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
 NOTICE: drop cascades to 2 other objects
 DETAIL: drop cascades to sequence test_schema_for_sequence_propagation.seq_10
 drop cascades to default value for column x of table table_without_sequence
 DROP TABLE table_without_sequence;
+DROP TABLE tbl_ref_mats CASCADE;
+NOTICE: drop cascades to view vw_citus_views
+DROP TABLE tbl_dist_mats CASCADE;
@@ -1330,12 +1330,28 @@ SELECT * FROM multi_extension.print_extension_changes();
  | view citus_stat_tenants_local
 (11 rows)

+-- Test downgrade to 11.3-1 from 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+ALTER EXTENSION citus UPDATE TO '11.3-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
+-- Snapshot of state at 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version
 SHOW citus.version;
  citus.version
 ---------------------------------------------------------------------
- 11.3devel
+ 11.3.1
 (1 row)

 -- ensure no unexpected objects were created outside pg_catalog
@@ -650,7 +650,7 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "11.2-1";
+NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "x.y-z";
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -658,7 +658,7 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "11.2-1";
+NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "x.y-z";
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -149,6 +149,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -174,7 +175,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(49 rows)
+(50 rows)

 -- Show that CREATE INDEX commands are included in the activate node snapshot
 CREATE INDEX mx_index ON mx_test_table(col_2);
@@ -206,6 +207,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -231,7 +233,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(50 rows)
+(51 rows)

 -- Show that schema changes are included in the activate node snapshot
 CREATE SCHEMA mx_testing_schema;
@@ -265,6 +267,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -291,7 +294,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

 -- Show that append distributed tables are not included in the activate node snapshot
 CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
@@ -331,6 +334,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -357,7 +361,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

 -- Show that range distributed tables are not included in the activate node snapshot
 UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
@@ -390,6 +394,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -416,7 +421,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

 -- Test start_metadata_sync_to_node and citus_activate_node UDFs
 -- Ensure that hasmetadata=false for all nodes
@@ -1943,6 +1948,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
 SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
 SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1');
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2');
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.dist_table_1');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_ref');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -1997,7 +2008,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(111 rows)
+(117 rows)

 -- shouldn't work since test_table is MX
 ALTER TABLE test_table ADD COLUMN id3 bigserial;
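Every hunk above reflects the same behavioral change: the activate-node snapshot now also emits one worker_drop_sequence_dependency() call per synced table, which is why each row count grows accordingly (for example 49 to 50, and 111 to 117 once six tables are involved). A minimal sketch for inspecting just those entries, assuming the activate_node_snapshot() test UDF that these expected files query returns an array of commands, could be:

    -- list only the sequence-dependency commands contained in the snapshot
    SELECT t.snapshot_command
    FROM unnest(activate_node_snapshot()) AS t(snapshot_command)
    WHERE t.snapshot_command LIKE '%worker_drop_sequence_dependency%'
    ORDER BY 1;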
@@ -149,6 +149,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -174,7 +175,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(49 rows)
+(50 rows)

 -- Show that CREATE INDEX commands are included in the activate node snapshot
 CREATE INDEX mx_index ON mx_test_table(col_2);
@@ -206,6 +207,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -231,7 +233,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(50 rows)
+(51 rows)

 -- Show that schema changes are included in the activate node snapshot
 CREATE SCHEMA mx_testing_schema;
@@ -265,6 +267,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -291,7 +294,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

 -- Show that append distributed tables are not included in the activate node snapshot
 CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
@@ -331,6 +334,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -357,7 +361,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

 -- Show that range distributed tables are not included in the activate node snapshot
 UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
@@ -390,6 +394,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@ -416,7 +421,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
||||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
+(53 rows)

-- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes
@@ -1943,6 +1948,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1');
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2');
+SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.dist_table_1');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_ref');
+SELECT pg_catalog.worker_drop_sequence_dependency('public.test_table');
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -1997,7 +2008,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(111 rows)
+(117 rows)

-- shouldn't work since test_table is MX
ALTER TABLE test_table ADD COLUMN id3 bigserial;

@@ -1095,6 +1095,9 @@ ALTER TABLE IF EXISTS non_existent_table SET SCHEMA non_existent_schema;
NOTICE: relation "non_existent_table" does not exist, skipping
DROP SCHEMA existing_schema, another_existing_schema CASCADE;
NOTICE: drop cascades to table existing_schema.table_set_schema
+-- test DROP SCHEMA with nonexisting schemas
+DROP SCHEMA ax, bx, cx, dx, ex, fx, gx, jx;
+ERROR: schema "ax" does not exist
-- test ALTER TABLE SET SCHEMA with interesting names
CREATE SCHEMA "cItuS.T E E N'sSchema";
CREATE SCHEMA "citus-teen's scnd schm.";
@@ -1361,6 +1364,7 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.p
(schema,{run_test_schema},{})
(1 row)

+DROP TABLE public.nation_local;
DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE;
-- verify that the dropped schema is removed from worker's pg_dist_object
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object

@@ -254,6 +254,76 @@ FETCH FORWARD 3 FROM holdCursor;
1 | 19
(3 rows)

+CLOSE holdCursor;
+-- Test DECLARE CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 10
+1 | 11
+1 | 12
+(3 rows)
+
+FETCH BACKWARD 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 11
+1 | 10
+(2 rows)
+
+FETCH FORWARD 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 10
+1 | 11
+1 | 12
+(3 rows)
+
+COMMIT;
+FETCH 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 13
+1 | 14
+1 | 15
+(3 rows)
+
+CLOSE holdCursor;
+-- Test DECLARE NO SCROLL CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor NO SCROLL CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 10
+1 | 11
+1 | 12
+(3 rows)
+
+FETCH FORWARD 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 13
+1 | 14
+1 | 15
+(3 rows)
+
+COMMIT;
+FETCH 3 FROM holdCursor;
+x | y
+---------------------------------------------------------------------
+1 | 16
+1 | 17
+1 | 18
+(3 rows)
+
+FETCH BACKWARD 3 FROM holdCursor;
+ERROR: cursor can only scan forward
+HINT: Declare it with SCROLL option to enable backward scan.
CLOSE holdCursor;
-- Test DECLARE CURSOR .. WITH HOLD with parameter
CREATE OR REPLACE FUNCTION declares_cursor(p int)

@@ -20,13 +20,14 @@ SELECT create_distributed_table('dist_table_test', 'a');
CREATE TABLE postgres_table_test(a int primary key);
-- make sure that all rebalance operations works fine when
-- reference tables are replicated to the coordinator
+SET client_min_messages TO ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
-NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)

+RESET client_min_messages;
-- should just be noops even if we add the coordinator to the pg_dist_node
SELECT rebalance_table_shards('dist_table_test');
rebalance_table_shards
@@ -221,7 +222,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -244,7 +245,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -267,7 +268,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -290,7 +291,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -2713,6 +2714,113 @@ SELECT sh.logicalrelid, pl.nodeport
(5 rows)

DROP TABLE single_shard_colocation_1a, single_shard_colocation_1b, single_shard_colocation_1c, single_shard_colocation_2a, single_shard_colocation_2b CASCADE;
+-- test the same with coordinator shouldhaveshards = false and shard_count = 2
+-- so that the shard allowed node count would be 2 when rebalancing
+-- for such cases, we only count the nodes that are allowed for shard placements
+UPDATE pg_dist_node SET shouldhaveshards=false WHERE nodeport = :master_port;
+create table two_shard_colocation_1a (a int primary key);
+create table two_shard_colocation_1b (a int primary key);
+SET citus.shard_replication_factor = 1;
+select create_distributed_table('two_shard_colocation_1a','a', colocate_with => 'none', shard_count => 2);
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+select create_distributed_table('two_shard_colocation_1b','a',colocate_with=>'two_shard_colocation_1a');
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+create table two_shard_colocation_2a (a int primary key);
+create table two_shard_colocation_2b (a int primary key);
+select create_distributed_table('two_shard_colocation_2a','a', colocate_with => 'none', shard_count => 2);
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+select create_distributed_table('two_shard_colocation_2b','a',colocate_with=>'two_shard_colocation_2a');
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- move shards of colocation group 1 to worker1
+SELECT citus_move_shard_placement(sh.shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
+FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
+WHERE sh.logicalrelid = 'two_shard_colocation_1a'::regclass
+AND pl.nodeport = :worker_2_port
+LIMIT 1;
+citus_move_shard_placement
+---------------------------------------------------------------------
+
+(1 row)
+
+-- move shards of colocation group 2 to worker2
+SELECT citus_move_shard_placement(sh.shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port)
+FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
+WHERE sh.logicalrelid = 'two_shard_colocation_2a'::regclass
+AND pl.nodeport = :worker_1_port
+LIMIT 1;
+citus_move_shard_placement
+---------------------------------------------------------------------
+
+(1 row)
+
+-- current state:
+-- coordinator: []
+-- worker 1: [1_1, 1_2]
+-- worker 2: [2_1, 2_2]
+SELECT sh.logicalrelid, pl.nodeport
+FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
+WHERE sh.logicalrelid::text IN ('two_shard_colocation_1a', 'two_shard_colocation_1b', 'two_shard_colocation_2a', 'two_shard_colocation_2b')
+ORDER BY sh.logicalrelid, pl.nodeport;
+logicalrelid | nodeport
+---------------------------------------------------------------------
+two_shard_colocation_1a | 57637
+two_shard_colocation_1a | 57637
+two_shard_colocation_1b | 57637
+two_shard_colocation_1b | 57637
+two_shard_colocation_2a | 57638
+two_shard_colocation_2a | 57638
+two_shard_colocation_2b | 57638
+two_shard_colocation_2b | 57638
+(8 rows)
+
+-- If we take the coordinator into account, the rebalancer considers this as balanced and does nothing (shard_count < worker_count)
+-- but because the coordinator is not allowed for shards, rebalancer will distribute each colocation group to both workers
+select rebalance_table_shards(shard_transfer_mode:='block_writes');
+NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
+NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
+rebalance_table_shards
+---------------------------------------------------------------------
+
+(1 row)
+
+-- final state:
+-- coordinator: []
+-- worker 1: [1_1, 2_1]
+-- worker 2: [1_2, 2_2]
+SELECT sh.logicalrelid, pl.nodeport
+FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
+WHERE sh.logicalrelid::text IN ('two_shard_colocation_1a', 'two_shard_colocation_1b', 'two_shard_colocation_2a', 'two_shard_colocation_2b')
+ORDER BY sh.logicalrelid, pl.nodeport;
+logicalrelid | nodeport
+---------------------------------------------------------------------
+two_shard_colocation_1a | 57637
+two_shard_colocation_1a | 57638
+two_shard_colocation_1b | 57637
+two_shard_colocation_1b | 57638
+two_shard_colocation_2a | 57637
+two_shard_colocation_2a | 57638
+two_shard_colocation_2b | 57637
+two_shard_colocation_2b | 57638
+(8 rows)
+
+-- cleanup
+DROP TABLE two_shard_colocation_1a, two_shard_colocation_1b, two_shard_colocation_2a, two_shard_colocation_2b CASCADE;
-- verify we detect if one of the tables do not have a replica identity or primary key
-- and error out in case of shard transfer mode = auto
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);

@@ -1,10 +1,10 @@
test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management
test: multi_test_catalog_views
+test: worker_copy_table_to_node
test: shard_rebalancer_unit
test: shard_rebalancer
test: background_rebalance
-test: worker_copy_table_to_node
test: background_rebalance_parallel
test: foreign_key_to_reference_shard_rebalance
test: multi_move_mx

@@ -1,65 +0,0 @@
-setup
-{
-CREATE FUNCTION run_pg_send_cancellation(int,int)
-RETURNS void
-AS 'citus'
-LANGUAGE C STRICT;
-
-CREATE FUNCTION get_cancellation_key()
-RETURNS int
-AS 'citus'
-LANGUAGE C STRICT;
-
-CREATE TABLE cancel_table (pid int, cancel_key int);
-}
-
-teardown
-{
-DROP TABLE IF EXISTS cancel_table;
-}
-
-session "s1"
-
-/* store the PID and cancellation key of session 1 */
-step "s1-register"
-{
-INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
-}
-
-/* lock the table from session 1, will block and get cancelled */
-step "s1-lock"
-{
-BEGIN;
-LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-END;
-}
-
-session "s2"
-
-/* lock the table from session 2 to block session 1 */
-step "s2-lock"
-{
-BEGIN;
-LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-}
-
-/* PID mismatch */
-step "s2-wrong-cancel-1"
-{
-SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
-}
-
-/* cancellation key mismatch */
-step "s2-wrong-cancel-2"
-{
-SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
-}
-
-/* cancel the LOCK statement in session 1 */
-step "s2-cancel"
-{
-SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
-END;
-}
-
-permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"

@@ -110,6 +110,27 @@ SELECT public.wait_for_resource_cleanup();
SELECT 1 FROM citus_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);

+-- make sure a non-super user can rebalance when there are reference tables to replicate
+CREATE TABLE ref_table(a int primary key);
+SELECT create_reference_table('ref_table');
+
+-- add a new node to trigger replicate_reference_tables task
+SELECT 1 FROM citus_add_node('localhost', :worker_3_port);
+
+SET ROLE non_super_user_rebalance;
+SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
+
+-- wait for success
+SELECT citus_rebalance_wait();
+SELECT state, details from citus_rebalance_status();
+
+RESET ROLE;
+
+-- reset the number of nodes by removing the previously added node
+SELECT 1 FROM citus_drain_node('localhost', :worker_3_port);
+CALL citus_cleanup_orphaned_resources();
+SELECT 1 FROM citus_remove_node('localhost', :worker_3_port);
+
SET client_min_messages TO WARNING;
DROP SCHEMA background_rebalance CASCADE;
DROP USER non_super_user_rebalance;

@@ -204,11 +204,14 @@ SELECT citus_rebalance_start AS job_id from citus_rebalance_start() \gset
-- see dependent tasks to understand which tasks remain runnable because of
-- citus.max_background_task_executors_per_node
-- and which tasks are actually blocked from colocation group dependencies
-SELECT D.task_id,
-(SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.task_id),
-D.depends_on,
-(SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.depends_on)
-FROM pg_dist_background_task_depend D WHERE job_id in (:job_id) ORDER BY D.task_id, D.depends_on ASC;
+SELECT (SELECT T.command FROM pg_dist_background_task T WHERE T.task_id = D.task_id),
+(SELECT T.command depends_on_command FROM pg_dist_background_task T WHERE T.task_id = D.depends_on)
+FROM pg_dist_background_task_depend D WHERE job_id in (:job_id) ORDER BY 1, 2 ASC;
+
+SELECT task_id, depends_on
+FROM pg_dist_background_task_depend
+WHERE job_id in (:job_id)
+ORDER BY 1, 2 ASC;

-- default citus.max_background_task_executors_per_node is 1
-- show that first exactly one task per node is running

@@ -35,7 +35,10 @@ UPDATE dist_tbl SET b = a + 1 WHERE a = 3;
UPDATE dist_tbl SET b = a + 1 WHERE a = 4;
DELETE FROM dist_tbl WHERE a = 5;

-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants(true) ORDER BY tenant_attribute;
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+(cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants(true)
+ORDER BY tenant_attribute;

SELECT citus_stat_tenants_reset();

@@ -84,13 +87,26 @@ SELECT count(*)>=0 FROM dist_tbl WHERE a = 1;
INSERT INTO dist_tbl VALUES (5, 'abcd');

\c - - - :worker_1_port
-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+(cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;

-- simulate passing the period
-SET citus.stat_tenants_period TO 2;
+SET citus.stat_tenants_period TO 5;
SELECT sleep_until_next_period();

-SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+(cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;
+
+SELECT sleep_until_next_period();
+
+SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period,
+(cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period
+FROM citus_stat_tenants_local
+ORDER BY tenant_attribute;

\c - - - :master_port
SET search_path TO citus_stat_tenants;
@@ -158,6 +174,11 @@ SELECT count(*)>=0 FROM dist_tbl_text WHERE a = '/bcde';
SELECT count(*)>=0 FROM dist_tbl_text WHERE a = U&'\0061\0308bc';
SELECT count(*)>=0 FROM dist_tbl_text WHERE a = 'bcde*';

+DELETE FROM dist_tbl_text WHERE a = '/b*c/de';
+DELETE FROM dist_tbl_text WHERE a = '/bcde';
+DELETE FROM dist_tbl_text WHERE a = U&'\0061\0308bc';
+DELETE FROM dist_tbl_text WHERE a = 'bcde*';
+
SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute;

-- test local cached queries & prepared statements
@@ -231,5 +252,64 @@ SELECT count(*)>=0 FROM citus_stat_tenants_local();
RESET ROLE;
|
RESET ROLE;
|
||||||
DROP ROLE stats_non_superuser;
|
DROP ROLE stats_non_superuser;
|
||||||
|
|
||||||
|
-- test function push down
|
||||||
|
CREATE OR REPLACE FUNCTION
|
||||||
|
select_from_dist_tbl_text(p_keyword text)
|
||||||
|
RETURNS boolean LANGUAGE plpgsql AS $fn$
|
||||||
|
BEGIN
|
||||||
|
RETURN(SELECT count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE a = $1);
|
||||||
|
END;
|
||||||
|
$fn$;
|
||||||
|
|
||||||
|
SELECT create_distributed_function(
|
||||||
|
'select_from_dist_tbl_text(text)', 'p_keyword', colocate_with => 'dist_tbl_text'
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT citus_stat_tenants_reset();
|
||||||
|
|
||||||
|
SELECT select_from_dist_tbl_text('/b*c/de');
|
||||||
|
SELECT select_from_dist_tbl_text('/b*c/de');
|
||||||
|
SELECT select_from_dist_tbl_text(U&'\0061\0308bc');
|
||||||
|
SELECT select_from_dist_tbl_text(U&'\0061\0308bc');
|
||||||
|
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
|
||||||
|
CREATE OR REPLACE PROCEDURE select_from_dist_tbl_text_proc(
|
||||||
|
p_keyword text
|
||||||
|
)
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $$
|
||||||
|
BEGIN
|
||||||
|
PERFORM select_from_dist_tbl_text(p_keyword);
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE b < 0;
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text;
|
||||||
|
PERFORM count(*)>=0 FROM citus_stat_tenants.dist_tbl_text WHERE a = p_keyword;
|
||||||
|
COMMIT;
|
||||||
|
END;$$;
|
||||||
|
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc('/b*c/de');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(U&'\0061\0308bc');
|
||||||
|
CALL citus_stat_tenants.select_from_dist_tbl_text_proc(NULL);
|
||||||
|
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
|
||||||
|
CREATE OR REPLACE VIEW
|
||||||
|
select_from_dist_tbl_text_view
|
||||||
|
AS
|
||||||
|
SELECT * FROM citus_stat_tenants.dist_tbl_text;
|
||||||
|
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = '/b*c/de';
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
SELECT count(*)>=0 FROM select_from_dist_tbl_text_view WHERE a = U&'\0061\0308bc';
|
||||||
|
|
||||||
|
SELECT tenant_attribute, query_count_in_this_period FROM citus_stat_tenants;
|
||||||
|
|
||||||
SET client_min_messages TO ERROR;
|
SET client_min_messages TO ERROR;
|
||||||
DROP SCHEMA citus_stat_tenants CASCADE;
|
DROP SCHEMA citus_stat_tenants CASCADE;
|
||||||
|
|
|
@@ -28,6 +28,14 @@ set citus.shard_replication_factor to 2;
select create_distributed_table_concurrently('test','key', 'hash');
set citus.shard_replication_factor to 1;

+set citus.shard_replication_factor to 2;
+create table dist_1(a int);
+select create_distributed_table('dist_1', 'a');
+set citus.shard_replication_factor to 1;
+
+create table dist_2(a int);
+select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
+
begin;
select create_distributed_table_concurrently('test','key');
rollback;
@@ -63,6 +71,7 @@ rollback;

-- verify that we can undistribute the table
begin;
+set local client_min_messages to warning;
select undistribute_table('test', cascade_via_foreign_keys := true);
rollback;

@@ -97,6 +97,8 @@ WHERE s.logicalrelid = 'user_table'::regclass AND n.isactive
ORDER BY placementid;

-- reset cluster to original state
+ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 2;
+ALTER SEQUENCE pg_dist_groupid_seq RESTART 2;
SELECT citus.mitmproxy('conn.allow()');
SELECT master_add_node('localhost', :worker_2_proxy_port);

@@ -15,6 +15,9 @@ SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1;
SELECT pg_backend_pid() as pid \gset

+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 222222;
+ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 333333;
+
-- make sure coordinator is in the metadata
SELECT citus_set_coordinator_host('localhost', 57636);

@@ -108,3 +111,5 @@ SELECT * FROM pg_dist_shard WHERE logicalrelid = 'table_1'::regclass;
DROP SCHEMA create_dist_tbl_con CASCADE;
SET search_path TO default;
SELECT citus_remove_node('localhost', 57636);
+ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 3;
+ALTER SEQUENCE pg_dist_groupid_seq RESTART 3;

@@ -18,11 +18,36 @@ SET client_min_messages TO ERROR;
CREATE ROLE foo1;
CREATE ROLE foo2;
|
|
||||||
|
-- Create collation
|
||||||
|
CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
|
||||||
|
|
||||||
|
-- Create type
|
||||||
|
CREATE TYPE pair_type AS (a int, b int);
|
||||||
|
|
||||||
|
-- Create function
|
||||||
|
CREATE FUNCTION one_as_result() RETURNS INT LANGUAGE SQL AS
|
||||||
|
$$
|
||||||
|
SELECT 1;
|
||||||
|
$$;
|
||||||
|
|
||||||
|
-- Create text search dictionary
|
||||||
|
CREATE TEXT SEARCH DICTIONARY my_german_dict (
|
||||||
|
template = snowball,
|
||||||
|
language = german,
|
||||||
|
stopwords = german
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Create text search config
|
||||||
|
CREATE TEXT SEARCH CONFIGURATION my_ts_config ( parser = default );
|
||||||
|
ALTER TEXT SEARCH CONFIGURATION my_ts_config ALTER MAPPING FOR asciiword WITH my_german_dict;
|
||||||
|
|
||||||
-- Create sequence
|
-- Create sequence
|
||||||
CREATE SEQUENCE seq;
|
CREATE SEQUENCE seq;
|
||||||
|
|
||||||
-- Create colocated distributed tables
|
-- Create colocated distributed tables
|
||||||
-CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'));
+CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'), col int default (one_as_result()), myserial serial, phone text COLLATE german_phonebook, initials pair_type);
+CREATE SEQUENCE seq_owned OWNED BY dist1.id;
+CREATE INDEX dist1_search_phone_idx ON dist1 USING gin (to_tsvector('my_ts_config'::regconfig, (COALESCE(phone, ''::text))::text));
SELECT create_distributed_table('dist1', 'id');
|
SELECT create_distributed_table('dist1', 'id');
|
||||||
INSERT INTO dist1 SELECT i FROM generate_series(1,100) i;
|
INSERT INTO dist1 SELECT i FROM generate_series(1,100) i;
|
||||||
|
|
||||||
|
@ -42,7 +67,15 @@ INSERT INTO loc1 SELECT i FROM generate_series(1,100) i;
|
||||||
CREATE TABLE loc2 (id int REFERENCES loc1(id));
|
CREATE TABLE loc2 (id int REFERENCES loc1(id));
|
||||||
INSERT INTO loc2 SELECT i FROM generate_series(1,100) i;
|
INSERT INTO loc2 SELECT i FROM generate_series(1,100) i;
|
||||||
|
|
||||||
|
-- Create publication
|
||||||
|
CREATE PUBLICATION pub_all;
|
||||||
|
|
||||||
|
-- citus_set_coordinator_host with wrong port
|
||||||
|
SELECT citus_set_coordinator_host('localhost', 9999);
|
||||||
|
-- citus_set_coordinator_host with correct port
|
||||||
SELECT citus_set_coordinator_host('localhost', :master_port);
|
SELECT citus_set_coordinator_host('localhost', :master_port);
|
||||||
|
-- show coordinator port is correct on all workers
|
||||||
|
SELECT * FROM run_command_on_workers($$SELECT row(nodename,nodeport) FROM pg_dist_node WHERE groupid = 0$$);
|
||||||
SELECT citus_add_local_table_to_metadata('loc1', cascade_via_foreign_keys => true);
|
SELECT citus_add_local_table_to_metadata('loc1', cascade_via_foreign_keys => true);
|
||||||
|
|
||||||
-- Create partitioned distributed table
|
-- Create partitioned distributed table
|
||||||
|
@@ -83,10 +116,10 @@ SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

--- Failure to drop sequence
+-- Failure to drop sequence dependency for all tables
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to drop shell table
@@ -137,24 +170,84 @@ SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

--- Filure to create schema
+-- Failure to create schema
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

|
-- Failure to create collation
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create function
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create text search dictionary
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create text search config
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create type
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create publication
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
-- Failure to create sequence
|
-- Failure to create sequence
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')');
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").kill()');
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").kill()');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to drop sequence dependency for distributed table
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to drop distributed table if exists
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
-- Failure to create distributed table
|
-- Failure to create distributed table
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").kill()');
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").kill()');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to record sequence dependency for table
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
|
-- Failure to create index for table
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").cancel(' || :pid || ')');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").kill()');
|
||||||
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
||||||
-- Failure to create reference table
|
-- Failure to create reference table
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')');
|
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
@ -215,6 +308,48 @@ SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
|
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
|
||||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||||
|
|
-- Failure to mark function as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark collation as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark text search dictionary as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark text search configuration as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark type as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark sequence as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to mark publication as distributed
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
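All of the kill/cancel cases above follow one recipe: program the proxy to match the remote command, run citus_activate_node and expect it to fail, then clear the proxy rule. A minimal sketch of that pattern (it assumes the citus.mitmproxy helper and the :pid / :worker_2_proxy_port variables set up earlier in this test file; the query regex is a placeholder, not an actual command from the test):

-- sketch: inject a failure on a matching remote command, then reset the proxy
SELECT citus.mitmproxy('conn.onQuery(query="<remote command regex>").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);  -- expected to ERROR
SELECT citus.mitmproxy('conn.allow()');                         -- allow traffic through again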
-- Failure to set isactive to true
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
@@ -255,11 +390,8 @@ UPDATE dist1 SET id = :failed_node_val WHERE id = :failed_node_val;
DELETE FROM dist1 WHERE id = :failed_node_val;

-- Show that DDL would still propagate to the node
SET client_min_messages TO NOTICE;
SET citus.log_remote_commands TO 1;
CREATE SCHEMA dummy;
SET citus.log_remote_commands TO 0;
SELECT * FROM run_command_on_workers($$SELECT nspname FROM pg_namespace WHERE nspname = 'dummy'$$);
SET client_min_messages TO ERROR;

-- Successfully activate the node after many failures
SELECT citus.mitmproxy('conn.allow()');
@@ -275,8 +407,11 @@ SELECT * FROM pg_dist_node ORDER BY nodeport;
SELECT citus.mitmproxy('conn.allow()');

RESET citus.metadata_sync_mode;
DROP PUBLICATION pub_all;
DROP SCHEMA dummy;
DROP SCHEMA mx_metadata_sync_multi_trans CASCADE;
DROP ROLE foo1;
DROP ROLE foo2;
SELECT citus_remove_node('localhost', :master_port);
ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 3;
ALTER SEQUENCE pg_dist_groupid_seq RESTART 3;
@@ -84,6 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now(
select create_distributed_table('partitioned_tbl_with_fkey','x');
create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;

ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
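One way to double-check that the foreign key propagated to every partition, including the newly added partition_3_with_fkey, is to look it up in pg_constraint. This is a sketch, not part of the test file, and assumes the same table names as above:

-- sketch: list FK constraints that reference the reference table
SELECT conrelid::regclass AS tbl, conname
FROM pg_constraint
WHERE confrelid = 'ref_table_with_fkey'::regclass
ORDER BY conrelid::regclass::text, conname;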
@@ -667,5 +667,33 @@ ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_sch
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);

-- Bug: https://github.com/citusdata/citus/issues/7378

-- Create a reference table
CREATE TABLE tbl_ref_mats(row_id integer primary key);
INSERT INTO tbl_ref_mats VALUES (1), (2);
SELECT create_reference_table('tbl_ref_mats');

-- Create a distributed table
CREATE TABLE tbl_dist_mats(series_id integer);
INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
SELECT create_distributed_table('tbl_dist_mats', 'series_id');

-- Create a view that joins the distributed table with the reference table on the distribution key.
CREATE VIEW vw_citus_views as
SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;

-- The view initially works fine
SELECT * FROM vw_citus_views ORDER BY 1;

-- Now, alter the table
ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
ALTER TABLE tbl_ref_mats DROP COLUMN category1;
SELECT * FROM vw_citus_views ORDER BY 1;
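For reference, the view's current column list after these ALTERs can be inspected through the standard PostgreSQL catalogs. A sketch, not part of the regression test:

-- sketch: show the columns the view currently exposes
SELECT attnum, attname, atttypid::regtype
FROM pg_attribute
WHERE attrelid = 'vw_citus_views'::regclass
  AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;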
DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
DROP TABLE table_without_sequence;
DROP TABLE tbl_ref_mats CASCADE;
DROP TABLE tbl_dist_mats CASCADE;
@@ -591,6 +591,16 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '11.3-1';
SELECT * FROM multi_extension.print_extension_changes();

-- Test downgrade to 11.3-1 from 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
ALTER EXTENSION citus UPDATE TO '11.3-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();

-- Snapshot of state at 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
SELECT * FROM multi_extension.print_extension_changes();

DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;

-- show running version
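print_extension_changes tracks object-level diffs between versions; for reference, a generic catalog query to confirm which Citus extension version is actually installed after the snapshot (a sketch, not taken from the test file):

-- sketch: report the installed Citus extension version
SELECT default_version, installed_version
FROM pg_available_extensions
WHERE name = 'citus';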
@@ -802,6 +802,9 @@ ALTER TABLE IF EXISTS non_existent_table SET SCHEMA non_existent_schema;
DROP SCHEMA existing_schema, another_existing_schema CASCADE;

-- test DROP SCHEMA with nonexisting schemas
DROP SCHEMA ax, bx, cx, dx, ex, fx, gx, jx;

-- test ALTER TABLE SET SCHEMA with interesting names
CREATE SCHEMA "cItuS.T E E N'sSchema";
CREATE SCHEMA "citus-teen's scnd schm.";

@@ -968,6 +971,7 @@ SET client_min_messages TO WARNING;
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object
    WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema');
DROP TABLE public.nation_local;
DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE;
-- verify that the dropped schema is removed from worker's pg_dist_object
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object
@@ -137,6 +137,30 @@ FETCH FORWARD 3 FROM holdCursor;
CLOSE holdCursor;

-- Test DECLARE CURSOR .. WITH HOLD inside transaction block
BEGIN;
DECLARE holdCursor CURSOR WITH HOLD FOR
        SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
FETCH 3 FROM holdCursor;
FETCH BACKWARD 3 FROM holdCursor;
FETCH FORWARD 3 FROM holdCursor;
COMMIT;

FETCH 3 FROM holdCursor;
CLOSE holdCursor;

-- Test DECLARE NO SCROLL CURSOR .. WITH HOLD inside transaction block
BEGIN;
DECLARE holdCursor NO SCROLL CURSOR WITH HOLD FOR
        SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
FETCH 3 FROM holdCursor;
FETCH FORWARD 3 FROM holdCursor;
COMMIT;

FETCH 3 FROM holdCursor;
FETCH BACKWARD 3 FROM holdCursor;
CLOSE holdCursor;
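The WITH HOLD behavior exercised here mirrors plain PostgreSQL: the cursor's result is materialized at COMMIT and stays usable afterwards, while NO SCROLL forbids backward fetches. A minimal local sketch on an ordinary scratch table (hypothetical names, not part of the test):

CREATE TEMP TABLE cursor_demo AS SELECT g AS y FROM generate_series(1, 10) g;
BEGIN;
DECLARE demo_cur NO SCROLL CURSOR WITH HOLD FOR SELECT y FROM cursor_demo ORDER BY y;
COMMIT;                          -- WITH HOLD: result is materialized, cursor survives the commit
FETCH 3 FROM demo_cur;           -- forward fetch still works after COMMIT
FETCH BACKWARD 3 FROM demo_cur;  -- fails: a NO SCROLL cursor cannot scan backward
CLOSE demo_cur;
DROP TABLE cursor_demo;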
-- Test DECLARE CURSOR .. WITH HOLD with parameter
CREATE OR REPLACE FUNCTION declares_cursor(p int)
RETURNS void AS $$
@@ -13,7 +13,9 @@ CREATE TABLE postgres_table_test(a int primary key);

-- make sure that all rebalance operations works fine when
-- reference tables are replicated to the coordinator
SET client_min_messages TO ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
RESET client_min_messages;

-- should just be noops even if we add the coordinator to the pg_dist_node
SELECT rebalance_table_shards('dist_table_test');
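A quick way to confirm the coordinator really was registered in the metadata, and whether it is allowed to hold shards, is to read pg_dist_node directly (a sketch; not part of the original test):

-- sketch: list nodes and whether each may hold shards
SELECT nodeid, groupid, nodename, nodeport, shouldhaveshards
FROM pg_dist_node
ORDER BY nodeid;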
@@ -1497,6 +1499,61 @@ SELECT sh.logicalrelid, pl.nodeport

DROP TABLE single_shard_colocation_1a, single_shard_colocation_1b, single_shard_colocation_1c, single_shard_colocation_2a, single_shard_colocation_2b CASCADE;

-- test the same with coordinator shouldhaveshards = false and shard_count = 2
-- so that the shard allowed node count would be 2 when rebalancing
-- for such cases, we only count the nodes that are allowed for shard placements
UPDATE pg_dist_node SET shouldhaveshards=false WHERE nodeport = :master_port;

create table two_shard_colocation_1a (a int primary key);
create table two_shard_colocation_1b (a int primary key);
SET citus.shard_replication_factor = 1;

select create_distributed_table('two_shard_colocation_1a','a', colocate_with => 'none', shard_count => 2);
select create_distributed_table('two_shard_colocation_1b','a',colocate_with=>'two_shard_colocation_1a');

create table two_shard_colocation_2a (a int primary key);
create table two_shard_colocation_2b (a int primary key);
select create_distributed_table('two_shard_colocation_2a','a', colocate_with => 'none', shard_count => 2);
select create_distributed_table('two_shard_colocation_2b','a',colocate_with=>'two_shard_colocation_2a');

-- move shards of colocation group 1 to worker1
SELECT citus_move_shard_placement(sh.shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
  FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
  WHERE sh.logicalrelid = 'two_shard_colocation_1a'::regclass
    AND pl.nodeport = :worker_2_port
  LIMIT 1;
-- move shards of colocation group 2 to worker2
SELECT citus_move_shard_placement(sh.shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port)
  FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
  WHERE sh.logicalrelid = 'two_shard_colocation_2a'::regclass
    AND pl.nodeport = :worker_1_port
  LIMIT 1;

-- current state:
-- coordinator: []
-- worker 1: [1_1, 1_2]
-- worker 2: [2_1, 2_2]
SELECT sh.logicalrelid, pl.nodeport
  FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
  WHERE sh.logicalrelid::text IN ('two_shard_colocation_1a', 'two_shard_colocation_1b', 'two_shard_colocation_2a', 'two_shard_colocation_2b')
  ORDER BY sh.logicalrelid, pl.nodeport;

-- If we take the coordinator into account, the rebalancer considers this as balanced and does nothing (shard_count < worker_count)
-- but because the coordinator is not allowed for shards, rebalancer will distribute each colocation group to both workers
select rebalance_table_shards(shard_transfer_mode:='block_writes');

-- final state:
-- coordinator: []
-- worker 1: [1_1, 2_1]
-- worker 2: [1_2, 2_2]
SELECT sh.logicalrelid, pl.nodeport
  FROM pg_dist_shard sh JOIN pg_dist_shard_placement pl ON sh.shardid = pl.shardid
  WHERE sh.logicalrelid::text IN ('two_shard_colocation_1a', 'two_shard_colocation_1b', 'two_shard_colocation_2a', 'two_shard_colocation_2b')
  ORDER BY sh.logicalrelid, pl.nodeport;

-- cleanup
DROP TABLE two_shard_colocation_1a, two_shard_colocation_1b, two_shard_colocation_2a, two_shard_colocation_2b CASCADE;
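For a scenario like the one above, the moves the rebalancer intends to make can also be previewed before anything is executed, using Citus's plan helper. A sketch; it assumes a Citus release that ships get_rebalance_table_shards_plan:

-- sketch: preview the shard moves the rebalancer would make
SELECT * FROM get_rebalance_table_shards_plan();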
-- verify we detect if one of the tables do not have a replica identity or primary key
-- and error out in case of shard transfer mode = auto
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);